Merge branch 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author    Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 23 Jun 2015 02:20:04 +0000 (19:20 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
          Tue, 23 Jun 2015 02:20:04 +0000 (19:20 -0700)
Pull NOHZ updates from Thomas Gleixner:
 "A few updates to the nohz infrastructure:

   - recursion protection for context tracking

   - make the TIF_NOHZ inheritance smarter

   - isolate cpus which belong to the NOHZ full set"
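
The recursion protection is essentially a per-cpu guard around the
context tracking enter/exit paths.  A minimal sketch of the pattern,
using the helper names from the commit (a reconstruction, not a
verbatim diff):

    /* kernel/context_tracking.c: reject nested entry, e.g. when an
     * exception taken inside the enter path calls back into it. */
    static DEFINE_PER_CPU(int, context_tracking_recursion);

    static bool context_tracking_recursion_enter(void)
    {
            int recursion = __this_cpu_inc_return(context_tracking_recursion);

            if (recursion == 1)
                    return true;

            WARN_ONCE(1, "Invalid context tracking recursion value %d\n",
                      recursion);
            __this_cpu_dec(context_tracking_recursion);
            return false;
    }

    static void context_tracking_recursion_exit(void)
    {
            __this_cpu_dec(context_tracking_recursion);
    }

Callers bail out early when the guard reports recursion, so a
re-entrant call warns once and becomes a no-op instead of corrupting
the per-cpu context tracking state.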

* 'timers-nohz-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  nohz: Set isolcpus when nohz_full is set
  nohz: Add tick_nohz_full_add_cpus_to() API
  context_tracking: Inherit TIF_NOHZ through forks instead of context switches
  context_tracking: Protect against recursion
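
The first two commits above work together: tick_nohz_full_add_cpus_to()
lets a subsystem fold the nohz_full set into a cpumask of its own, and
the scheduler uses it so that nohz_full CPUs are treated as if they had
also been passed via isolcpus=.  A sketch under those assumptions
(reconstructed from the commit subjects, not a verbatim diff):

    /* include/linux/tick.h */
    static inline void tick_nohz_full_add_cpus_to(struct cpumask *mask)
    {
            if (tick_nohz_full_enabled())
                    cpumask_or(mask, mask, tick_nohz_full_mask);
    }

    /* kernel/sched/core.c, during SMP scheduler init: keep the
     * nohz_full CPUs out of the general scheduler domains. */
    tick_nohz_full_add_cpus_to(cpu_isolated_map);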

1954 files changed:
CREDITS
Documentation/ABI/testing/sysfs-devices-system-cpu
Documentation/ABI/testing/sysfs-firmware-efi
Documentation/ABI/testing/sysfs-firmware-efi-esrt [new file with mode: 0644]
Documentation/RCU/arrayRCU.txt
Documentation/RCU/lockdep.txt
Documentation/RCU/rcu_dereference.txt
Documentation/RCU/whatisRCU.txt
Documentation/acpi/enumeration.txt
Documentation/acpi/gpio-properties.txt
Documentation/cputopology.txt
Documentation/devicetree/bindings/arm/armv7m_systick.txt [new file with mode: 0644]
Documentation/devicetree/bindings/arm/omap/l3-noc.txt
Documentation/devicetree/bindings/clock/at91-clock.txt
Documentation/devicetree/bindings/clock/silabs,si5351.txt
Documentation/devicetree/bindings/dma/fsl-mxs-dma.txt
Documentation/devicetree/bindings/input/touchscreen/tsc2005.txt
Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt [new file with mode: 0644]
Documentation/devicetree/bindings/mtd/m25p80.txt [deleted file]
Documentation/devicetree/bindings/net/cdns-emac.txt
Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/timer/st,stm32-timer.txt [new file with mode: 0644]
Documentation/devicetree/bindings/usb/renesas_usbhs.txt
Documentation/filesystems/Locking
Documentation/filesystems/automount-support.txt
Documentation/filesystems/porting
Documentation/filesystems/vfs.txt
Documentation/hwmon/tmp401
Documentation/i2c/slave-interface
Documentation/kernel-parameters.txt
Documentation/memory-barriers.txt
Documentation/networking/udplite.txt
Documentation/preempt-locking.txt
Documentation/scheduler/sched-deadline.txt
Documentation/serial/tty.txt
Documentation/target/tcmu-design.txt
Documentation/virtual/kvm/mmu.txt
Documentation/x86/boot.txt
Documentation/x86/entry_64.txt
Documentation/x86/kernel-stacks [new file with mode: 0644]
Documentation/x86/mtrr.txt
Documentation/x86/pat.txt
Documentation/x86/x86_64/boot-options.txt
Documentation/x86/x86_64/kernel-stacks [deleted file]
Kbuild
MAINTAINERS
Makefile
arch/alpha/boot/Makefile
arch/alpha/boot/main.c
arch/alpha/boot/stdio.c [new file with mode: 0644]
arch/alpha/boot/tools/objstrip.c
arch/alpha/include/asm/cmpxchg.h
arch/alpha/include/asm/types.h
arch/alpha/include/asm/unistd.h
arch/alpha/include/uapi/asm/unistd.h
arch/alpha/kernel/err_ev6.c
arch/alpha/kernel/irq.c
arch/alpha/kernel/osf_sys.c
arch/alpha/kernel/process.c
arch/alpha/kernel/smp.c
arch/alpha/kernel/srmcons.c
arch/alpha/kernel/sys_marvel.c
arch/alpha/kernel/systbls.S
arch/alpha/kernel/traps.c
arch/alpha/mm/fault.c
arch/alpha/oprofile/op_model_ev4.c
arch/alpha/oprofile/op_model_ev5.c
arch/alpha/oprofile/op_model_ev6.c
arch/alpha/oprofile/op_model_ev67.c
arch/arc/Kconfig.debug
arch/arc/include/asm/atomic.h
arch/arc/include/asm/futex.h
arch/arc/include/asm/io.h
arch/arc/mm/cache_arc700.c
arch/arc/mm/fault.c
arch/arm/boot/dts/Makefile
arch/arm/boot/dts/am335x-bone-common.dtsi
arch/arm/boot/dts/am335x-boneblack.dts
arch/arm/boot/dts/am335x-evmsk.dts
arch/arm/boot/dts/am35xx-clocks.dtsi
arch/arm/boot/dts/am437x-sk-evm.dts
arch/arm/boot/dts/am57xx-beagle-x15.dts
arch/arm/boot/dts/armada-375.dtsi
arch/arm/boot/dts/armada-38x.dtsi
arch/arm/boot/dts/armada-39x.dtsi
arch/arm/boot/dts/armada-xp-linksys-mamba.dts
arch/arm/boot/dts/armada-xp-openblocks-ax3-4.dts
arch/arm/boot/dts/dm816x.dtsi
arch/arm/boot/dts/dove-cubox.dts
arch/arm/boot/dts/dra7.dtsi
arch/arm/boot/dts/exynos4412-odroid-common.dtsi
arch/arm/boot/dts/exynos4412-trats2.dts
arch/arm/boot/dts/exynos5250-snow.dts
arch/arm/boot/dts/exynos5420-peach-pit.dts
arch/arm/boot/dts/exynos5420-trip-points.dtsi
arch/arm/boot/dts/exynos5420.dtsi
arch/arm/boot/dts/exynos5440-trip-points.dtsi
arch/arm/boot/dts/exynos5800-peach-pi.dts
arch/arm/boot/dts/imx23-olinuxino.dts
arch/arm/boot/dts/imx25.dtsi
arch/arm/boot/dts/imx27.dtsi
arch/arm/boot/dts/imx28.dtsi
arch/arm/boot/dts/imx6qdl-phytec-pfla02.dtsi
arch/arm/boot/dts/imx6qdl-sabreauto.dtsi
arch/arm/boot/dts/omap3-devkit8000.dts
arch/arm/boot/dts/omap3-n900.dts
arch/arm/boot/dts/omap3.dtsi
arch/arm/boot/dts/omap5.dtsi
arch/arm/boot/dts/r8a7791-koelsch.dts
arch/arm/boot/dts/ste-dbx5x0.dtsi
arch/arm/boot/dts/ste-href.dtsi
arch/arm/boot/dts/ste-snowball.dts
arch/arm/boot/dts/tegra124.dtsi
arch/arm/boot/dts/vexpress-v2p-ca15_a7.dts
arch/arm/boot/dts/vexpress-v2p-ca9.dts
arch/arm/boot/dts/zynq-7000.dtsi
arch/arm/configs/multi_v7_defconfig
arch/arm/configs/omap2plus_defconfig
arch/arm/include/asm/barrier.h
arch/arm/include/asm/dma-iommu.h
arch/arm/include/asm/futex.h
arch/arm/include/asm/io.h
arch/arm/include/asm/topology.h
arch/arm/kernel/entry-common.S
arch/arm/kernel/perf_event_cpu.c
arch/arm/mach-exynos/common.h
arch/arm/mach-exynos/exynos.c
arch/arm/mach-exynos/platsmp.c
arch/arm/mach-exynos/pm_domains.c
arch/arm/mach-exynos/suspend.c
arch/arm/mach-gemini/common.h
arch/arm/mach-gemini/reset.c
arch/arm/mach-imx/devices/platform-sdhci-esdhc-imx.c
arch/arm/mach-imx/gpc.c
arch/arm/mach-omap2/omap_hwmod.c
arch/arm/mach-omap2/omap_hwmod_43xx_data.c
arch/arm/mach-omap2/prcm43xx.h
arch/arm/mach-omap2/prm-regbits-34xx.h
arch/arm/mach-omap2/prm-regbits-44xx.h
arch/arm/mach-omap2/prminst44xx.c
arch/arm/mach-omap2/sleep34xx.S
arch/arm/mach-omap2/timer.c
arch/arm/mach-omap2/vc.c
arch/arm/mach-omap2/vc.h
arch/arm/mach-omap2/vc3xxx_data.c
arch/arm/mach-omap2/vc44xx_data.c
arch/arm/mach-pxa/Kconfig
arch/arm/mach-pxa/Makefile
arch/arm/mach-pxa/include/mach/lubbock.h
arch/arm/mach-pxa/include/mach/mainstone.h
arch/arm/mach-pxa/lubbock.c
arch/arm/mach-pxa/mainstone.c
arch/arm/mach-pxa/pxa_cplds_irqs.c [new file with mode: 0644]
arch/arm/mach-rockchip/pm.c
arch/arm/mach-rockchip/pm.h
arch/arm/mach-rockchip/rockchip.c
arch/arm/mm/dma-mapping.c
arch/arm/mm/fault.c
arch/arm/mm/highmem.c
arch/arm/mm/mmu.c
arch/arm/mm/proc-arm1020.S
arch/arm/mm/proc-arm1020e.S
arch/arm/mm/proc-arm925.S
arch/arm/mm/proc-feroceon.S
arch/arm/net/bpf_jit_32.c
arch/arm/xen/enlighten.c
arch/arm64/boot/dts/arm/juno-motherboard.dtsi
arch/arm64/boot/dts/mediatek/mt8173-evb.dts
arch/arm64/crypto/crc32-arm64.c
arch/arm64/crypto/sha1-ce-glue.c
arch/arm64/crypto/sha2-ce-glue.c
arch/arm64/include/asm/barrier.h
arch/arm64/include/asm/futex.h
arch/arm64/include/asm/io.h
arch/arm64/include/asm/topology.h
arch/arm64/kernel/alternative.c
arch/arm64/kernel/perf_event.c
arch/arm64/mm/dump.c
arch/arm64/mm/fault.c
arch/arm64/net/bpf_jit_comp.c
arch/avr32/include/asm/cmpxchg.h
arch/avr32/include/asm/io.h
arch/avr32/include/asm/uaccess.h
arch/avr32/mm/fault.c
arch/blackfin/include/asm/io.h
arch/cris/mm/fault.c
arch/frv/include/asm/io.h
arch/frv/mm/fault.c
arch/frv/mm/highmem.c
arch/hexagon/include/asm/cmpxchg.h
arch/hexagon/include/asm/uaccess.h
arch/ia64/include/asm/barrier.h
arch/ia64/include/asm/irq_remapping.h
arch/ia64/include/asm/topology.h
arch/ia64/include/uapi/asm/cmpxchg.h
arch/ia64/kernel/msi_ia64.c
arch/ia64/kernel/smpboot.c
arch/ia64/mm/fault.c
arch/ia64/pci/pci.c
arch/m32r/include/asm/cmpxchg.h
arch/m32r/include/asm/io.h
arch/m32r/include/asm/uaccess.h
arch/m32r/kernel/smp.c
arch/m32r/mm/fault.c
arch/m68k/include/asm/cmpxchg.h
arch/m68k/include/asm/io_mm.h
arch/m68k/include/asm/io_no.h
arch/m68k/include/asm/irqflags.h
arch/m68k/mm/fault.c
arch/metag/include/asm/barrier.h
arch/metag/include/asm/cmpxchg.h
arch/metag/include/asm/io.h
arch/metag/mm/fault.c
arch/metag/mm/highmem.c
arch/microblaze/include/asm/io.h
arch/microblaze/include/asm/uaccess.h
arch/microblaze/mm/fault.c
arch/microblaze/mm/highmem.c
arch/mips/Makefile
arch/mips/ath79/prom.c
arch/mips/ath79/setup.c
arch/mips/cobalt/Makefile
arch/mips/configs/fuloong2e_defconfig
arch/mips/include/asm/barrier.h
arch/mips/include/asm/cmpxchg.h
arch/mips/include/asm/elf.h
arch/mips/include/asm/pgtable-bits.h
arch/mips/include/asm/smp.h
arch/mips/include/asm/switch_to.h
arch/mips/include/asm/topology.h
arch/mips/include/asm/uaccess.h
arch/mips/kernel/cpu-probe.c
arch/mips/kernel/elf.c
arch/mips/kernel/irq.c
arch/mips/kernel/ptrace.c
arch/mips/kernel/signal-common.h
arch/mips/kernel/smp-bmips.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/mips/kvm/emulate.c
arch/mips/lib/strnlen_user.S
arch/mips/loongson/common/Makefile
arch/mips/loongson/loongson-3/smp.c
arch/mips/math-emu/cp1emu.c
arch/mips/mm/c-r4k.c
arch/mips/mm/fault.c
arch/mips/mm/highmem.c
arch/mips/mm/init.c
arch/mips/mm/tlb-r4k.c
arch/mips/net/bpf_jit.c
arch/mips/ralink/ill_acc.c
arch/mips/sgi-ip32/ip32-platform.c
arch/mn10300/include/asm/highmem.h
arch/mn10300/include/asm/io.h
arch/mn10300/mm/fault.c
arch/nios2/include/asm/io.h
arch/nios2/mm/fault.c
arch/parisc/include/asm/cacheflush.h
arch/parisc/include/asm/cmpxchg.h
arch/parisc/include/asm/elf.h
arch/parisc/kernel/process.c
arch/parisc/kernel/sys_parisc.c
arch/parisc/kernel/traps.c
arch/parisc/mm/fault.c
arch/powerpc/include/asm/barrier.h
arch/powerpc/include/asm/cmpxchg.h
arch/powerpc/include/asm/topology.h
arch/powerpc/kernel/mce.c
arch/powerpc/kernel/vmlinux.lds.S
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/lib/vmx-helper.c
arch/powerpc/mm/fault.c
arch/powerpc/mm/highmem.c
arch/powerpc/mm/hugetlbpage.c
arch/powerpc/mm/pgtable_64.c
arch/powerpc/mm/tlb_nohash.c
arch/s390/crypto/ghash_s390.c
arch/s390/crypto/prng.c
arch/s390/include/asm/barrier.h
arch/s390/include/asm/cmpxchg.h
arch/s390/include/asm/io.h
arch/s390/include/asm/pgtable.h
arch/s390/include/asm/timex.h
arch/s390/include/asm/topology.h
arch/s390/include/asm/uaccess.h
arch/s390/kernel/debug.c
arch/s390/kernel/time.c
arch/s390/mm/fault.c
arch/s390/net/bpf_jit.h
arch/s390/net/bpf_jit_comp.c
arch/score/include/asm/cmpxchg.h
arch/score/include/asm/uaccess.h
arch/score/lib/string.S
arch/score/mm/fault.c
arch/sh/include/asm/barrier.h
arch/sh/include/asm/cmpxchg.h
arch/sh/mm/fault.c
arch/sparc/include/asm/barrier_64.h
arch/sparc/include/asm/cmpxchg_32.h
arch/sparc/include/asm/cmpxchg_64.h
arch/sparc/include/asm/cpudata_64.h
arch/sparc/include/asm/io_32.h
arch/sparc/include/asm/io_64.h
arch/sparc/include/asm/pgtable_64.h
arch/sparc/include/asm/topology_64.h
arch/sparc/include/asm/trap_block.h
arch/sparc/kernel/entry.h
arch/sparc/kernel/leon_pci_grpci2.c
arch/sparc/kernel/mdesc.c
arch/sparc/kernel/pci.c
arch/sparc/kernel/setup_64.c
arch/sparc/kernel/smp_64.c
arch/sparc/kernel/vmlinux.lds.S
arch/sparc/mm/fault_32.c
arch/sparc/mm/fault_64.c
arch/sparc/mm/highmem.c
arch/sparc/mm/init_64.c
arch/tile/include/asm/atomic_64.h
arch/tile/include/asm/io.h
arch/tile/include/asm/topology.h
arch/tile/include/asm/uaccess.h
arch/tile/mm/fault.c
arch/tile/mm/highmem.c
arch/um/kernel/trap.c
arch/unicore32/mm/fault.c
arch/x86/Kbuild
arch/x86/Kconfig
arch/x86/Kconfig.debug
arch/x86/Makefile
arch/x86/boot/compressed/misc.h
arch/x86/crypto/aesni-intel_glue.c
arch/x86/crypto/camellia_aesni_avx2_glue.c
arch/x86/crypto/camellia_aesni_avx_glue.c
arch/x86/crypto/cast5_avx_glue.c
arch/x86/crypto/cast6_avx_glue.c
arch/x86/crypto/crc32-pclmul_glue.c
arch/x86/crypto/crc32c-intel_glue.c
arch/x86/crypto/crct10dif-pclmul_glue.c
arch/x86/crypto/fpu.c
arch/x86/crypto/ghash-clmulni-intel_glue.c
arch/x86/crypto/serpent_avx2_glue.c
arch/x86/crypto/serpent_avx_glue.c
arch/x86/crypto/sha-mb/sha1_mb.c
arch/x86/crypto/sha1_ssse3_glue.c
arch/x86/crypto/sha256_ssse3_glue.c
arch/x86/crypto/sha512_ssse3_glue.c
arch/x86/crypto/twofish_avx_glue.c
arch/x86/entry/Makefile [new file with mode: 0644]
arch/x86/entry/calling.h [new file with mode: 0644]
arch/x86/entry/entry_32.S [new file with mode: 0644]
arch/x86/entry/entry_64.S [new file with mode: 0644]
arch/x86/entry/entry_64_compat.S [new file with mode: 0644]
arch/x86/entry/syscall_32.c [new file with mode: 0644]
arch/x86/entry/syscall_64.c [new file with mode: 0644]
arch/x86/entry/syscalls/Makefile [new file with mode: 0644]
arch/x86/entry/syscalls/syscall_32.tbl [new file with mode: 0644]
arch/x86/entry/syscalls/syscall_64.tbl [new file with mode: 0644]
arch/x86/entry/syscalls/syscallhdr.sh [new file with mode: 0644]
arch/x86/entry/syscalls/syscalltbl.sh [new file with mode: 0644]
arch/x86/entry/thunk_32.S [new file with mode: 0644]
arch/x86/entry/thunk_64.S [new file with mode: 0644]
arch/x86/entry/vdso/.gitignore [new file with mode: 0644]
arch/x86/entry/vdso/Makefile [new file with mode: 0644]
arch/x86/entry/vdso/checkundef.sh [new file with mode: 0755]
arch/x86/entry/vdso/vclock_gettime.c [new file with mode: 0644]
arch/x86/entry/vdso/vdso-layout.lds.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso-note.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso.lds.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso2c.c [new file with mode: 0644]
arch/x86/entry/vdso/vdso2c.h [new file with mode: 0644]
arch/x86/entry/vdso/vdso32-setup.c [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/.gitignore [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/int80.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/note.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/sigreturn.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/syscall.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/sysenter.S [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/vclock_gettime.c [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/vdso-fakesections.c [new file with mode: 0644]
arch/x86/entry/vdso/vdso32/vdso32.lds.S [new file with mode: 0644]
arch/x86/entry/vdso/vdsox32.lds.S [new file with mode: 0644]
arch/x86/entry/vdso/vgetcpu.c [new file with mode: 0644]
arch/x86/entry/vdso/vma.c [new file with mode: 0644]
arch/x86/entry/vsyscall/Makefile [new file with mode: 0644]
arch/x86/entry/vsyscall/vsyscall_64.c [new file with mode: 0644]
arch/x86/entry/vsyscall/vsyscall_emu_64.S [new file with mode: 0644]
arch/x86/entry/vsyscall/vsyscall_gtod.c [new file with mode: 0644]
arch/x86/entry/vsyscall/vsyscall_trace.h [new file with mode: 0644]
arch/x86/ia32/Makefile
arch/x86/ia32/ia32_signal.c
arch/x86/ia32/ia32entry.S [deleted file]
arch/x86/include/asm/alternative-asm.h
arch/x86/include/asm/alternative.h
arch/x86/include/asm/amd_nb.h
arch/x86/include/asm/apic.h
arch/x86/include/asm/asm.h
arch/x86/include/asm/atomic.h
arch/x86/include/asm/atomic64_64.h
arch/x86/include/asm/barrier.h
arch/x86/include/asm/cacheflush.h
arch/x86/include/asm/calling.h [deleted file]
arch/x86/include/asm/cmpxchg.h
arch/x86/include/asm/crypto/glue_helper.h
arch/x86/include/asm/dma-mapping.h
arch/x86/include/asm/dwarf2.h [deleted file]
arch/x86/include/asm/efi.h
arch/x86/include/asm/entry_arch.h
arch/x86/include/asm/fpu-internal.h [deleted file]
arch/x86/include/asm/fpu/api.h [new file with mode: 0644]
arch/x86/include/asm/fpu/internal.h [new file with mode: 0644]
arch/x86/include/asm/fpu/regset.h [new file with mode: 0644]
arch/x86/include/asm/fpu/signal.h [new file with mode: 0644]
arch/x86/include/asm/fpu/types.h [new file with mode: 0644]
arch/x86/include/asm/fpu/xstate.h [new file with mode: 0644]
arch/x86/include/asm/frame.h
arch/x86/include/asm/hardirq.h
arch/x86/include/asm/hpet.h
arch/x86/include/asm/hw_irq.h
arch/x86/include/asm/i387.h [deleted file]
arch/x86/include/asm/io.h
arch/x86/include/asm/io_apic.h
arch/x86/include/asm/irq.h
arch/x86/include/asm/irq_remapping.h
arch/x86/include/asm/irq_vectors.h
arch/x86/include/asm/irqdomain.h [new file with mode: 0644]
arch/x86/include/asm/kvm_host.h
arch/x86/include/asm/mce.h
arch/x86/include/asm/microcode.h
arch/x86/include/asm/microcode_amd.h
arch/x86/include/asm/microcode_intel.h
arch/x86/include/asm/mmu_context.h
arch/x86/include/asm/mpx.h
arch/x86/include/asm/msi.h [new file with mode: 0644]
arch/x86/include/asm/msr-index.h [new file with mode: 0644]
arch/x86/include/asm/msr.h
arch/x86/include/asm/mtrr.h
arch/x86/include/asm/paravirt.h
arch/x86/include/asm/paravirt_types.h
arch/x86/include/asm/pat.h
arch/x86/include/asm/pci.h
arch/x86/include/asm/pgtable.h
arch/x86/include/asm/pgtable_types.h
arch/x86/include/asm/preempt.h
arch/x86/include/asm/processor.h
arch/x86/include/asm/proto.h
arch/x86/include/asm/ptrace.h
arch/x86/include/asm/qspinlock.h [new file with mode: 0644]
arch/x86/include/asm/qspinlock_paravirt.h [new file with mode: 0644]
arch/x86/include/asm/segment.h
arch/x86/include/asm/simd.h
arch/x86/include/asm/smp.h
arch/x86/include/asm/special_insns.h
arch/x86/include/asm/spinlock.h
arch/x86/include/asm/spinlock_types.h
arch/x86/include/asm/stackprotector.h
arch/x86/include/asm/suspend_32.h
arch/x86/include/asm/suspend_64.h
arch/x86/include/asm/thread_info.h
arch/x86/include/asm/topology.h
arch/x86/include/asm/trace/irq_vectors.h
arch/x86/include/asm/trace/mpx.h [new file with mode: 0644]
arch/x86/include/asm/traps.h
arch/x86/include/asm/uaccess.h
arch/x86/include/asm/uaccess_32.h
arch/x86/include/asm/user.h
arch/x86/include/asm/x86_init.h
arch/x86/include/asm/xcr.h [deleted file]
arch/x86/include/asm/xor.h
arch/x86/include/asm/xor_32.h
arch/x86/include/asm/xor_avx.h
arch/x86/include/asm/xsave.h [deleted file]
arch/x86/include/uapi/asm/msr-index.h [deleted file]
arch/x86/include/uapi/asm/msr.h
arch/x86/include/uapi/asm/mtrr.h
arch/x86/include/uapi/asm/sigcontext.h
arch/x86/kernel/Makefile
arch/x86/kernel/acpi/boot.c
arch/x86/kernel/acpi/wakeup_64.S
arch/x86/kernel/alternative.c
arch/x86/kernel/amd_nb.c
arch/x86/kernel/apb_timer.c
arch/x86/kernel/aperture_64.c
arch/x86/kernel/apic/htirq.c
arch/x86/kernel/apic/io_apic.c
arch/x86/kernel/apic/msi.c
arch/x86/kernel/apic/vector.c
arch/x86/kernel/apic/x2apic_phys.c
arch/x86/kernel/asm-offsets.c
arch/x86/kernel/asm-offsets_32.c
arch/x86/kernel/asm-offsets_64.c
arch/x86/kernel/cpu/amd.c
arch/x86/kernel/cpu/bugs.c
arch/x86/kernel/cpu/common.c
arch/x86/kernel/cpu/intel_cacheinfo.c
arch/x86/kernel/cpu/mcheck/mce.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/mce_intel.c
arch/x86/kernel/cpu/microcode/amd_early.c
arch/x86/kernel/cpu/microcode/core.c
arch/x86/kernel/cpu/microcode/core_early.c
arch/x86/kernel/cpu/microcode/intel.c
arch/x86/kernel/cpu/microcode/intel_early.c
arch/x86/kernel/cpu/microcode/intel_lib.c
arch/x86/kernel/cpu/mshyperv.c
arch/x86/kernel/cpu/mtrr/cleanup.c
arch/x86/kernel/cpu/mtrr/generic.c
arch/x86/kernel/cpu/mtrr/main.c
arch/x86/kernel/cpu/mtrr/mtrr.h
arch/x86/kernel/cpu/perf_event.c
arch/x86/kernel/cpu/perf_event.h
arch/x86/kernel/cpu/perf_event_intel.c
arch/x86/kernel/cpu/perf_event_intel_bts.c
arch/x86/kernel/cpu/perf_event_intel_cqm.c
arch/x86/kernel/cpu/perf_event_intel_ds.c
arch/x86/kernel/cpu/perf_event_intel_lbr.c
arch/x86/kernel/cpu/perf_event_intel_pt.c
arch/x86/kernel/cpu/perf_event_intel_rapl.c
arch/x86/kernel/cpu/perf_event_intel_uncore.c
arch/x86/kernel/cpu/perf_event_intel_uncore.h
arch/x86/kernel/cpu/perf_event_intel_uncore_snb.c
arch/x86/kernel/cpu/perf_event_intel_uncore_snbep.c
arch/x86/kernel/cpu/proc.c
arch/x86/kernel/crash.c
arch/x86/kernel/devicetree.c
arch/x86/kernel/early-quirks.c
arch/x86/kernel/entry_32.S [deleted file]
arch/x86/kernel/entry_64.S [deleted file]
arch/x86/kernel/fpu/Makefile [new file with mode: 0644]
arch/x86/kernel/fpu/bugs.c [new file with mode: 0644]
arch/x86/kernel/fpu/core.c [new file with mode: 0644]
arch/x86/kernel/fpu/init.c [new file with mode: 0644]
arch/x86/kernel/fpu/regset.c [new file with mode: 0644]
arch/x86/kernel/fpu/signal.c [new file with mode: 0644]
arch/x86/kernel/fpu/xstate.c [new file with mode: 0644]
arch/x86/kernel/head64.c
arch/x86/kernel/head_32.S
arch/x86/kernel/head_64.S
arch/x86/kernel/hpet.c
arch/x86/kernel/i386_ksyms_32.c
arch/x86/kernel/i387.c [deleted file]
arch/x86/kernel/i8259.c
arch/x86/kernel/irq.c
arch/x86/kernel/irq_32.c
arch/x86/kernel/irq_64.c
arch/x86/kernel/irq_work.c
arch/x86/kernel/irqinit.c
arch/x86/kernel/kvm.c
arch/x86/kernel/machine_kexec_64.c
arch/x86/kernel/mpparse.c
arch/x86/kernel/paravirt-spinlocks.c
arch/x86/kernel/paravirt.c
arch/x86/kernel/paravirt_patch_32.c
arch/x86/kernel/paravirt_patch_64.c
arch/x86/kernel/pci-dma.c
arch/x86/kernel/pci-swiotlb.c
arch/x86/kernel/process.c
arch/x86/kernel/process_32.c
arch/x86/kernel/process_64.c
arch/x86/kernel/ptrace.c
arch/x86/kernel/setup.c
arch/x86/kernel/signal.c
arch/x86/kernel/smp.c
arch/x86/kernel/smpboot.c
arch/x86/kernel/syscall_32.c [deleted file]
arch/x86/kernel/syscall_64.c [deleted file]
arch/x86/kernel/traps.c
arch/x86/kernel/tsc_sync.c
arch/x86/kernel/uprobes.c
arch/x86/kernel/vsyscall_64.c [deleted file]
arch/x86/kernel/vsyscall_emu_64.S [deleted file]
arch/x86/kernel/vsyscall_gtod.c [deleted file]
arch/x86/kernel/vsyscall_trace.h [deleted file]
arch/x86/kernel/x8664_ksyms_64.c
arch/x86/kernel/x86_init.c
arch/x86/kernel/xsave.c [deleted file]
arch/x86/kvm/cpuid.c
arch/x86/kvm/cpuid.h
arch/x86/kvm/lapic.c
arch/x86/kvm/mmu.c
arch/x86/kvm/mmu.h
arch/x86/kvm/paging_tmpl.h
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c
arch/x86/lguest/boot.c
arch/x86/lib/Makefile
arch/x86/lib/atomic64_386_32.S
arch/x86/lib/atomic64_cx8_32.S
arch/x86/lib/checksum_32.S
arch/x86/lib/clear_page_64.S
arch/x86/lib/cmpxchg16b_emu.S
arch/x86/lib/cmpxchg8b_emu.S
arch/x86/lib/copy_page_64.S
arch/x86/lib/copy_user_64.S
arch/x86/lib/copy_user_nocache_64.S [deleted file]
arch/x86/lib/csum-copy_64.S
arch/x86/lib/getuser.S
arch/x86/lib/iomap_copy_64.S
arch/x86/lib/memcpy_64.S
arch/x86/lib/memmove_64.S
arch/x86/lib/memset_64.S
arch/x86/lib/mmx_32.c
arch/x86/lib/msr-reg.S
arch/x86/lib/putuser.S
arch/x86/lib/rwsem.S
arch/x86/lib/thunk_32.S [deleted file]
arch/x86/lib/thunk_64.S [deleted file]
arch/x86/lib/usercopy_32.c
arch/x86/math-emu/fpu_aux.c
arch/x86/math-emu/fpu_entry.c
arch/x86/math-emu/fpu_system.h
arch/x86/mm/fault.c
arch/x86/mm/highmem_32.c
arch/x86/mm/init.c
arch/x86/mm/iomap_32.c
arch/x86/mm/ioremap.c
arch/x86/mm/mpx.c
arch/x86/mm/pageattr-test.c
arch/x86/mm/pageattr.c
arch/x86/mm/pat.c
arch/x86/mm/pat_internal.h
arch/x86/mm/pat_rbtree.c
arch/x86/mm/pgtable.c
arch/x86/net/bpf_jit.S
arch/x86/net/bpf_jit_comp.c
arch/x86/pci/acpi.c
arch/x86/pci/i386.c
arch/x86/pci/intel_mid_pci.c
arch/x86/pci/irq.c
arch/x86/platform/Makefile
arch/x86/platform/atom/Makefile [new file with mode: 0644]
arch/x86/platform/atom/punit_atom_debug.c [new file with mode: 0644]
arch/x86/platform/efi/efi.c
arch/x86/platform/intel-mid/device_libs/platform_wdt.c
arch/x86/platform/intel-mid/intel-mid.c
arch/x86/platform/intel-mid/sfi.c
arch/x86/platform/sfi/sfi.c
arch/x86/platform/uv/uv_irq.c
arch/x86/power/cpu.c
arch/x86/power/hibernate_asm_64.S
arch/x86/syscalls/Makefile [deleted file]
arch/x86/syscalls/syscall_32.tbl [deleted file]
arch/x86/syscalls/syscall_64.tbl [deleted file]
arch/x86/syscalls/syscallhdr.sh [deleted file]
arch/x86/syscalls/syscalltbl.sh [deleted file]
arch/x86/um/Makefile
arch/x86/um/asm/barrier.h
arch/x86/vdso/.gitignore [deleted file]
arch/x86/vdso/Makefile [deleted file]
arch/x86/vdso/checkundef.sh [deleted file]
arch/x86/vdso/vclock_gettime.c [deleted file]
arch/x86/vdso/vdso-layout.lds.S [deleted file]
arch/x86/vdso/vdso-note.S [deleted file]
arch/x86/vdso/vdso.lds.S [deleted file]
arch/x86/vdso/vdso2c.c [deleted file]
arch/x86/vdso/vdso2c.h [deleted file]
arch/x86/vdso/vdso32-setup.c [deleted file]
arch/x86/vdso/vdso32/.gitignore [deleted file]
arch/x86/vdso/vdso32/int80.S [deleted file]
arch/x86/vdso/vdso32/note.S [deleted file]
arch/x86/vdso/vdso32/sigreturn.S [deleted file]
arch/x86/vdso/vdso32/syscall.S [deleted file]
arch/x86/vdso/vdso32/sysenter.S [deleted file]
arch/x86/vdso/vdso32/vclock_gettime.c [deleted file]
arch/x86/vdso/vdso32/vdso-fakesections.c [deleted file]
arch/x86/vdso/vdso32/vdso32.lds.S [deleted file]
arch/x86/vdso/vdsox32.lds.S [deleted file]
arch/x86/vdso/vgetcpu.c [deleted file]
arch/x86/vdso/vma.c [deleted file]
arch/x86/xen/enlighten.c
arch/x86/xen/p2m.c
arch/x86/xen/spinlock.c
arch/x86/xen/xen-asm_64.S
arch/x86/xen/xen-ops.h
arch/xtensa/include/asm/dma-mapping.h
arch/xtensa/include/asm/io.h
arch/xtensa/mm/fault.c
arch/xtensa/mm/highmem.c
block/blk-core.c
block/blk-mq-cpumap.c
block/blk-mq.c
block/blk-sysfs.c
block/bounce.c
block/elevator.c
block/genhd.c
crypto/Kconfig
crypto/algif_aead.c
drivers/acpi/acpi_pad.c
drivers/acpi/acpi_pnp.c
drivers/acpi/acpica/utglobal.c
drivers/acpi/apei/erst.c
drivers/acpi/osl.c
drivers/acpi/resource.c
drivers/acpi/sbshc.c
drivers/ata/Kconfig
drivers/ata/Makefile
drivers/ata/ahci.c
drivers/ata/ahci_mvebu.c
drivers/ata/ahci_st.c
drivers/ata/libahci.c
drivers/ata/libata-core.c
drivers/ata/libata-eh.c
drivers/ata/pata_octeon_cf.c
drivers/ata/pata_scc.c [deleted file]
drivers/base/cacheinfo.c
drivers/base/init.c
drivers/base/topology.c
drivers/block/Kconfig
drivers/block/loop.c
drivers/block/nvme-core.c
drivers/block/nvme-scsi.c
drivers/block/pmem.c
drivers/block/zram/zram_drv.c
drivers/bluetooth/ath3k.c
drivers/bluetooth/bt3c_cs.c
drivers/bluetooth/btbcm.c
drivers/bluetooth/btbcm.h
drivers/bluetooth/btusb.c
drivers/bluetooth/hci_ath.c
drivers/bus/arm-cci.c
drivers/bus/mips_cdmm.c
drivers/bus/mvebu-mbus.c
drivers/bus/omap_l3_noc.c
drivers/bus/omap_l3_noc.h
drivers/char/hw_random/via-rng.c
drivers/clk/at91/clk-peripheral.c
drivers/clk/at91/clk-pll.c
drivers/clk/at91/pmc.h
drivers/clk/clk-si5351.c
drivers/clk/clk.c
drivers/clk/qcom/gcc-msm8916.c
drivers/clk/samsung/Makefile
drivers/clk/samsung/clk-exynos5420.c
drivers/clk/samsung/clk-exynos5433.c
drivers/clocksource/Kconfig
drivers/clocksource/Makefile
drivers/clocksource/armv7m_systick.c [new file with mode: 0644]
drivers/clocksource/asm9260_timer.c
drivers/clocksource/exynos_mct.c
drivers/clocksource/qcom-timer.c
drivers/clocksource/time-lpc32xx.c [new file with mode: 0644]
drivers/clocksource/timer-integrator-ap.c
drivers/clocksource/timer-stm32.c [new file with mode: 0644]
drivers/clocksource/timer-sun5i.c
drivers/cpufreq/acpi-cpufreq.c
drivers/cpufreq/intel_pstate.c
drivers/cpufreq/p4-clockmod.c
drivers/cpufreq/powernow-k8.c
drivers/cpufreq/speedstep-ich.c
drivers/crypto/caam/caamhash.c
drivers/crypto/caam/caamrng.c
drivers/crypto/padlock-aes.c
drivers/crypto/padlock-sha.c
drivers/crypto/vmx/aes.c
drivers/crypto/vmx/aes_cbc.c
drivers/crypto/vmx/ghash.c
drivers/dma/at_xdmac.c
drivers/dma/dmaengine.c
drivers/dma/hsu/hsu.c
drivers/dma/mic_x100_dma.c
drivers/dma/pl330.c
drivers/extcon/extcon-usb-gpio.c
drivers/firmware/dmi_scan.c
drivers/firmware/efi/Kconfig
drivers/firmware/efi/Makefile
drivers/firmware/efi/efi.c
drivers/firmware/efi/efivars.c
drivers/firmware/efi/esrt.c [new file with mode: 0644]
drivers/firmware/iscsi_ibft.c
drivers/gpio/gpio-kempld.c
drivers/gpio/gpio-omap.c
drivers/gpio/gpiolib-acpi.c
drivers/gpio/gpiolib-sysfs.c
drivers/gpio/gpiolib.c
drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
drivers/gpu/drm/amd/amdkfd/kfd_topology.c
drivers/gpu/drm/drm_ioctl.c
drivers/gpu/drm/drm_irq.c
drivers/gpu/drm/drm_plane_helper.c
drivers/gpu/drm/drm_sysfs.c
drivers/gpu/drm/exynos/exynos7_drm_decon.c
drivers/gpu/drm/exynos/exynos_dp_core.c
drivers/gpu/drm/exynos/exynos_drm_crtc.c
drivers/gpu/drm/exynos/exynos_drm_crtc.h
drivers/gpu/drm/exynos/exynos_drm_drv.h
drivers/gpu/drm/exynos/exynos_drm_fb.c
drivers/gpu/drm/exynos/exynos_drm_fimd.c
drivers/gpu/drm/exynos/exynos_drm_fimd.h [deleted file]
drivers/gpu/drm/exynos/exynos_drm_plane.c
drivers/gpu/drm/exynos/exynos_drm_vidi.c
drivers/gpu/drm/exynos/exynos_mixer.c
drivers/gpu/drm/i915/i915_debugfs.c
drivers/gpu/drm/i915/i915_drv.c
drivers/gpu/drm/i915/i915_gem.c
drivers/gpu/drm/i915/i915_gem_execbuffer.c
drivers/gpu/drm/i915/intel_display.c
drivers/gpu/drm/i915/intel_dp.c
drivers/gpu/drm/i915/intel_i2c.c
drivers/gpu/drm/i915/intel_lrc.c
drivers/gpu/drm/i915/intel_lvds.c
drivers/gpu/drm/i915/intel_pm.c
drivers/gpu/drm/i915/intel_ringbuffer.c
drivers/gpu/drm/i915/intel_sdvo.c
drivers/gpu/drm/mgag200/mgag200_mode.c
drivers/gpu/drm/msm/adreno/adreno_gpu.c
drivers/gpu/drm/msm/dsi/dsi.c
drivers/gpu/drm/msm/dsi/dsi_host.c
drivers/gpu/drm/msm/dsi/dsi_manager.c
drivers/gpu/drm/msm/edp/edp_aux.c
drivers/gpu/drm/msm/edp/edp_connector.c
drivers/gpu/drm/msm/edp/edp_ctrl.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.h
drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
drivers/gpu/drm/msm/msm_drv.c
drivers/gpu/drm/msm/msm_fb.c
drivers/gpu/drm/msm/msm_gem.c
drivers/gpu/drm/msm/msm_iommu.c
drivers/gpu/drm/msm/msm_ringbuffer.c
drivers/gpu/drm/nouveau/include/nvif/class.h
drivers/gpu/drm/nouveau/nvkm/engine/gr/gm204.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gf100.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm107.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/gm204.c
drivers/gpu/drm/nouveau/nvkm/subdev/devinit/nv50.h
drivers/gpu/drm/radeon/atombios_crtc.c
drivers/gpu/drm/radeon/atombios_dp.c
drivers/gpu/drm/radeon/cik.c
drivers/gpu/drm/radeon/dce3_1_afmt.c
drivers/gpu/drm/radeon/evergreen_hdmi.c
drivers/gpu/drm/radeon/ni.c
drivers/gpu/drm/radeon/radeon.h
drivers/gpu/drm/radeon/radeon_asic.c
drivers/gpu/drm/radeon/radeon_asic.h
drivers/gpu/drm/radeon/radeon_audio.c
drivers/gpu/drm/radeon/radeon_connectors.c
drivers/gpu/drm/radeon/radeon_device.c
drivers/gpu/drm/radeon/radeon_dp_auxch.c
drivers/gpu/drm/radeon/radeon_dp_mst.c
drivers/gpu/drm/radeon/radeon_kms.c
drivers/gpu/drm/radeon/radeon_mn.c
drivers/gpu/drm/radeon/radeon_ttm.c
drivers/gpu/drm/radeon/radeon_uvd.c
drivers/gpu/drm/radeon/radeon_vce.c
drivers/gpu/drm/radeon/radeon_vm.c
drivers/gpu/drm/radeon/rv770d.h
drivers/gpu/drm/radeon/si.c
drivers/gpu/drm/radeon/uvd_v1_0.c
drivers/gpu/drm/radeon/uvd_v2_2.c
drivers/gpu/drm/tegra/drm.c
drivers/gpu/drm/vgem/Makefile
drivers/gpu/drm/vgem/vgem_dma_buf.c [deleted file]
drivers/gpu/drm/vgem/vgem_drv.c
drivers/gpu/drm/vgem/vgem_drv.h
drivers/hid/hid-ids.h
drivers/hid/hid-logitech-hidpp.c
drivers/hid/hid-sensor-hub.c
drivers/hid/i2c-hid/i2c-hid.c
drivers/hid/usbhid/hid-quirks.c
drivers/hid/wacom_wac.c
drivers/hwmon/coretemp.c
drivers/hwmon/nct6683.c
drivers/hwmon/nct6775.c
drivers/hwmon/ntc_thermistor.c
drivers/hwmon/tmp401.c
drivers/i2c/busses/i2c-hix5hd2.c
drivers/i2c/busses/i2c-s3c2410.c
drivers/ide/Kconfig
drivers/ide/Makefile
drivers/ide/scc_pata.c [deleted file]
drivers/iio/accel/mma9551_core.c
drivers/iio/accel/mma9553.c
drivers/iio/accel/st_accel_core.c
drivers/iio/adc/axp288_adc.c
drivers/iio/adc/cc10001_adc.c
drivers/iio/adc/mcp320x.c
drivers/iio/adc/qcom-spmi-vadc.c
drivers/iio/adc/twl6030-gpadc.c
drivers/iio/adc/xilinx-xadc-core.c
drivers/iio/adc/xilinx-xadc.h
drivers/iio/common/st_sensors/st_sensors_core.c
drivers/iio/gyro/st_gyro_core.c
drivers/iio/imu/adis16400.h
drivers/iio/imu/adis16400_buffer.c
drivers/iio/imu/adis16400_core.c
drivers/iio/kfifo_buf.c
drivers/iio/light/hid-sensor-prox.c
drivers/iio/magnetometer/st_magn_core.c
drivers/iio/pressure/bmp280.c
drivers/iio/pressure/hid-sensor-press.c
drivers/iio/pressure/st_pressure_core.c
drivers/infiniband/core/addr.c
drivers/infiniband/core/cm.c
drivers/infiniband/core/cm_msgs.h
drivers/infiniband/core/cma.c
drivers/infiniband/core/iwpm_msg.c
drivers/infiniband/core/iwpm_util.c
drivers/infiniband/core/iwpm_util.h
drivers/infiniband/core/umem_odp.c
drivers/infiniband/hw/cxgb4/cm.c
drivers/infiniband/hw/cxgb4/cq.c
drivers/infiniband/hw/cxgb4/device.c
drivers/infiniband/hw/cxgb4/iw_cxgb4.h
drivers/infiniband/hw/cxgb4/mem.c
drivers/infiniband/hw/cxgb4/qp.c
drivers/infiniband/hw/cxgb4/t4.h
drivers/infiniband/hw/cxgb4/t4fw_ri_api.h
drivers/infiniband/hw/ehca/ehca_mcast.c
drivers/infiniband/hw/ipath/Kconfig
drivers/infiniband/hw/ipath/ipath_driver.c
drivers/infiniband/hw/ipath/ipath_kernel.h
drivers/infiniband/hw/ipath/ipath_wc_x86_64.c
drivers/infiniband/hw/mlx4/main.c
drivers/infiniband/hw/mlx5/qp.c
drivers/infiniband/hw/nes/nes.c
drivers/infiniband/hw/nes/nes_cm.c
drivers/infiniband/hw/ocrdma/ocrdma.h
drivers/infiniband/hw/ocrdma/ocrdma_ah.c
drivers/infiniband/hw/ocrdma/ocrdma_hw.c
drivers/infiniband/hw/ocrdma/ocrdma_sli.h
drivers/infiniband/hw/ocrdma/ocrdma_verbs.c
drivers/infiniband/hw/qib/qib.h
drivers/infiniband/hw/qib/qib_file_ops.c
drivers/infiniband/hw/qib/qib_iba6120.c
drivers/infiniband/hw/qib/qib_iba7220.c
drivers/infiniband/hw/qib/qib_iba7322.c
drivers/infiniband/hw/qib/qib_init.c
drivers/infiniband/hw/qib/qib_wc_x86_64.c
drivers/infiniband/ulp/ipoib/ipoib_cm.c
drivers/infiniband/ulp/isert/ib_isert.c
drivers/input/joydev.c
drivers/input/mouse/Kconfig
drivers/input/mouse/alps.c
drivers/input/mouse/elantech.c
drivers/input/mouse/synaptics.c
drivers/input/touchscreen/stmpe-ts.c
drivers/input/touchscreen/sx8654.c
drivers/iommu/amd_iommu.c
drivers/iommu/amd_iommu_init.c
drivers/iommu/amd_iommu_proto.h
drivers/iommu/amd_iommu_types.h
drivers/iommu/amd_iommu_v2.c
drivers/iommu/arm-smmu.c
drivers/iommu/dmar.c
drivers/iommu/intel-iommu.c
drivers/iommu/intel_irq_remapping.c
drivers/iommu/irq_remapping.c
drivers/iommu/irq_remapping.h
drivers/iommu/rockchip-iommu.c
drivers/irqchip/irq-gic-v3-its.c
drivers/irqchip/irq-gic.c
drivers/irqchip/irq-mips-gic.c
drivers/irqchip/irq-sunxi-nmi.c
drivers/irqchip/irq-tegra.c
drivers/lguest/core.c
drivers/lguest/interrupts_and_traps.c
drivers/lguest/x86/core.c
drivers/md/bitmap.c
drivers/md/dm-crypt.c
drivers/md/dm-mpath.c
drivers/md/dm-table.c
drivers/md/dm.c
drivers/md/md.c
drivers/md/raid0.c
drivers/md/raid10.c
drivers/md/raid5.c
drivers/md/raid5.h
drivers/media/Kconfig
drivers/media/pci/ivtv/Kconfig
drivers/media/pci/ivtv/ivtvfb.c
drivers/mfd/da9052-core.c
drivers/mmc/card/block.c
drivers/mmc/card/queue.c
drivers/mmc/card/queue.h
drivers/mmc/core/core.c
drivers/mmc/host/atmel-mci.c
drivers/mmc/host/dw_mmc.c
drivers/mmc/host/sh_mmcif.c
drivers/mtd/devices/m25p80.c
drivers/mtd/tests/readtest.c
drivers/mtd/ubi/block.c
drivers/net/bonding/bond_options.c
drivers/net/can/xilinx_can.c
drivers/net/dsa/mv88e6xxx.c
drivers/net/ethernet/amd/Kconfig
drivers/net/ethernet/amd/xgbe/xgbe-drv.c
drivers/net/ethernet/apm/xgene/Kconfig
drivers/net/ethernet/broadcom/b44.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x.h
drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_main.c
drivers/net/ethernet/broadcom/bnx2x/bnx2x_stats.c
drivers/net/ethernet/broadcom/genet/bcmmii.c
drivers/net/ethernet/brocade/bna/bfa_ioc.c
drivers/net/ethernet/brocade/bna/bnad.c
drivers/net/ethernet/brocade/bna/cna_fwimg.c
drivers/net/ethernet/cadence/macb.c
drivers/net/ethernet/cadence/macb.h
drivers/net/ethernet/cisco/enic/enic_ethtool.c
drivers/net/ethernet/cisco/enic/enic_main.c
drivers/net/ethernet/cisco/enic/vnic_rq.c
drivers/net/ethernet/emulex/benet/be_cmds.c
drivers/net/ethernet/emulex/benet/be_ethtool.c
drivers/net/ethernet/emulex/benet/be_main.c
drivers/net/ethernet/ibm/emac/core.c
drivers/net/ethernet/ibm/emac/core.h
drivers/net/ethernet/intel/e1000e/e1000.h
drivers/net/ethernet/intel/fm10k/fm10k_main.c
drivers/net/ethernet/intel/i40e/i40e.h
drivers/net/ethernet/intel/i40e/i40e_debugfs.c
drivers/net/ethernet/intel/i40e/i40e_main.c
drivers/net/ethernet/intel/i40e/i40e_txrx.c
drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
drivers/net/ethernet/intel/i40evf/i40e_txrx.c
drivers/net/ethernet/intel/igb/igb_main.c
drivers/net/ethernet/intel/igb/igb_ptp.c
drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
drivers/net/ethernet/mellanox/mlx4/cmd.c
drivers/net/ethernet/mellanox/mlx4/en_netdev.c
drivers/net/ethernet/mellanox/mlx4/en_port.c
drivers/net/ethernet/mellanox/mlx4/en_tx.c
drivers/net/ethernet/mellanox/mlx4/resource_tracker.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_init.c
drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c
drivers/net/ethernet/qualcomm/qca_spi.c
drivers/net/ethernet/realtek/r8169.c
drivers/net/ethernet/rocker/rocker.c
drivers/net/ethernet/sfc/efx.c
drivers/net/ethernet/sfc/rx.c
drivers/net/ethernet/smsc/smc91x.c
drivers/net/ethernet/smsc/smsc911x.c
drivers/net/ethernet/stmicro/stmmac/stmmac.h
drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
drivers/net/ethernet/stmicro/stmmac/stmmac_platform.c
drivers/net/ethernet/xilinx/ll_temac_main.c
drivers/net/hyperv/netvsc.c
drivers/net/hyperv/rndis_filter.c
drivers/net/ieee802154/at86rf230.c
drivers/net/macvlan.c
drivers/net/phy/Kconfig
drivers/net/phy/amd-xgbe-phy.c
drivers/net/phy/bcm7xxx.c
drivers/net/phy/dp83640.c
drivers/net/phy/mdio-gpio.c
drivers/net/phy/micrel.c
drivers/net/phy/phy.c
drivers/net/ppp/pppoe.c
drivers/net/usb/cdc_ncm.c
drivers/net/usb/r8152.c
drivers/net/usb/usbnet.c
drivers/net/vxlan.c
drivers/net/wireless/ath/ath9k/xmit.c
drivers/net/wireless/brcm80211/brcmfmac/msgbuf.c
drivers/net/wireless/iwlwifi/Kconfig
drivers/net/wireless/iwlwifi/iwl-7000.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.c
drivers/net/wireless/iwlwifi/iwl-eeprom-parse.h
drivers/net/wireless/iwlwifi/iwl-fw-file.h
drivers/net/wireless/iwlwifi/iwl-nvm-parse.c
drivers/net/wireless/iwlwifi/iwl-trans.h
drivers/net/wireless/iwlwifi/mvm/coex_legacy.c
drivers/net/wireless/iwlwifi/mvm/d3.c
drivers/net/wireless/iwlwifi/mvm/fw-api-power.h
drivers/net/wireless/iwlwifi/mvm/fw-api-scan.h
drivers/net/wireless/iwlwifi/mvm/fw-api.h
drivers/net/wireless/iwlwifi/mvm/fw.c
drivers/net/wireless/iwlwifi/mvm/mac80211.c
drivers/net/wireless/iwlwifi/mvm/mvm.h
drivers/net/wireless/iwlwifi/mvm/ops.c
drivers/net/wireless/iwlwifi/mvm/rs.c
drivers/net/wireless/iwlwifi/mvm/rx.c
drivers/net/wireless/iwlwifi/mvm/scan.c
drivers/net/wireless/iwlwifi/pcie/internal.h
drivers/net/wireless/iwlwifi/pcie/trans.c
drivers/net/wireless/iwlwifi/pcie/tx.c
drivers/net/wireless/rtlwifi/usb.c
drivers/net/xen-netback/netback.c
drivers/net/xen-netback/xenbus.c
drivers/net/xen-netfront.c
drivers/ntb/ntb_hw.c
drivers/of/base.c
drivers/of/dynamic.c
drivers/parisc/superio.c
drivers/pci/htirq.c
drivers/pci/quirks.c
drivers/pci/setup-bus.c
drivers/phy/Kconfig
drivers/phy/phy-core.c
drivers/phy/phy-omap-usb2.c
drivers/phy/phy-rcar-gen2.c
drivers/pinctrl/bcm/pinctrl-cygnus-gpio.c
drivers/pinctrl/core.c
drivers/pinctrl/core.h
drivers/pinctrl/devicetree.c
drivers/pinctrl/intel/pinctrl-cherryview.c
drivers/pinctrl/mediatek/pinctrl-mtk-common.c
drivers/pinctrl/meson/pinctrl-meson.c
drivers/pinctrl/meson/pinctrl-meson8b.c
drivers/pinctrl/mvebu/pinctrl-armada-370.c
drivers/pinctrl/qcom/pinctrl-spmi-gpio.c
drivers/pinctrl/qcom/pinctrl-spmi-mpp.c
drivers/platform/x86/thinkpad_acpi.c
drivers/power/axp288_fuel_gauge.c
drivers/power/bq27x00_battery.c
drivers/power/collie_battery.c
drivers/power/reset/Kconfig
drivers/power/reset/at91-reset.c
drivers/power/reset/ltc2952-poweroff.c
drivers/pwm/pwm-img.c
drivers/regulator/da9052-regulator.c
drivers/rtc/rtc-armada38x.c
drivers/s390/crypto/ap_bus.c
drivers/scsi/be2iscsi/be.h
drivers/scsi/be2iscsi/be_cmds.c
drivers/scsi/be2iscsi/be_cmds.h
drivers/scsi/be2iscsi/be_iscsi.c
drivers/scsi/be2iscsi/be_iscsi.h
drivers/scsi/be2iscsi/be_main.c
drivers/scsi/be2iscsi/be_main.h
drivers/scsi/be2iscsi/be_mgmt.c
drivers/scsi/be2iscsi/be_mgmt.h
drivers/scsi/fnic/fnic_debugfs.c
drivers/scsi/fnic/fnic_trace.c
drivers/scsi/lpfc/lpfc_scsi.c
drivers/scsi/qla2xxx/tcm_qla2xxx.c
drivers/scsi/sd.c
drivers/scsi/storvsc_drv.c
drivers/soc/mediatek/Kconfig
drivers/soc/mediatek/mtk-pmic-wrap.c
drivers/spi/Kconfig
drivers/spi/spi-bcm2835.c
drivers/spi/spi-bitbang.c
drivers/spi/spi-fsl-cpm.c
drivers/spi/spi-fsl-espi.c
drivers/spi/spi-omap2-mcspi.c
drivers/spi/spi.c
drivers/ssb/driver_chipcommon_pmu.c
drivers/ssb/driver_pcicore.c
drivers/staging/gdm724x/gdm_mux.c
drivers/staging/lustre/lustre/include/linux/lustre_compat25.h
drivers/staging/lustre/lustre/libcfs/linux/linux-cpu.c
drivers/staging/lustre/lustre/llite/llite_internal.h
drivers/staging/lustre/lustre/llite/symlink.c
drivers/staging/lustre/lustre/ptlrpc/service.c
drivers/staging/ozwpan/ozhcd.c
drivers/staging/ozwpan/ozusbif.h
drivers/staging/ozwpan/ozusbsvc1.c
drivers/staging/rtl8712/rtl8712_led.c
drivers/staging/rtl8712/rtl871x_cmd.c
drivers/staging/rtl8712/rtl871x_ioctl_linux.c
drivers/staging/rtl8712/rtl871x_mlme.c
drivers/staging/rtl8712/rtl871x_pwrctrl.c
drivers/staging/rtl8712/rtl871x_sta_mgt.c
drivers/staging/sm750fb/sm750.c
drivers/staging/vt6655/card.c
drivers/staging/vt6655/card.h
drivers/staging/vt6655/device_main.c
drivers/staging/vt6656/rxtx.c
drivers/target/iscsi/iscsi_target.c
drivers/target/iscsi/iscsi_target_login.c
drivers/target/iscsi/iscsi_target_tpg.c
drivers/target/target_core_alua.c
drivers/target/target_core_configfs.c
drivers/target/target_core_device.c
drivers/target/target_core_file.c
drivers/target/target_core_iblock.c
drivers/target/target_core_internal.h
drivers/target/target_core_pr.c
drivers/target/target_core_pscsi.c
drivers/target/target_core_pscsi.h
drivers/target/target_core_rd.c
drivers/target/target_core_sbc.c
drivers/target/target_core_transport.c
drivers/target/target_core_user.c
drivers/target/target_core_xcopy.c
drivers/thermal/armada_thermal.c
drivers/thermal/intel_powerclamp.c
drivers/thermal/rockchip_thermal.c
drivers/thermal/thermal_core.h
drivers/thermal/ti-soc-thermal/dra752-thermal-data.c
drivers/thermal/ti-soc-thermal/omap5-thermal-data.c
drivers/thermal/ti-soc-thermal/ti-bandgap.c
drivers/thermal/ti-soc-thermal/ti-bandgap.h
drivers/tty/hvc/hvc_xen.c
drivers/tty/mips_ejtag_fdc.c
drivers/tty/n_gsm.c
drivers/tty/n_hdlc.c
drivers/tty/n_tty.c
drivers/tty/pty.c
drivers/tty/serial/8250/8250_omap.c
drivers/tty/serial/amba-pl011.c
drivers/tty/serial/earlycon.c
drivers/tty/serial/imx.c
drivers/tty/serial/omap-serial.c
drivers/tty/tty_buffer.c
drivers/usb/chipidea/debug.c
drivers/usb/core/quirks.c
drivers/usb/dwc3/core.h
drivers/usb/dwc3/dwc3-omap.c
drivers/usb/gadget/configfs.c
drivers/usb/gadget/function/f_fs.c
drivers/usb/gadget/function/f_hid.c
drivers/usb/gadget/function/f_midi.c
drivers/usb/gadget/function/f_uac1.c
drivers/usb/gadget/function/u_serial.c
drivers/usb/gadget/legacy/acm_ms.c
drivers/usb/gadget/legacy/audio.c
drivers/usb/gadget/legacy/cdc2.c
drivers/usb/gadget/legacy/dbgp.c
drivers/usb/gadget/legacy/ether.c
drivers/usb/gadget/legacy/g_ffs.c
drivers/usb/gadget/legacy/gmidi.c
drivers/usb/gadget/legacy/hid.c
drivers/usb/gadget/legacy/mass_storage.c
drivers/usb/gadget/legacy/multi.c
drivers/usb/gadget/legacy/ncm.c
drivers/usb/gadget/legacy/nokia.c
drivers/usb/gadget/legacy/printer.c
drivers/usb/gadget/legacy/serial.c
drivers/usb/gadget/legacy/tcm_usb_gadget.c
drivers/usb/gadget/legacy/webcam.c
drivers/usb/gadget/legacy/zero.c
drivers/usb/gadget/udc/at91_udc.c
drivers/usb/gadget/udc/atmel_usba_udc.c
drivers/usb/gadget/udc/fsl_udc_core.c
drivers/usb/gadget/udc/fusb300_udc.c
drivers/usb/gadget/udc/m66592-udc.c
drivers/usb/gadget/udc/r8a66597-udc.c
drivers/usb/gadget/udc/s3c2410_udc.c
drivers/usb/gadget/udc/udc-xilinx.c
drivers/usb/host/xhci-ring.c
drivers/usb/host/xhci.c
drivers/usb/host/xhci.h
drivers/usb/musb/musb_core.c
drivers/usb/phy/phy-ab8500-usb.c
drivers/usb/phy/phy-isp1301-omap.c
drivers/usb/phy/phy-tahvo.c
drivers/usb/renesas_usbhs/fifo.c
drivers/usb/serial/cp210x.c
drivers/usb/serial/ftdi_sio.c
drivers/usb/serial/ftdi_sio_ids.h
drivers/usb/serial/pl2303.c
drivers/usb/serial/pl2303.h
drivers/usb/serial/visor.c
drivers/usb/storage/unusual_devs.h
drivers/vfio/pci/vfio_pci.c
drivers/vfio/vfio.c
drivers/vhost/scsi.c
drivers/video/backlight/pwm_bl.c
drivers/video/fbdev/amifb.c
drivers/video/fbdev/atafb.c
drivers/video/fbdev/hpfb.c
drivers/virtio/virtio_pci_common.c
drivers/xen/events/events_base.c
fs/9p/v9fs.h
fs/9p/vfs_inode.c
fs/9p/vfs_inode_dotl.c
fs/autofs4/symlink.c
fs/befs/linuxvfs.c
fs/binfmt_elf.c
fs/btrfs/backref.c
fs/btrfs/extent-tree.c
fs/btrfs/extent_io.c
fs/btrfs/free-space-cache.c
fs/btrfs/ordered-data.c
fs/btrfs/volumes.c
fs/ceph/inode.c
fs/cifs/cifs_dfs_ref.c
fs/cifs/cifs_unicode.c
fs/cifs/cifsfs.c
fs/cifs/cifsfs.h
fs/cifs/cifsproto.h
fs/cifs/cifssmb.c
fs/cifs/connect.c
fs/cifs/dir.c
fs/cifs/file.c
fs/cifs/inode.c
fs/cifs/link.c
fs/cifs/readdir.c
fs/cifs/smb1ops.c
fs/cifs/smb2pdu.c
fs/configfs/symlink.c
fs/dcache.c
fs/debugfs/file.c
fs/debugfs/inode.c
fs/ecryptfs/inode.c
fs/exec.c
fs/exofs/Kbuild
fs/exofs/exofs.h
fs/exofs/inode.c
fs/exofs/namei.c
fs/exofs/symlink.c [deleted file]
fs/ext2/inode.c
fs/ext2/namei.c
fs/ext2/symlink.c
fs/ext3/inode.c
fs/ext3/namei.c
fs/ext3/symlink.c
fs/ext4/ext4.h
fs/ext4/ext4_jbd2.c
fs/ext4/extents.c
fs/ext4/inode.c
fs/ext4/namei.c
fs/ext4/super.c
fs/ext4/symlink.c
fs/f2fs/data.c
fs/f2fs/f2fs.h
fs/f2fs/namei.c
fs/f2fs/super.c
fs/fhandle.c
fs/freevxfs/vxfs_extern.h
fs/freevxfs/vxfs_immed.c
fs/freevxfs/vxfs_inode.c
fs/fuse/dir.c
fs/gfs2/inode.c
fs/hostfs/hostfs_kern.c
fs/hppfs/hppfs.c
fs/inode.c
fs/jbd2/recovery.c
fs/jbd2/revoke.c
fs/jbd2/transaction.c
fs/jffs2/dir.c
fs/jffs2/fs.c
fs/jffs2/symlink.c
fs/jfs/inode.c
fs/jfs/namei.c
fs/jfs/symlink.c
fs/kernfs/dir.c
fs/kernfs/symlink.c
fs/libfs.c
fs/logfs/dir.c
fs/mount.h
fs/namei.c
fs/namespace.c
fs/nfs/nfs4proc.c
fs/nfs/symlink.c
fs/nfs/write.c
fs/nfsd/blocklayout.c
fs/nfsd/nfs4callback.c
fs/nfsd/nfs4state.c
fs/nfsd/state.h
fs/nfsd/xdr4.h
fs/ntfs/namei.c
fs/omfs/bitmap.c
fs/omfs/inode.c
fs/open.c
fs/overlayfs/copy_up.c
fs/overlayfs/dir.c
fs/overlayfs/inode.c
fs/overlayfs/super.c
fs/proc/base.c
fs/proc/inode.c
fs/proc/namespaces.c
fs/proc/self.c
fs/proc/thread_self.c
fs/select.c
fs/splice.c
fs/sysv/Makefile
fs/sysv/inode.c
fs/sysv/symlink.c [deleted file]
fs/sysv/sysv.h
fs/ubifs/dir.c
fs/ubifs/file.c
fs/ubifs/super.c
fs/ufs/inode.c
fs/ufs/namei.c
fs/ufs/symlink.c
fs/xfs/libxfs/xfs_attr_leaf.c
fs/xfs/libxfs/xfs_attr_leaf.h
fs/xfs/libxfs/xfs_bmap.c
fs/xfs/libxfs/xfs_ialloc.c
fs/xfs/xfs_attr_inactive.c
fs/xfs/xfs_file.c
fs/xfs/xfs_inode.c
fs/xfs/xfs_iops.c
fs/xfs/xfs_mount.c
include/asm-generic/barrier.h
include/asm-generic/cmpxchg.h
include/asm-generic/futex.h
include/asm-generic/io.h
include/asm-generic/iomap.h
include/asm-generic/pgtable.h
include/asm-generic/preempt.h
include/asm-generic/qspinlock.h [new file with mode: 0644]
include/asm-generic/qspinlock_types.h [new file with mode: 0644]
include/drm/drm_pciids.h
include/linux/alarmtimer.h
include/linux/backing-dev.h
include/linux/blk_types.h
include/linux/blkdev.h
include/linux/bottom_half.h
include/linux/brcmphy.h
include/linux/clockchips.h
include/linux/clocksource.h
include/linux/compiler.h
include/linux/cpumask.h
include/linux/debugfs.h
include/linux/dmar.h
include/linux/efi.h
include/linux/fs.h
include/linux/ftrace_event.h
include/linux/gfp.h
include/linux/hardirq.h
include/linux/hid-sensor-hub.h
include/linux/highmem.h
include/linux/hrtimer.h
include/linux/htirq.h
include/linux/init_task.h
include/linux/intel-iommu.h
include/linux/interrupt.h
include/linux/io-mapping.h
include/linux/io.h
include/linux/irq.h
include/linux/irqchip/arm-gic.h
include/linux/irqdesc.h
include/linux/jiffies.h
include/linux/kernel.h
include/linux/ktime.h
include/linux/lglock.h
include/linux/libata.h
include/linux/lockdep.h
include/linux/memcontrol.h
include/linux/namei.h
include/linux/netdevice.h
include/linux/of.h
include/linux/osq_lock.h
include/linux/percpu_counter.h
include/linux/perf_event.h
include/linux/platform_data/si5351.h
include/linux/preempt.h
include/linux/preempt_mask.h [deleted file]
include/linux/rculist.h
include/linux/rcupdate.h
include/linux/rcutiny.h
include/linux/rcutree.h
include/linux/rhashtable.h
include/linux/sched.h
include/linux/sched/rt.h
include/linux/sched/sysctl.h
include/linux/security.h
include/linux/seqlock.h
include/linux/skbuff.h
include/linux/spinlock.h
include/linux/tcp.h
include/linux/time64.h
include/linux/timekeeper_internal.h
include/linux/timekeeping.h
include/linux/timer.h
include/linux/timerqueue.h
include/linux/topology.h
include/linux/tty.h
include/linux/uaccess.h
include/linux/uidgid.h
include/linux/wait.h
include/net/cfg802154.h
include/net/codel.h
include/net/inet_connection_sock.h
include/net/mac80211.h
include/net/mac802154.h
include/net/sctp/sctp.h
include/net/tcp.h
include/rdma/ib_addr.h
include/rdma/ib_cm.h
include/rdma/iw_portmap.h
include/sound/hda_regmap.h
include/target/target_core_backend.h
include/target/target_core_configfs.h
include/target/target_core_fabric.h
include/trace/events/kmem.h
include/trace/events/sched.h
include/trace/events/timer.h
include/trace/events/writeback.h
include/uapi/drm/radeon_drm.h
include/uapi/linux/inet_diag.h
include/uapi/linux/mpls.h
include/uapi/linux/netfilter/nf_conntrack_tcp.h
include/uapi/linux/perf_event.h
include/uapi/linux/rtnetlink.h
include/uapi/linux/tcp.h
include/uapi/linux/virtio_balloon.h
include/uapi/rdma/rdma_netlink.h
include/xen/events.h
init/Kconfig
init/do_mounts.c
ipc/mqueue.c
kernel/Kconfig.locks
kernel/compat.c
kernel/cpu.c
kernel/events/core.c
kernel/events/internal.h
kernel/events/ring_buffer.c
kernel/fork.c
kernel/futex.c
kernel/irq/chip.c
kernel/irq/dummychip.c
kernel/irq/internals.h
kernel/irq/manage.c
kernel/irq/migration.c
kernel/locking/Makefile
kernel/locking/lglock.c
kernel/locking/lockdep.c
kernel/locking/lockdep_proc.c
kernel/locking/locktorture.c
kernel/locking/mcs_spinlock.h
kernel/locking/qrwlock.c
kernel/locking/qspinlock.c [new file with mode: 0644]
kernel/locking/qspinlock_paravirt.h [new file with mode: 0644]
kernel/locking/rtmutex.c
kernel/locking/rwsem-xadd.c
kernel/module.c
kernel/rcu/rcutorture.c
kernel/rcu/srcu.c
kernel/rcu/tiny.c
kernel/rcu/tiny_plugin.h
kernel/rcu/tree.c
kernel/rcu/tree.h
kernel/rcu/tree_plugin.h
kernel/rcu/tree_trace.c
kernel/rcu/update.c
kernel/sched/Makefile
kernel/sched/auto_group.c
kernel/sched/auto_group.h
kernel/sched/core.c
kernel/sched/cputime.c
kernel/sched/deadline.c
kernel/sched/debug.c
kernel/sched/fair.c
kernel/sched/loadavg.c [new file with mode: 0644]
kernel/sched/proc.c [deleted file]
kernel/sched/rt.c
kernel/sched/sched.h
kernel/sched/stats.h
kernel/sched/wait.c
kernel/signal.c
kernel/stop_machine.c
kernel/sys.c
kernel/sysctl.c
kernel/time/Makefile
kernel/time/alarmtimer.c
kernel/time/clockevents.c
kernel/time/clocksource.c
kernel/time/hrtimer.c
kernel/time/ntp.c
kernel/time/ntp_internal.h
kernel/time/posix-cpu-timers.c
kernel/time/posix-timers.c
kernel/time/tick-broadcast-hrtimer.c
kernel/time/tick-broadcast.c
kernel/time/tick-common.c
kernel/time/tick-internal.h
kernel/time/tick-oneshot.c
kernel/time/tick-sched.c
kernel/time/tick-sched.h
kernel/time/time.c
kernel/time/timeconst.bc
kernel/time/timekeeping.c
kernel/time/timekeeping.h
kernel/time/timer.c
kernel/time/timer_list.c
kernel/time/timer_stats.c
kernel/torture.c
kernel/trace/ring_buffer_benchmark.c
kernel/trace/trace_events_filter.c
kernel/trace/trace_output.c
kernel/watchdog.c
lib/Kconfig.debug
lib/cpu_rmap.c
lib/cpumask.c
lib/mpi/longlong.h
lib/percpu_counter.c
lib/radix-tree.c
lib/raid6/x86.h
lib/rhashtable.c
lib/strnlen_user.c
lib/swiotlb.c
lib/timerqueue.c
mm/backing-dev.c
mm/kmemleak.c
mm/memcontrol.c
mm/memory.c
mm/memory_hotplug.c
mm/mempolicy.c
mm/page-writeback.c
mm/page_isolation.c
mm/shmem.c
mm/zsmalloc.c
net/8021q/vlan.c
net/bluetooth/hci_core.c
net/bridge/br_fdb.c
net/bridge/br_multicast.c
net/bridge/br_netfilter.c
net/bridge/br_stp_timer.c
net/caif/caif_socket.c
net/ceph/osd_client.c
net/core/dev.c
net/core/net_namespace.c
net/core/pktgen.c
net/core/rtnetlink.c
net/core/skbuff.c
net/core/sock.c
net/dsa/dsa.c
net/ieee802154/Makefile
net/ieee802154/nl-phy.c
net/ieee802154/nl802154.c
net/ieee802154/rdev-ops.h
net/ieee802154/trace.c [new file with mode: 0644]
net/ieee802154/trace.h [new file with mode: 0644]
net/ipv4/esp4.c
net/ipv4/fib_trie.c
net/ipv4/inet_diag.c
net/ipv4/ip_vti.c
net/ipv4/netfilter/arp_tables.c
net/ipv4/netfilter/ip_tables.c
net/ipv4/route.c
net/ipv4/tcp.c
net/ipv4/tcp_cong.c
net/ipv4/tcp_dctcp.c
net/ipv4/tcp_fastopen.c
net/ipv4/tcp_illinois.c
net/ipv4/tcp_input.c
net/ipv4/tcp_minisocks.c
net/ipv4/tcp_vegas.c
net/ipv4/tcp_vegas.h
net/ipv4/tcp_westwood.c
net/ipv4/udp.c
net/ipv6/addrconf_core.c
net/ipv6/esp6.c
net/ipv6/ip6_fib.c
net/ipv6/ip6_output.c
net/ipv6/ip6_vti.c
net/ipv6/netfilter/ip6_tables.c
net/ipv6/route.c
net/ipv6/tcp_ipv6.c
net/ipv6/udp.c
net/mac80211/cfg.c
net/mac80211/ieee80211_i.h
net/mac80211/iface.c
net/mac80211/key.c
net/mac80211/key.h
net/mac80211/rx.c
net/mac80211/sta_info.c
net/mac80211/util.c
net/mac80211/wep.c
net/mac802154/cfg.c
net/mac802154/ieee802154_i.h
net/mac802154/iface.c
net/mac802154/llsec.c
net/mac802154/main.c
net/mpls/af_mpls.c
net/mpls/internal.h
net/netfilter/Kconfig
net/netfilter/ipvs/ip_vs_ctl.c
net/netfilter/nf_conntrack_proto_tcp.c
net/netfilter/nf_tables_api.c
net/netfilter/nfnetlink_log.c
net/netfilter/nfnetlink_queue_core.c
net/netlink/af_netlink.c
net/openvswitch/vport-netdev.c
net/packet/af_packet.c
net/rds/connection.c
net/rds/ib_cm.c
net/rds/tcp_connect.c
net/rds/tcp_listen.c
net/sched/cls_api.c
net/sched/sch_api.c
net/sched/sch_codel.c
net/sched/sch_fq_codel.c
net/sched/sch_gred.c
net/sctp/auth.c
net/sunrpc/auth_gss/gss_rpc_xdr.c
net/switchdev/switchdev.c
net/tipc/socket.c
net/unix/af_unix.c
net/wireless/wext-compat.c
net/xfrm/xfrm_input.c
net/xfrm/xfrm_replay.c
net/xfrm/xfrm_state.c
scripts/checkpatch.pl
scripts/checksyscalls.sh
scripts/gdb/linux/modules.py
security/capability.c
security/security.c
security/selinux/avc.c
security/selinux/hooks.c
security/selinux/include/avc.h
sound/atmel/ac97c.c
sound/core/hrtimer.c
sound/core/pcm_lib.c
sound/drivers/pcsp/pcsp.c
sound/hda/hdac_regmap.c
sound/mips/Kconfig
sound/pci/asihpi/hpioctl.c
sound/pci/hda/hda_codec.c
sound/pci/hda/hda_generic.c
sound/pci/hda/hda_intel.c
sound/pci/hda/hda_local.h
sound/pci/hda/patch_conexant.c
sound/pci/hda/patch_realtek.c
sound/pci/hda/patch_sigmatel.c
sound/pci/hda/patch_via.c
sound/pci/hda/thinkpad_helper.c
sound/soc/codecs/mc13783.c
sound/soc/codecs/uda1380.c
sound/soc/codecs/wm8960.c
sound/soc/codecs/wm8994.c
sound/soc/davinci/davinci-mcasp.c
sound/soc/soc-dapm.c
sound/usb/mixer.c
sound/usb/mixer_maps.c
sound/usb/quirks.c
tools/Makefile
tools/arch/alpha/include/asm/barrier.h [new file with mode: 0644]
tools/arch/arm/include/asm/barrier.h [new file with mode: 0644]
tools/arch/arm64/include/asm/barrier.h [new file with mode: 0644]
tools/arch/ia64/include/asm/barrier.h [new file with mode: 0644]
tools/arch/mips/include/asm/barrier.h [new file with mode: 0644]
tools/arch/powerpc/include/asm/barrier.h [new file with mode: 0644]
tools/arch/s390/include/asm/barrier.h [new file with mode: 0644]
tools/arch/sh/include/asm/barrier.h [new file with mode: 0644]
tools/arch/sparc/include/asm/barrier.h [new file with mode: 0644]
tools/arch/sparc/include/asm/barrier_32.h [new file with mode: 0644]
tools/arch/sparc/include/asm/barrier_64.h [new file with mode: 0644]
tools/arch/tile/include/asm/barrier.h [new file with mode: 0644]
tools/arch/x86/include/asm/atomic.h [new file with mode: 0644]
tools/arch/x86/include/asm/barrier.h [new file with mode: 0644]
tools/arch/x86/include/asm/rmwcc.h [new file with mode: 0644]
tools/arch/xtensa/include/asm/barrier.h [new file with mode: 0644]
tools/build/Makefile.build
tools/build/Makefile.feature
tools/build/tests/ex/Build
tools/build/tests/ex/empty2/README [new file with mode: 0644]
tools/include/asm-generic/atomic-gcc.h [new file with mode: 0644]
tools/include/asm-generic/barrier.h [new file with mode: 0644]
tools/include/asm/atomic.h [new file with mode: 0644]
tools/include/asm/barrier.h [new file with mode: 0644]
tools/include/linux/atomic.h [new file with mode: 0644]
tools/include/linux/compiler.h
tools/include/linux/kernel.h [new file with mode: 0644]
tools/include/linux/list.h [new file with mode: 0644]
tools/include/linux/poison.h [new file with mode: 0644]
tools/include/linux/types.h
tools/lib/lockdep/Makefile
tools/lib/lockdep/uinclude/linux/kernel.h
tools/lib/traceevent/.gitignore
tools/lib/traceevent/Makefile
tools/lib/traceevent/event-parse.c
tools/lib/traceevent/event-parse.h
tools/lib/traceevent/plugin_cfg80211.c
tools/net/bpf_jit_disasm.c
tools/perf/.gitignore
tools/perf/Documentation/callchain-overhead-calculation.txt [new file with mode: 0644]
tools/perf/Documentation/perf-bench.txt
tools/perf/Documentation/perf-inject.txt
tools/perf/Documentation/perf-kmem.txt
tools/perf/Documentation/perf-kvm.txt
tools/perf/Documentation/perf-probe.txt
tools/perf/Documentation/perf-record.txt
tools/perf/Documentation/perf-report.txt
tools/perf/Documentation/perf-script.txt
tools/perf/Documentation/perf-top.txt
tools/perf/Documentation/perf-trace.txt
tools/perf/MANIFEST
tools/perf/Makefile
tools/perf/Makefile.perf
tools/perf/arch/arm64/Build
tools/perf/arch/arm64/include/perf_regs.h
tools/perf/arch/arm64/tests/Build [new file with mode: 0644]
tools/perf/arch/arm64/tests/dwarf-unwind.c [new file with mode: 0644]
tools/perf/arch/arm64/tests/regs_load.S [new file with mode: 0644]
tools/perf/arch/common.c
tools/perf/arch/powerpc/util/Build
tools/perf/arch/powerpc/util/sym-handling.c [new file with mode: 0644]
tools/perf/bench/Build
tools/perf/bench/bench.h
tools/perf/bench/futex-wake-parallel.c [new file with mode: 0644]
tools/perf/bench/futex-wake.c
tools/perf/bench/numa.c
tools/perf/builtin-annotate.c
tools/perf/builtin-bench.c
tools/perf/builtin-buildid-list.c
tools/perf/builtin-diff.c
tools/perf/builtin-inject.c
tools/perf/builtin-kmem.c
tools/perf/builtin-kvm.c
tools/perf/builtin-lock.c
tools/perf/builtin-mem.c
tools/perf/builtin-probe.c
tools/perf/builtin-record.c
tools/perf/builtin-report.c
tools/perf/builtin-sched.c
tools/perf/builtin-script.c
tools/perf/builtin-stat.c
tools/perf/builtin-timechart.c
tools/perf/builtin-top.c
tools/perf/builtin-trace.c
tools/perf/config/Makefile
tools/perf/config/utilities.mak
tools/perf/perf-sys.h
tools/perf/perf.h
tools/perf/tests/Build
tools/perf/tests/builtin-test.c
tools/perf/tests/code-reading.c
tools/perf/tests/dso-data.c
tools/perf/tests/dwarf-unwind.c
tools/perf/tests/evsel-roundtrip-name.c
tools/perf/tests/hists_common.c
tools/perf/tests/hists_cumulate.c
tools/perf/tests/hists_filter.c
tools/perf/tests/hists_link.c
tools/perf/tests/hists_output.c
tools/perf/tests/keep-tracking.c
tools/perf/tests/kmod-path.c
tools/perf/tests/make
tools/perf/tests/mmap-basic.c
tools/perf/tests/mmap-thread-lookup.c
tools/perf/tests/open-syscall-all-cpus.c [deleted file]
tools/perf/tests/open-syscall-tp-fields.c [deleted file]
tools/perf/tests/open-syscall.c [deleted file]
tools/perf/tests/openat-syscall-all-cpus.c [new file with mode: 0644]
tools/perf/tests/openat-syscall-tp-fields.c [new file with mode: 0644]
tools/perf/tests/openat-syscall.c [new file with mode: 0644]
tools/perf/tests/parse-events.c
tools/perf/tests/perf-time-to-tsc.c
tools/perf/tests/pmu.c
tools/perf/tests/switch-tracking.c
tools/perf/tests/tests.h
tools/perf/tests/thread-mg-share.c
tools/perf/tests/vmlinux-kallsyms.c
tools/perf/ui/browsers/annotate.c
tools/perf/ui/browsers/hists.c
tools/perf/ui/tui/setup.c
tools/perf/util/Build
tools/perf/util/annotate.c
tools/perf/util/annotate.h
tools/perf/util/auxtrace.c [new file with mode: 0644]
tools/perf/util/auxtrace.h [new file with mode: 0644]
tools/perf/util/build-id.c
tools/perf/util/cache.h
tools/perf/util/callchain.h
tools/perf/util/cgroup.c
tools/perf/util/cgroup.h
tools/perf/util/comm.c
tools/perf/util/data-convert-bt.c
tools/perf/util/db-export.c
tools/perf/util/dso.c
tools/perf/util/dso.h
tools/perf/util/dwarf-aux.c
tools/perf/util/dwarf-aux.h
tools/perf/util/environment.c
tools/perf/util/event.c
tools/perf/util/event.h
tools/perf/util/evlist.c
tools/perf/util/evlist.h
tools/perf/util/evsel.c
tools/perf/util/evsel.h
tools/perf/util/header.c
tools/perf/util/header.h
tools/perf/util/hist.c
tools/perf/util/hist.h
tools/perf/util/include/linux/kernel.h [deleted file]
tools/perf/util/include/linux/list.h [deleted file]
tools/perf/util/include/linux/poison.h [deleted file]
tools/perf/util/include/linux/rbtree.h
tools/perf/util/machine.c
tools/perf/util/machine.h
tools/perf/util/map.c
tools/perf/util/map.h
tools/perf/util/pager.c
tools/perf/util/parse-branch-options.c [new file with mode: 0644]
tools/perf/util/parse-branch-options.h [new file with mode: 0644]
tools/perf/util/parse-events.c
tools/perf/util/parse-events.h
tools/perf/util/parse-events.l
tools/perf/util/parse-events.y
tools/perf/util/parse-options.h
tools/perf/util/pmu.c
tools/perf/util/pmu.h
tools/perf/util/probe-event.c
tools/perf/util/probe-event.h
tools/perf/util/probe-finder.c
tools/perf/util/probe-finder.h
tools/perf/util/pstack.c
tools/perf/util/pstack.h
tools/perf/util/python-ext-sources
tools/perf/util/record.c
tools/perf/util/session.c
tools/perf/util/session.h
tools/perf/util/sort.c
tools/perf/util/sort.h
tools/perf/util/stat-shadow.c [new file with mode: 0644]
tools/perf/util/stat.c
tools/perf/util/stat.h
tools/perf/util/strfilter.c
tools/perf/util/strfilter.h
tools/perf/util/symbol-elf.c
tools/perf/util/symbol.c
tools/perf/util/symbol.h
tools/perf/util/thread-stack.c
tools/perf/util/thread-stack.h
tools/perf/util/thread.c
tools/perf/util/thread.h
tools/perf/util/thread_map.c
tools/perf/util/tool.h
tools/perf/util/trace-event-parse.c
tools/perf/util/unwind-libunwind.c
tools/perf/util/util.c
tools/perf/util/util.h
tools/perf/util/vdso.c
tools/perf/util/vdso.h
tools/perf/util/xyarray.c
tools/perf/util/xyarray.h
tools/power/x86/turbostat/Makefile
tools/power/x86/turbostat/turbostat.c
tools/testing/selftests/rcutorture/bin/configinit.sh
tools/testing/selftests/rcutorture/bin/kvm-recheck.sh
tools/testing/selftests/rcutorture/bin/kvm.sh
tools/testing/selftests/rcutorture/configs/rcu/CFcommon
tools/testing/selftests/rcutorture/configs/rcu/SRCU-N
tools/testing/selftests/rcutorture/configs/rcu/SRCU-P
tools/testing/selftests/rcutorture/configs/rcu/SRCU-P.boot
tools/testing/selftests/rcutorture/configs/rcu/TASKS01
tools/testing/selftests/rcutorture/configs/rcu/TASKS02
tools/testing/selftests/rcutorture/configs/rcu/TASKS03
tools/testing/selftests/rcutorture/configs/rcu/TINY02
tools/testing/selftests/rcutorture/configs/rcu/TINY02.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE01
tools/testing/selftests/rcutorture/configs/rcu/TREE02
tools/testing/selftests/rcutorture/configs/rcu/TREE02-T
tools/testing/selftests/rcutorture/configs/rcu/TREE03
tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/TREE04
tools/testing/selftests/rcutorture/configs/rcu/TREE05
tools/testing/selftests/rcutorture/configs/rcu/TREE06
tools/testing/selftests/rcutorture/configs/rcu/TREE06.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE07
tools/testing/selftests/rcutorture/configs/rcu/TREE08
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T
tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot [new file with mode: 0644]
tools/testing/selftests/rcutorture/configs/rcu/TREE08.boot
tools/testing/selftests/rcutorture/configs/rcu/TREE09
tools/testing/selftests/rcutorture/doc/TREE_RCU-kconfig.txt
tools/testing/selftests/timers/leap-a-day.c
tools/testing/selftests/x86/Makefile
tools/testing/selftests/x86/check_cc.sh [new file with mode: 0755]
tools/testing/selftests/x86/entry_from_vm86.c [new file with mode: 0644]
tools/testing/selftests/x86/run_x86_tests.sh [deleted file]
tools/testing/selftests/x86/sysret_ss_attrs.c [new file with mode: 0644]
tools/testing/selftests/x86/thunks.S [new file with mode: 0644]
tools/testing/selftests/x86/trivial_32bit_program.c
tools/testing/selftests/x86/trivial_64bit_program.c [new file with mode: 0644]
tools/thermal/tmon/Makefile
tools/vm/Makefile

diff --git a/CREDITS b/CREDITS
index 40cc4bfb34dbec35f2a03f4946e7868fd829c8d1..ec7e6c7fdd1b9c93342d808d1c65d8807e63312f 100644 (file)
--- a/CREDITS
+++ b/CREDITS
@@ -3709,6 +3709,13 @@ N: Dirk Verworner
 D: Co-author of German book ``Linux-Kernel-Programmierung''
 D: Co-founder of Berlin Linux User Group
 
+N: Andrew Victor
+E: linux@maxim.org.za
+W: http://maxim.org.za/at91_26.html
+D: First maintainer of Atmel ARM-based SoC, aka AT91
+D: Introduced support for at91rm9200, the first chip of AT91 family
+S: South Africa
+
 N: Riku Voipio
 E: riku.voipio@iki.fi
 D: Author of PCA9532 LED and Fintek f75375s hwmon driver
index 99983e67c13c9f6aadff74c1969a4d27cede7d26..da95513571ea3e3e53263f6c91588fb58d50f3fb 100644 (file)
@@ -162,7 +162,7 @@ Description:        Discover CPUs in the same CPU frequency coordination domain
 What:          /sys/devices/system/cpu/cpu*/cache/index3/cache_disable_{0,1}
 Date:          August 2008
 KernelVersion: 2.6.27
-Contact:       discuss@x86-64.org
+Contact:       Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:   Disable L3 cache indices
 
                These files exist in every CPU's cache/index3 directory. Each
index 05874da7ce80302d92e15781deb90c2f1522c9e4..e794eac32a90a51c4d01577affe71466cde99a1f 100644 (file)
@@ -18,3 +18,13 @@ Contact:     Dave Young <dyoung@redhat.com>
 Description:   It shows the physical address of config table entry in the EFI
                system table.
 Users:         Kexec
+
+What:          /sys/firmware/efi/systab
+Date:          April 2005
+Contact:       linux-efi@vger.kernel.org
+Description:   Displays the physical addresses of all EFI Configuration
+               Tables found via the EFI System Table. The order in
+               which the tables are printed forms an ABI and newer
+               versions are always printed first, i.e. ACPI20 comes
+               before ACPI.
+Users:         dmidecode
diff --git a/Documentation/ABI/testing/sysfs-firmware-efi-esrt b/Documentation/ABI/testing/sysfs-firmware-efi-esrt
new file mode 100644 (file)
index 0000000..6e431d1
--- /dev/null
@@ -0,0 +1,81 @@
+What:          /sys/firmware/efi/esrt/
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   Provides userland access to read the EFI System Resource Table
+               (ESRT), a catalog of firmware resources that can be updated with
+               the UEFI UpdateCapsule mechanism described in section 7.5 of
+               the UEFI Standard.
+Users:         fwupdate - https://github.com/rhinstaller/fwupdate
+
+What:          /sys/firmware/efi/esrt/fw_resource_count
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The number of entries in the ESRT
+
+What:          /sys/firmware/efi/esrt/fw_resource_count_max
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The maximum number of entries that /could/ be registered
+               in the allocation that currently holds the table.  This is
+               really only useful to the system firmware itself.
+
+What:          /sys/firmware/efi/esrt/fw_resource_version
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The version of the ESRT structure provided by the firmware.
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   Each ESRT entry is identified by a GUID, and each gets a
+               subdirectory under entries/.
+               Example: /sys/firmware/efi/esrt/entries/entry0/
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/fw_type
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   What kind of firmware entry this is:
+               0 - Unknown
+               1 - System Firmware
+               2 - Device Firmware
+               3 - UEFI Driver
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/fw_class
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   This is the entry's GUID; it will match the directory name.
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/fw_version
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The version of the firmware currently installed.  This is a
+               32-bit unsigned integer.
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/lowest_supported_fw_version
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The lowest version of the firmware that can be installed.
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/capsule_flags
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   Flags that must be passed to UpdateCapsule()
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/last_attempt_version
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The last firmware version for which an update was attempted.
+
+What:          /sys/firmware/efi/esrt/entries/entry$N/last_attempt_status
+Date:          February 2015
+Contact:       Peter Jones <pjones@redhat.com>
+Description:   The result of the last firmware update attempt for the
+               firmware resource entry.
+               0 - Success
+               1 - Insufficient resources
+               2 - Incorrect version
+               3 - Invalid format
+               4 - Authentication error
+               5 - AC power event
+               6 - Battery power event
+
index 453ebe6953eefe0b305a82da60259849756bfea9..f05a9afb2c39b61efb841886e6f272adfa9da797 100644 (file)
@@ -10,7 +10,19 @@ also be used to protect arrays.  Three situations are as follows:
 
 3.  Resizeable Arrays
 
-Each of these situations are discussed below.
+Each of these three situations involves an RCU-protected pointer to an
+array that is separately indexed.  It might be tempting to consider use
+of RCU to instead protect the index into an array; however, this use
+case is -not- supported.  The problem with RCU-protected indexes into
+arrays is that compilers can play way too many optimization games with
+integers, which means that the rules governing handling of these indexes
+are far more trouble than they are worth.  If RCU-protected indexes into
+arrays prove to be particularly valuable (which they have not thus far),
+explicit cooperation from the compiler will be required to permit them
+to be safely used.
+
+That aside, each of the three RCU-protected pointer situations is
+described in the following sections.
 
 
 Situation 1: Hash Tables
@@ -36,9 +48,9 @@ Quick Quiz:  Why is it so important that updates be rare when
 Situation 3: Resizeable Arrays
 
 Use of RCU for resizeable arrays is demonstrated by the grow_ary()
-function used by the System V IPC code.  The array is used to map from
-semaphore, message-queue, and shared-memory IDs to the data structure
-that represents the corresponding IPC construct.  The grow_ary()
+function formerly used by the System V IPC code.  The array is used
+to map from semaphore, message-queue, and shared-memory IDs to the data
+structure that represents the corresponding IPC construct.  The grow_ary()
 function does not acquire any locks; instead its caller must hold the
 ids->sem semaphore.
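
A schematic kernel-style sketch of the pattern the text describes: RCU
protects the pointer to the (possibly resized) array, while the index is
ordinary data validated against the snapshot's size. The structure layout
and the ipc_use() consumer are hypothetical:

struct ipc_array {
        int size;
        void *entries[];                /* flexible array member */
};

struct ipc_array __rcu *ids_arr;        /* resized by updaters under ids->sem */

void ipc_lookup_and_use(int id)
{
        struct ipc_array *arr;

        rcu_read_lock();
        arr = rcu_dereference(ids_arr);         /* snapshot current array */
        if (id >= 0 && id < arr->size)
                ipc_use(arr->entries[id]);      /* hypothetical consumer */
        rcu_read_unlock();      /* the snapshot must not be used past here */
}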
 
index cd83d2348fef8c4a3ff3bee57fc9904976b46390..da51d306885077b050d6c3624b96cb08c75758bb 100644 (file)
@@ -47,11 +47,6 @@ checking of rcu_dereference() primitives:
                Use explicit check expression "c" along with
                srcu_read_lock_held().  This is useful in code that
                is invoked by both SRCU readers and updaters.
-       rcu_dereference_index_check(p, c):
-               Use explicit check expression "c", but the caller
-               must supply one of the rcu_read_lock_held() functions.
-               This is useful in code that uses RCU-protected arrays
-               that is invoked by both RCU readers and updaters.
        rcu_dereference_raw(p):
                Don't check.  (Use sparingly, if at all.)
        rcu_dereference_protected(p, c):
@@ -64,11 +59,6 @@ checking of rcu_dereference() primitives:
                but retain the compiler constraints that prevent duplicating
                or coalescing.  This is useful when testing the
                value of the pointer itself, for example, against NULL.
-       rcu_access_index(idx):
-               Return the value of the index and omit all barriers, but
-               retain the compiler constraints that prevent duplicating
-               or coalescsing.  This is useful when when testing the
-               value of the index itself, for example, against -1.
 
 The rcu_dereference_check() check expression can be any boolean
 expression, but would normally include a lockdep expression.  However,
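
For example, a fetch that is legal either within an RCU read-side critical
section or with a specific lock held might be written as follows (struct foo,
gp and my_lock are illustrative):

struct foo { int a; };
static struct foo __rcu *gp;
static DEFINE_SPINLOCK(my_lock);

static struct foo *fetch_foo(void)
{
        /* Complains under CONFIG_PROVE_RCU unless called in an RCU
         * read-side critical section or with my_lock held. */
        return rcu_dereference_check(gp, lockdep_is_held(&my_lock));
}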
index ceb05da5a5acd3e8a6735543284c7a64c1c91228..1e6c0da994f544b6fc3acaa786d4f4f932551db6 100644 (file)
@@ -25,17 +25,6 @@ o    You must use one of the rcu_dereference() family of primitives
        for an example where the compiler can in fact deduce the exact
        value of the pointer, and thus cause misordering.
 
-o      Do not use single-element RCU-protected arrays.  The compiler
-       is within its right to assume that the value of an index into
-       such an array must necessarily evaluate to zero.  The compiler
-       could then substitute the constant zero for the computation, so
-       that the array index no longer depended on the value returned
-       by rcu_dereference().  If the array index no longer depends
-       on rcu_dereference(), then both the compiler and the CPU
-       are within their rights to order the array access before the
-       rcu_dereference(), which can cause the array access to return
-       garbage.
-
 o      Avoid cancellation when using the "+" and "-" infix arithmetic
        operators.  For example, for a given variable "x", avoid
        "(x-x)".  There are similar arithmetic pitfalls from other
@@ -76,14 +65,15 @@ o   Do not use the results from the boolean "&&" and "||" when
        dereferencing.  For example, the following (rather improbable)
        code is buggy:
 
-               int a[2];
-               int index;
-               int force_zero_index = 1;
+               int *p;
+               int *q;
 
                ...
 
-               r1 = rcu_dereference(i1)
-               r2 = a[r1 && force_zero_index];  /* BUGGY!!! */
+               p = rcu_dereference(gp)
+               q = &global_q;
+               q += p != &oom_p1 && p != &oom_p2;
+               r1 = *q;  /* BUGGY!!! */
 
        The reason this is buggy is that "&&" and "||" are often compiled
        using branches.  While weak-memory machines such as ARM or PowerPC
@@ -94,14 +84,15 @@ o   Do not use the results from relational operators ("==", "!=",
        ">", ">=", "<", or "<=") when dereferencing.  For example,
        the following (quite strange) code is buggy:
 
-               int a[2];
-               int index;
-               int flip_index = 0;
+               int *p;
+               int *q;
 
                ...
 
-               r1 = rcu_dereference(i1)
-               r2 = a[r1 != flip_index];  /* BUGGY!!! */
+               p = rcu_dereference(gp)
+               q = &global_q;
+               q += p > &oom_p;
+               r1 = *q;  /* BUGGY!!! */
 
        As before, the reason this is buggy is that relational operators
        are often compiled using branches.  And as before, although
@@ -193,6 +184,11 @@ o  Be very careful about comparing pointers obtained from
                pointer.  Note that the volatile cast in rcu_dereference()
                will normally prevent the compiler from knowing too much.
 
+               However, please note that if the compiler knows that the
+               pointer takes on only one of two values, a not-equal
+               comparison will provide exactly the information that the
+               compiler needs to deduce the value of the pointer.
+
 o      Disable any value-speculation optimizations that your compiler
        might provide, especially if you are making use of feedback-based
        optimizations that take data collected from prior runs.  Such
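
The safe counterpart to the buggy examples above is to use the pointer
returned by rcu_dereference() directly, preserving the address dependency
rather than deriving a new pointer from comparisons (gp and the fields are
illustrative):

        rcu_read_lock();
        p = rcu_dereference(gp);        /* head of the dependency chain */
        r1 = p->a;              /* dependency-ordered after the load of gp */
        r2 = p->b;              /* no comparison-derived pointer involved */
        rcu_read_unlock();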
index 88dfce182f660904a609aa97e670a4ce87659985..5746b0c77f3e4c53da9d68b1213bae991f062092 100644 (file)
@@ -256,7 +256,9 @@ rcu_dereference()
        If you are going to be fetching multiple fields from the
        RCU-protected structure, using the local variable is of
        course preferred.  Repeated rcu_dereference() calls look
-       ugly and incur unnecessary overhead on Alpha CPUs.
+       ugly, do not guarantee that the same pointer will be returned
+       if an update happened while in the critical section, and incur
+       unnecessary overhead on Alpha CPUs.
 
        Note that the value returned by rcu_dereference() is valid
        only within the enclosing RCU read-side critical section.
@@ -879,9 +881,7 @@ SRCU:       Initialization/cleanup
 
 All:  lockdep-checked RCU-protected pointer access
 
-       rcu_access_index
        rcu_access_pointer
-       rcu_dereference_index_check
        rcu_dereference_raw
        rcu_lockdep_assert
        rcu_sleep_check
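
Illustrating the rcu_dereference() guidance above: fetch the pointer once
into a local variable, then use that local for every field, since repeated
rcu_dereference() calls may return different pointers if an update slips in
(gp and the fields are illustrative):

        rcu_read_lock();
        p = rcu_dereference(gp);        /* one fetch, one consistent snapshot */
        if (p) {
                a = p->a;
                b = p->b;               /* same structure as ->a came from */
        }
        rcu_read_unlock();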
index 750401f9134190a210c8c24089a08ca9cb16559c..15dfce708ebf6ec3263ac0d66ff2b6f3a57471d4 100644 (file)
@@ -253,7 +253,7 @@ input driver:
 GPIO support
 ~~~~~~~~~~~~
 ACPI 5 introduced two new resources to describe GPIO connections: GpioIo
-and GpioInt. These resources are used be used to pass GPIO numbers used by
+and GpioInt. These resources can be used to pass GPIO numbers used by
 the device to the driver. ACPI 5.1 extended this with _DSD (Device
 Specific Data) which made it possible to name the GPIOs among other things.
 
index ae36fcf86dc7213c8b1a72bb0e698f89e51c878f..f35dad11f0de78955a2e4661f8ef5c9d51eb27d9 100644 (file)
@@ -1,9 +1,9 @@
 _DSD Device Properties Related to GPIO
 --------------------------------------
 
-With the release of ACPI 5.1 and the _DSD configuration objecte names
-can finally be given to GPIOs (and other things as well) returned by
-_CRS.  Previously, we were only able to use an integer index to find
+With the release of ACPI 5.1, the _DSD configuration object finally
+allows names to be given to GPIOs (and other things as well) returned
+by _CRS.  Previously, we were only able to use an integer index to find
 the corresponding GPIO, which is pretty error prone (it depends on
 the _CRS output ordering, for example).
 
index 0aad6deb2d9638e3b0d7bf16db928e1bd0b80d8b..12b1b25b4da9711c95ab013adf1bec4214964d2c 100644 (file)
@@ -1,6 +1,6 @@
 
 Export CPU topology info via sysfs. Items (attributes) are similar
-to /proc/cpuinfo.
+to the /proc/cpuinfo output of some architectures:
 
 1) /sys/devices/system/cpu/cpuX/topology/physical_package_id:
 
@@ -23,20 +23,35 @@ to /proc/cpuinfo.
 4) /sys/devices/system/cpu/cpuX/topology/thread_siblings:
 
        internal kernel map of cpuX's hardware threads within the same
-       core as cpuX
+       core as cpuX.
 
-5) /sys/devices/system/cpu/cpuX/topology/core_siblings:
+5) /sys/devices/system/cpu/cpuX/topology/thread_siblings_list:
+
+       human-readable list of cpuX's hardware threads within the same
+       core as cpuX.
+
+6) /sys/devices/system/cpu/cpuX/topology/core_siblings:
 
        internal kernel map of cpuX's hardware threads within the same
        physical_package_id.
 
-6) /sys/devices/system/cpu/cpuX/topology/book_siblings:
+7) /sys/devices/system/cpu/cpuX/topology/core_siblings_list:
+
+       human-readable list of cpuX's hardware threads within the same
+       physical_package_id.
+
+8) /sys/devices/system/cpu/cpuX/topology/book_siblings:
 
        internal kernel map of cpuX's hardware threads within the same
        book_id.
 
+9) /sys/devices/system/cpu/cpuX/topology/book_siblings_list:
+
+       human-readable list of cpuX's hardware threads within the same
+       book_id.
+
 To implement it in an architecture-neutral way, a new source file,
-drivers/base/topology.c, is to export the 4 or 6 attributes. The two book
+drivers/base/topology.c, is to export the 6 or 9 attributes. The three book
 related sysfs files will only be created if CONFIG_SCHED_BOOK is selected.
 
 For an architecture to support this feature, it must define some of
@@ -44,20 +59,22 @@ these macros in include/asm-XXX/topology.h:
 #define topology_physical_package_id(cpu)
 #define topology_core_id(cpu)
 #define topology_book_id(cpu)
-#define topology_thread_cpumask(cpu)
+#define topology_sibling_cpumask(cpu)
 #define topology_core_cpumask(cpu)
 #define topology_book_cpumask(cpu)
 
-The type of **_id is int.
-The type of siblings is (const) struct cpumask *.
+The type of **_id macros is int.
+The type of **_cpumask macros is (const) struct cpumask *. The latter
+correspond with appropriate **_siblings sysfs attributes (except for
+topology_sibling_cpumask() which corresponds with thread_siblings).
 
 To be consistent on all architectures, include/linux/topology.h
 provides default definitions for any of the above macros that are
 not defined by include/asm-XXX/topology.h:
 1) physical_package_id: -1
 2) core_id: 0
-3) thread_siblings: just the given CPU
-4) core_siblings: just the given CPU
+3) sibling_cpumask: just the given CPU
+4) core_cpumask: just the given CPU
 
 For architectures that don't support books (CONFIG_SCHED_BOOK) there are no
 default definitions for topology_book_id() and topology_book_cpumask().
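
A small userspace sketch printing a few of the attributes listed above for
cpu0; which files actually exist depends on the architecture and kernel
configuration:

#include <stdio.h>

int main(void)
{
        static const char * const attrs[] = {
                "/sys/devices/system/cpu/cpu0/topology/physical_package_id",
                "/sys/devices/system/cpu/cpu0/topology/thread_siblings_list",
                "/sys/devices/system/cpu/cpu0/topology/core_siblings_list",
        };
        char line[256];
        int i;

        for (i = 0; i < 3; i++) {
                FILE *f = fopen(attrs[i], "r");

                if (f && fgets(line, sizeof(line), f))
                        printf("%s: %s", attrs[i], line);
                if (f)
                        fclose(f);
        }
        return 0;
}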
diff --git a/Documentation/devicetree/bindings/arm/armv7m_systick.txt b/Documentation/devicetree/bindings/arm/armv7m_systick.txt
new file mode 100644 (file)
index 0000000..7cf4a24
--- /dev/null
@@ -0,0 +1,26 @@
+* ARMv7M System Timer
+
+ARMv7-M includes a system timer, known as SysTick. The current driver
+only implements the clocksource feature.
+
+Required properties:
+- compatible     : Should be "arm,armv7m-systick"
+- reg            : The address range of the timer
+
+Required clocking property, must be one of:
+- clocks         : The input clock of the timer
+- clock-frequency : The rate in Hz of the SysTick input clock
+
+Examples:
+
+systick: timer@e000e010 {
+       compatible = "arm,armv7m-systick";
+       reg = <0xe000e010 0x10>;
+       clocks = <&clk_systick>;
+};
+
+systick: timer@e000e010 {
+       compatible = "arm,armv7m-systick";
+       reg = <0xe000e010 0x10>;
+       clock-frequency = <90000000>;
+};
index 974624ea68f67d3f16df404bccb57f587ac8a82e..161448da959d26edeb19de7db2561564c4c473dc 100644 (file)
@@ -6,6 +6,7 @@ provided by Arteris.
 Required properties:
 - compatible : Should be "ti,omap3-l3-smx" for OMAP3 family
                Should be "ti,omap4-l3-noc" for OMAP4 family
+               Should be "ti,omap5-l3-noc" for OMAP5 family
               Should be "ti,dra7-l3-noc" for DRA7 family
                Should be "ti,am4372-l3-noc" for AM43 family
 - reg: Contains L3 register address range for each noc domain.
index 7a4d4926f44e47b9a80077192ae9dacbd1089e7e..5ba6450693b9816dfc1dacef96570e14c22a111b 100644 (file)
@@ -248,7 +248,7 @@ Required properties for peripheral clocks:
 - #address-cells : shall be 1 (reg is used to encode clk id).
 - clocks : shall be the master clock phandle.
        e.g. clocks = <&mck>;
-- name: device tree node describing a specific system clock.
+- name: device tree node describing a specific peripheral clock.
        * #clock-cells : from common clock binding; shall be set to 0.
        * reg: peripheral id. See Atmel's datasheets to get a full
          list of peripheral ids.
index c40711e8e8f7df7c27f74d1dcf418909a96bedf2..28b28309f53575e91752cba6453e5594ea985de3 100644 (file)
@@ -17,7 +17,8 @@ Required properties:
 - #clock-cells: from common clock binding; shall be set to 1.
 - clocks: from common clock binding; list of parent clock
   handles, shall be xtal reference clock or xtal and clkin for
-  si5351c only.
+  si5351c only. Corresponding clock input names are "xtal" and
+  "clkin" respectively.
 - #address-cells: shall be set to 1.
 - #size-cells: shall be set to 0.
 
@@ -71,6 +72,7 @@ i2c-master-node {
 
                /* connect xtal input to 25MHz reference */
                clocks = <&ref25>;
+               clock-names = "xtal";
 
                /* connect xtal input as source of pll0 and pll1 */
                silabs,pll-source = <0 0>, <1 0>;
index a4873e5e3e36de172c49f59108360e32aca9fefd..e30e184f50c727aa84d2284914581ce9927dba1c 100644 (file)
@@ -38,7 +38,7 @@ dma_apbx: dma-apbx@80024000 {
                      80 81 68 69
                      70 71 72 73
                      74 75 76 77>;
-       interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+       interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
                          "saif0", "saif1", "i2c0", "i2c1",
                          "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
                          "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
index 4b641c7bf1c252a3465aa7e028e18042cb7ad61b..09089a6d69ed8d1c9b29115e6abce75a6d1a2fcd 100644 (file)
@@ -32,8 +32,8 @@ Example:
                touchscreen-fuzz-x = <4>;
                touchscreen-fuzz-y = <7>;
                touchscreen-fuzz-pressure = <2>;
-               touchscreen-max-x = <4096>;
-               touchscreen-max-y = <4096>;
+               touchscreen-size-x = <4096>;
+               touchscreen-size-y = <4096>;
                touchscreen-max-pressure = <2048>;
 
                ti,x-plate-ohms = <280>;
diff --git a/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt b/Documentation/devicetree/bindings/mtd/jedec,spi-nor.txt
new file mode 100644 (file)
index 0000000..2bee681
--- /dev/null
@@ -0,0 +1,32 @@
+* MTD SPI driver for ST M25Pxx (and similar) serial flash chips
+
+Required properties:
+- #address-cells, #size-cells : Must be present if the device has sub-nodes
+  representing partitions.
+- compatible : May include a device-specific string consisting of the
+               manufacturer and name of the chip. Bear in mind the DT binding
+               is not Linux-only, but in case of Linux, see the "m25p_ids"
+               table in drivers/mtd/devices/m25p80.c for the list of supported
+               chips.
+               Must also include "jedec,spi-nor" for any SPI NOR flash that can
+               be identified by the JEDEC READ ID opcode (0x9F).
+- reg : Chip-Select number
+- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
+
+Optional properties:
+- m25p,fast-read : Use the "fast read" opcode to read data from the chip instead
+                   of the usual "read" opcode. This opcode is not supported by
+                   all chips and support for it can not be detected at runtime.
+                   Refer to your chips' datasheet to check if this is supported
+                   by your chip.
+
+Example:
+
+       flash: m25p80@0 {
+               #address-cells = <1>;
+               #size-cells = <1>;
+               compatible = "spansion,m25p80", "jedec,spi-nor";
+               reg = <0>;
+               spi-max-frequency = <40000000>;
+               m25p,fast-read;
+       };
diff --git a/Documentation/devicetree/bindings/mtd/m25p80.txt b/Documentation/devicetree/bindings/mtd/m25p80.txt
deleted file mode 100644 (file)
index f20b111..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-* MTD SPI driver for ST M25Pxx (and similar) serial flash chips
-
-Required properties:
-- #address-cells, #size-cells : Must be present if the device has sub-nodes
-  representing partitions.
-- compatible : May include a device-specific string consisting of the
-               manufacturer and name of the chip. Bear in mind the DT binding
-               is not Linux-only, but in case of Linux, see the "m25p_ids"
-               table in drivers/mtd/devices/m25p80.c for the list of supported
-               chips.
-               Must also include "nor-jedec" for any SPI NOR flash that can be
-               identified by the JEDEC READ ID opcode (0x9F).
-- reg : Chip-Select number
-- spi-max-frequency : Maximum frequency of the SPI bus the chip can operate at
-
-Optional properties:
-- m25p,fast-read : Use the "fast read" opcode to read data from the chip instead
-                   of the usual "read" opcode. This opcode is not supported by
-                   all chips and support for it can not be detected at runtime.
-                   Refer to your chips' datasheet to check if this is supported
-                   by your chip.
-
-Example:
-
-       flash: m25p80@0 {
-               #address-cells = <1>;
-               #size-cells = <1>;
-               compatible = "spansion,m25p80", "nor-jedec";
-               reg = <0>;
-               spi-max-frequency = <40000000>;
-               m25p,fast-read;
-       };
index abd67c13d3442228834e7df36a53a71454132160..4451ee9732239b50d8329339eb57662e639c24b4 100644 (file)
@@ -3,7 +3,8 @@
 Required properties:
 - compatible: Should be "cdns,[<chip>-]{emac}"
   Use "cdns,at91rm9200-emac" Atmel at91rm9200 SoC.
-  or the generic form: "cdns,emac".
+  Use "cdns,zynq-gem" Xilinx Zynq-7xxx SoC.
+  Or the generic form: "cdns,emac".
 - reg: Address and length of the register set for the device
 - interrupts: Should contain macb interrupt
 - phy-mode: see ethernet.txt file in the same directory.
diff --git a/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt b/Documentation/devicetree/bindings/timer/nxp,lpc3220-timer.txt
new file mode 100644 (file)
index 0000000..51b05a0
--- /dev/null
@@ -0,0 +1,26 @@
+* NXP LPC3220 timer
+
+The NXP LPC3220 timer is used on a wide range of NXP SoCs. This
+includes LPC32xx, LPC178x, LPC18xx and LPC43xx parts.
+
+Required properties:
+- compatible:
+       Should be "nxp,lpc3220-timer".
+- reg:
+       Address and length of the register set.
+- interrupts:
+       Reference to the timer interrupt
+- clocks:
+       Should contain a reference to the timer clock.
+- clock-names:
+       Should contain "timerclk".
+
+Example:
+
+timer1: timer@40085000 {
+       compatible = "nxp,lpc3220-timer";
+       reg = <0x40085000 0x1000>;
+       interrupts = <13>;
+       clocks = <&ccu1 CLK_CPU_TIMER1>;
+       clock-names = "timerclk";
+};
diff --git a/Documentation/devicetree/bindings/timer/st,stm32-timer.txt b/Documentation/devicetree/bindings/timer/st,stm32-timer.txt
new file mode 100644 (file)
index 0000000..8ef28e7
--- /dev/null
@@ -0,0 +1,22 @@
+* STMicroelectronics STM32 timer
+
+The STM32 MCU family has several general-purpose 16-bit and 32-bit timers.
+
+Required properties:
+- compatible : Should be "st,stm32-timer"
+- reg : Address and length of the register set
+- clocks : Reference to the timer input clock
+- interrupts : Reference to the timer interrupt
+
+Optional properties:
+- resets: Reference to a reset controller asserting the timer
+
+Example:
+
+timer5: timer@40000c00 {
+       compatible = "st,stm32-timer";
+       reg = <0x40000c00 0x400>;
+       interrupts = <50>;
+       resets = <&rrc 259>;
+       clocks = <&clk_pmtr1>;
+};
index dc2a18f0b3a10a9e1bd5814fc429fe9246b82ec7..ddbe304beb212238e859640905b83886e5164ac7 100644 (file)
@@ -15,10 +15,8 @@ Optional properties:
   - phys: phandle + phy specifier pair
   - phy-names: must be "usb"
   - dmas: Must contain a list of references to DMA specifiers.
-  - dma-names : Must contain a list of DMA names:
-   - tx0 ... tx<n>
-   - rx0 ... rx<n>
-    - This <n> means DnFIFO in USBHS module.
+  - dma-names : named "ch%d", where %d is the channel number ranging from zero
+                to the number of channels (DnFIFOs) minus one.
 
 Example:
        usbhs: usb@e6590000 {
index 0a926e2ba3ab68ffd541f5a619cd673e59826821..6a34a0f4d37ccf33248cfd81d2c917d45d8401bc 100644 (file)
@@ -50,8 +50,8 @@ prototypes:
        int (*rename2) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *, unsigned int);
        int (*readlink) (struct dentry *, char __user *,int);
-       void * (*follow_link) (struct dentry *, struct nameidata *);
-       void (*put_link) (struct dentry *, struct nameidata *, void *);
+       const char *(*follow_link) (struct dentry *, void **);
+       void (*put_link) (struct inode *, void *);
        void (*truncate) (struct inode *);
        int (*permission) (struct inode *, int, unsigned int);
        int (*get_acl)(struct inode *, int);
index 7cac200e2a85dd1efebc9f05e84cb8a2b9532b37..7eb762eb31361739bac381a7453ecb449facf161 100644 (file)
@@ -1,41 +1,15 @@
-Support is available for filesystems that wish to do automounting support (such
-as kAFS which can be found in fs/afs/). This facility includes allowing
-in-kernel mounts to be performed and mountpoint degradation to be
-requested. The latter can also be requested by userspace.
+Support is available for filesystems that wish to do automounting
+support (such as kAFS which can be found in fs/afs/ and NFS in
+fs/nfs/). This facility includes allowing in-kernel mounts to be
+performed and mountpoint degradation to be requested. The latter can
+also be requested by userspace.
 
 
 ======================
 IN-KERNEL AUTOMOUNTING
 ======================
 
-A filesystem can now mount another filesystem on one of its directories by the
-following procedure:
-
- (1) Give the directory a follow_link() operation.
-
-     When the directory is accessed, the follow_link op will be called, and
-     it will be provided with the location of the mountpoint in the nameidata
-     structure (vfsmount and dentry).
-
- (2) Have the follow_link() op do the following steps:
-
-     (a) Call vfs_kern_mount() to call the appropriate filesystem to set up a
-         superblock and gain a vfsmount structure representing it.
-
-     (b) Copy the nameidata provided as an argument and substitute the dentry
-        argument into it the copy.
-
-     (c) Call do_add_mount() to install the new vfsmount into the namespace's
-        mountpoint tree, thus making it accessible to userspace. Use the
-        nameidata set up in (b) as the destination.
-
-        If the mountpoint will be automatically expired, then do_add_mount()
-        should also be given the location of an expiration list (see further
-        down).
-
-     (d) Release the path in the nameidata argument and substitute in the new
-        vfsmount and its root dentry. The ref counts on these will need
-        incrementing.
+See section "Mount Traps" of  Documentation/filesystems/autofs4.txt
 
 Then from userspace, you can just do something like:
 
@@ -61,17 +35,18 @@ AUTOMATIC MOUNTPOINT EXPIRY
 ===========================
 
 Automatic expiration of mountpoints is easy, provided you've mounted the
-mountpoint to be expired in the automounting procedure outlined above.
+mountpoint to be expired in the automounting procedure outlined separately.
 
 To do expiration, you need to follow these steps:
 
- (3) Create at least one list off which the vfsmounts to be expired can be
-     hung. Access to this list will be governed by the vfsmount_lock.
+ (1) Create at least one list off which the vfsmounts to be expired can be
+     hung.
 
- (4) In step (2c) above, the call to do_add_mount() should be provided with a
-     pointer to this list. It will hang the vfsmount off of it if it succeeds.
+ (2) When a new mountpoint is created in the ->d_automount method, add
+     the mnt to the list using mnt_set_expiry():
+             mnt_set_expiry(newmnt, &afs_vfsmounts);
 
- (5) When you want mountpoints to be expired, call mark_mounts_for_expiry()
+ (3) When you want mountpoints to be expired, call mark_mounts_for_expiry()
      with a pointer to this list. This will process the list, marking every
      vfsmount thereon for potential expiry on the next call.
 
index e69274de8d0c9c1754cc40b8d99d78a724e63e50..3eae250254d581d253dd035109d568f8ef0c7873 100644 (file)
@@ -483,3 +483,20 @@ in your dentry operations instead.
 --
 [mandatory]
        ->aio_read/->aio_write are gone.  Use ->read_iter/->write_iter.
+---
+[recommended]
+       for embedded ("fast") symlinks just set inode->i_link to wherever the
+       symlink body is and use simple_follow_link() as ->follow_link().
+--
+[mandatory]
+       calling conventions for ->follow_link() have changed.  Instead of returning
+       a cookie and using nd_set_link() to store the body to traverse, we return
+       the body to traverse and store the cookie using an explicit void ** argument.
+       nameidata isn't passed at all - nd_jump_link() doesn't need it and
+       nd_[gs]et_link() is gone.
+--
+[mandatory]
+       calling conventions for ->put_link() have changed.  It gets the inode
+       instead of the dentry, it does not get nameidata at all, and it gets
+       called only when the cookie is non-NULL.  Note that the link body isn't
+       available anymore, so if you need it, store it as the cookie.
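
A schematic filesystem-side sketch of the new conventions, assuming a symlink
body that must be freed once the walk is done; the myfs_* names and the
body-reading helper are hypothetical. A filesystem whose symlink body is
embedded in the inode would instead just set inode->i_link and use
simple_follow_link():

static const char *myfs_follow_link(struct dentry *dentry, void **cookie)
{
        /* myfs_read_link_body() is a hypothetical helper returning a
         * kmalloc()ed NUL-terminated path or an ERR_PTR(). */
        char *body = myfs_read_link_body(d_inode(dentry));

        if (IS_ERR(body))
                return body;
        *cookie = body;         /* handed to ->put_link() for release */
        return body;            /* the path the VFS will traverse */
}

static void myfs_put_link(struct inode *inode, void *cookie)
{
        kfree(cookie);          /* only called when the cookie is non-NULL */
}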
index 5d833b32bbcd1046de40a15fee169ed462d274fc..b403b29ef7107cd9bfad4a4d0d509cbeb22f145e 100644 (file)
@@ -350,8 +350,8 @@ struct inode_operations {
        int (*rename2) (struct inode *, struct dentry *,
                        struct inode *, struct dentry *, unsigned int);
        int (*readlink) (struct dentry *, char __user *,int);
-        void * (*follow_link) (struct dentry *, struct nameidata *);
-        void (*put_link) (struct dentry *, struct nameidata *, void *);
+       const char *(*follow_link) (struct dentry *, void **);
+       void (*put_link) (struct inode *, void *);
        int (*permission) (struct inode *, int);
        int (*get_acl)(struct inode *, int);
        int (*setattr) (struct dentry *, struct iattr *);
@@ -436,16 +436,18 @@ otherwise noted.
 
   follow_link: called by the VFS to follow a symbolic link to the
        inode it points to.  Only required if you want to support
-       symbolic links.  This method returns a void pointer cookie
-       that is passed to put_link().
+       symbolic links.  This method returns the symlink body
+       to traverse (and possibly resets the current position with
+       nd_jump_link()).  If the body won't go away until the inode
+       is gone, nothing else is needed; if it needs to be otherwise
+       pinned, the data needed to release whatever we'd grabbed
+       should be stored in the void * variable passed by address to
+       the follow_link() instance.
 
   put_link: called by the VFS to release resources allocated by
-       follow_link().  The cookie returned by follow_link() is passed
-       to this method as the last parameter.  It is used by
-       filesystems such as NFS where page cache is not stable
-       (i.e. page that was installed when the symbolic link walk
-       started might not be in the page cache at the end of the
-       walk).
+       follow_link().  The cookie stored by follow_link() is passed
+       to this method as the last parameter; only called when
+       cookie isn't NULL.
 
   permission: called by the VFS to check for access rights on a POSIX-like
        filesystem.
index 8eb88e974055f62f2c9226bcd6d98b3e8e5ea908..711f75e189eba003e31a3f762fa16be04d1012d0 100644 (file)
@@ -20,7 +20,7 @@ Supported chips:
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp432.html
   * Texas Instruments TMP435
     Prefix: 'tmp435'
-    Addresses scanned: I2C 0x37, 0x48 - 0x4f
+    Addresses scanned: I2C 0x48 - 0x4f
     Datasheet: http://focus.ti.com/docs/prod/folders/print/tmp435.html
 
 Authors:
index 389bb5d618549e5db99ed5ea8cb1b23f38ca48f4..b228ca54bcf4863cdad2a12e4d2533e4fe689a71 100644 (file)
@@ -31,10 +31,10 @@ User manual
 ===========
 
 I2C slave backends behave like standard I2C clients. So, you can instantiate
-them like described in the document 'instantiating-devices'. A quick example
-for instantiating the slave-eeprom driver from userspace:
+them as described in the document 'instantiating-devices'. A quick example for
+instantiating the slave-eeprom driver from userspace at address 0x64 on bus 1:
 
-  # echo 0-0064 > /sys/bus/i2c/drivers/i2c-slave-eeprom/bind
+  # echo slave-24c02 0x64 > /sys/bus/i2c/devices/i2c-1/new_device
 
 Each backend should come with separate documentation to describe its specific
 behaviour and setup.
index 61ab1628a057cc2c4d8b11d892d834f7e5f7773a..7bd4501f0cf93c5da1310ae95fded41f785f0cb3 100644 (file)
@@ -746,6 +746,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
        cpuidle.off=1   [CPU_IDLE]
                        disable the cpuidle sub-system
 
+       cpu_init_udelay=N
+                       [X86] Delay for N microseconds between assert and de-assert
+                       of APIC INIT to start processors.  This delay occurs
+                       whenever a CPU comes online, such as at boot and on
+                       resume from suspend.
+                       Default: 10000
+
        cpcihp_generic= [HW,PCI] Generic port I/O CompactPCI driver
                        Format:
                        <first_slot>,<last_slot>,<port>,<enum_bit>[,<debug>]
@@ -937,6 +943,10 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Enable debug messages at boot time.  See
                        Documentation/dynamic-debug-howto.txt for details.
 
+       nompx           [X86] Disables Intel Memory Protection Extensions.
+                       See Documentation/x86/intel_mpx.txt for more
+                       information about the feature.
+
        eagerfpu=       [X86]
                        on      enable eager fpu restore
                        off     disable eager fpu restore
@@ -1481,6 +1491,12 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        By default, super page will be supported if Intel IOMMU
                        has the capability. With this option, super page will
                        not be supported.
+               ecs_off [Default Off]
+                       By default, extended context tables will be supported if
+                       the hardware advertises that it has support both for the
+                       extended tables themselves, and also PASID support. With
+                       this option set, extended tables will not be used even
+                       on hardware which claims to support them.
 
        intel_idle.max_cstate=  [KNL,HW,ACPI,X86]
                        0       disables intel_idle and fall back on acpi_idle.
@@ -2992,11 +3008,34 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        Set maximum number of finished RCU callbacks to
                        process in one batch.
 
+       rcutree.dump_tree=      [KNL]
+                       Dump the structure of the rcu_node combining tree
+                       out at early boot.  This is used for diagnostic
+                       purposes, to verify correct tree setup.
+
+       rcutree.gp_cleanup_delay=       [KNL]
+                       Set the number of jiffies to delay each step of
+                       RCU grace-period cleanup.  This only has effect
+                       when CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP is set.
+
        rcutree.gp_init_delay=  [KNL]
                        Set the number of jiffies to delay each step of
                        RCU grace-period initialization.  This only has
-                       effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT is
-                       set.
+                       effect when CONFIG_RCU_TORTURE_TEST_SLOW_INIT
+                       is set.
+
+       rcutree.gp_preinit_delay=       [KNL]
+                       Set the number of jiffies to delay each step of
+                       RCU grace-period pre-initialization, that is,
+                       the propagation of recent CPU-hotplug changes up
+                       the rcu_node combining tree.  This only has effect
+                       when CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT is set.
+
+       rcutree.rcu_fanout_exact= [KNL]
+                       Disable autobalancing of the rcu_node combining
+                       tree.  This is used by rcutorture, and might
+                       possibly be useful for architectures having high
+                       cache-to-cache transfer latencies.
 
        rcutree.rcu_fanout_leaf= [KNL]
                        Increase the number of CPUs assigned to each
@@ -3101,7 +3140,11 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
                        test, hence the "fake".
 
        rcutorture.nreaders= [KNL]
-                       Set number of RCU readers.
+                       Set number of RCU readers.  The value -1 selects
+                       N-1, where N is the number of CPUs.  A value
+                       "n" less than -1 selects N-n-2, where N is again
+                       the number of CPUs.  For example, -2 selects N
+                       (the number of CPUs), -3 selects N+1, and so on.
 
        rcutorture.object_debug= [KNL]
                        Enable debug-object double-call_rcu() testing.
index f95746189b5ded41d9ae8bdbbcafec4e1da8820e..13feb697271f0a270334dd3807255ed14af7d7ed 100644 (file)
@@ -617,16 +617,16 @@ case what's actually required is:
 However, stores are not speculated.  This means that ordering -is- provided
 for load-store control dependencies, as in the following example:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        if (q) {
                ACCESS_ONCE(b) = p;
        }
 
-Control dependencies pair normally with other types of barriers.
-That said, please note that ACCESS_ONCE() is not optional!  Without the
-ACCESS_ONCE(), might combine the load from 'a' with other loads from
-'a', and the store to 'b' with other stores to 'b', with possible highly
-counterintuitive effects on ordering.
+Control dependencies pair normally with other types of barriers.  That
+said, please note that READ_ONCE_CTRL() is not optional!  Without the
+READ_ONCE_CTRL(), the compiler might combine the load from 'a' with
+other loads from 'a', and the store to 'b' with other stores to 'b',
+with possible highly counterintuitive effects on ordering.
 
 Worse yet, if the compiler is able to prove (say) that the value of
 variable 'a' is always non-zero, it would be well within its rights
@@ -636,12 +636,15 @@ as follows:
        q = a;
        b = p;  /* BUG: Compiler and CPU can both reorder!!! */
 
-So don't leave out the ACCESS_ONCE().
+Finally, the READ_ONCE_CTRL() includes an smp_read_barrier_depends()
+that DEC Alpha needs in order to respect control dependencies.
+
+So don't leave out the READ_ONCE_CTRL().
 
 It is tempting to try to enforce ordering on identical stores on both
 branches of the "if" statement as follows:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        if (q) {
                barrier();
                ACCESS_ONCE(b) = p;
@@ -655,7 +658,7 @@ branches of the "if" statement as follows:
 Unfortunately, current compilers will transform this as follows at high
 optimization levels:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        barrier();
        ACCESS_ONCE(b) = p;  /* BUG: No ordering vs. load from a!!! */
        if (q) {
@@ -685,7 +688,7 @@ memory barriers, for example, smp_store_release():
 In contrast, without explicit memory barriers, two-legged-if control
 ordering is guaranteed only when the stores differ, for example:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        if (q) {
                ACCESS_ONCE(b) = p;
                do_something();
@@ -694,14 +697,14 @@ ordering is guaranteed only when the stores differ, for example:
                do_something_else();
        }
 
-The initial ACCESS_ONCE() is still required to prevent the compiler from
-proving the value of 'a'.
+The initial READ_ONCE_CTRL() is still required to prevent the compiler
+from proving the value of 'a'.
 
 In addition, you need to be careful what you do with the local variable 'q',
 otherwise the compiler might be able to guess the value and again remove
 the needed conditional.  For example:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        if (q % MAX) {
                ACCESS_ONCE(b) = p;
                do_something();
@@ -714,7 +717,7 @@ If MAX is defined to be 1, then the compiler knows that (q % MAX) is
 equal to zero, in which case the compiler is within its rights to
 transform the above code into the following:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        ACCESS_ONCE(b) = p;
        do_something_else();
 
@@ -725,7 +728,7 @@ is gone, and the barrier won't bring it back.  Therefore, if you are
 relying on this ordering, you should make sure that MAX is greater than
 one, perhaps as follows:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        BUILD_BUG_ON(MAX <= 1); /* Order load from a with store to b. */
        if (q % MAX) {
                ACCESS_ONCE(b) = p;
@@ -742,14 +745,15 @@ of the 'if' statement.
 You must also be careful not to rely too much on boolean short-circuit
 evaluation.  Consider this example:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        if (a || 1 > 0)
                ACCESS_ONCE(b) = 1;
 
-Because the second condition is always true, the compiler can transform
-this example as following, defeating control dependency:
+Because the first condition cannot fault and the second condition is
+always true, the compiler can transform this example as follows,
+defeating the control dependency:
 
-       q = ACCESS_ONCE(a);
+       q = READ_ONCE_CTRL(a);
        ACCESS_ONCE(b) = 1;
 
 This example underscores the need to ensure that the compiler cannot
@@ -762,8 +766,8 @@ demonstrated by two related examples, with the initial values of
 x and y both being zero:
 
        CPU 0                     CPU 1
-       =====================     =====================
-       r1 = ACCESS_ONCE(x);      r2 = ACCESS_ONCE(y);
+       =======================   =======================
+       r1 = READ_ONCE_CTRL(x);   r2 = READ_ONCE_CTRL(y);
        if (r1 > 0)               if (r2 > 0)
          ACCESS_ONCE(y) = 1;       ACCESS_ONCE(x) = 1;
 
@@ -783,7 +787,8 @@ But because control dependencies do -not- provide transitivity, the above
 assertion can fail after the combined three-CPU example completes.  If you
 need the three-CPU example to provide ordering, you will need smp_mb()
 between the loads and stores in the CPU 0 and CPU 1 code fragments,
-that is, just before or just after the "if" statements.
+that is, just before or just after the "if" statements.  Furthermore,
+the original two-CPU example is very fragile and should be avoided.
 
 These two examples are the LB and WWC litmus tests from this paper:
 http://www.cl.cam.ac.uk/users/pes20/ppc-supplemental/test6.pdf and this
@@ -791,6 +796,12 @@ site: https://www.cl.cam.ac.uk/~pes20/ppcmem/index.html.
 
 In summary:
 
+  (*) Control dependencies must be headed by READ_ONCE_CTRL().
+      Or, as a much less preferable alternative, they may be headed
+      by a READ_ONCE() or an ACCESS_ONCE() read, with an
+      smp_read_barrier_depends() interposed between that read and
+      the control-dependent write (both forms are sketched below).
+
   (*) Control dependencies can order prior loads against later stores.
       However, they do -not- guarantee any other sort of ordering:
       Not prior loads against later loads, nor prior stores against
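
Rendered as code, the two forms named in the first summary item look as
follows (a, b, p and q as in the examples above):

        /* Preferred: head the control dependency with READ_ONCE_CTRL(). */
        q = READ_ONCE_CTRL(a);
        if (q)
                ACCESS_ONCE(b) = p;

        /* Less preferable: a plain READ_ONCE() with an explicit
         * smp_read_barrier_depends() before the dependent store. */
        q = READ_ONCE(a);
        smp_read_barrier_depends();
        if (q)
                ACCESS_ONCE(b) = p;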
@@ -1662,7 +1673,7 @@ CPU from reordering them.
 
 There are some more advanced barrier functions:
 
- (*) set_mb(var, value)
+ (*) smp_store_mb(var, value)
 
      This assigns the value to the variable and then inserts a full memory
      barrier after it, depending on the function.  It isn't guaranteed to
@@ -1784,10 +1795,9 @@ for each construct.  These operations all imply certain barriers:
 
      Memory operations issued before the ACQUIRE may be completed after
      the ACQUIRE operation has completed.  An smp_mb__before_spinlock(),
-     combined with a following ACQUIRE, orders prior loads against
-     subsequent loads and stores and also orders prior stores against
-     subsequent stores.  Note that this is weaker than smp_mb()!  The
-     smp_mb__before_spinlock() primitive is free on many architectures.
+     combined with a following ACQUIRE, orders prior stores against
+     subsequent loads and stores. Note that this is weaker than smp_mb()!
+     The smp_mb__before_spinlock() primitive is free on many architectures.
 
  (2) RELEASE operation implication:
 
@@ -1975,7 +1985,7 @@ after it has altered the task state:
        CPU 1
        ===============================
        set_current_state();
-         set_mb();
+         smp_store_mb();
            STORE current->state
            <general barrier>
        LOAD event_indicated
@@ -2016,7 +2026,7 @@ between the STORE to indicate the event and the STORE to set TASK_RUNNING:
        CPU 1                           CPU 2
        =============================== ===============================
        set_current_state();            STORE event_indicated
-         set_mb();                     wake_up();
+         smp_store_mb();               wake_up();
            STORE current->state          <write barrier>
            <general barrier>             STORE current->state
        LOAD event_indicated
index d727a38291005f962848ed40a1ab11db4c167899..53a726855e49bfa4c313e46e15df1eec7cb610ae 100644 (file)
@@ -20,7 +20,7 @@
        files/UDP-Lite-HOWTO.txt
 
    o The Wireshark UDP-Lite WiKi (with capture files):
-       http://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
+       https://wiki.wireshark.org/Lightweight_User_Datagram_Protocol
 
    o The Protocol Spec, RFC 3828, http://www.ietf.org/rfc/rfc3828.txt
 
index 57883ca2498bb5ce818139a73755518e78150df9..e89ce6624af2fab481a708ad1a0e4e20d1bc0c1c 100644 (file)
@@ -48,7 +48,7 @@ preemption must be disabled around such regions.
 
 Note, some FPU functions are already explicitly preempt safe.  For example,
 kernel_fpu_begin and kernel_fpu_end will disable and enable preemption.
-However, math_state_restore must be called with preemption disabled.
+However, fpu__restore() must be called with preemption disabled.
 
 
 RULE #3: Lock acquire and release must be performed by same task
index 21461a0441c12990d1564caffa48f9a7482aaf58..e114513a2731dc667071cee0e860d489c80049c0 100644 (file)
@@ -8,6 +8,10 @@ CONTENTS
  1. Overview
  2. Scheduling algorithm
  3. Scheduling Real-Time Tasks
+   3.1 Definitions
+   3.2 Schedulability Analysis for Uniprocessor Systems
+   3.3 Schedulability Analysis for Multiprocessor Systems
+   3.4 Relationship with SCHED_DEADLINE Parameters
  4. Bandwidth management
    4.1 System-wide settings
    4.2 Task interface
@@ -43,7 +47,7 @@ CONTENTS
  "deadline", to schedule tasks. A SCHED_DEADLINE task should receive
  "runtime" microseconds of execution time every "period" microseconds, and
  these "runtime" microseconds are available within "deadline" microseconds
- from the beginning of the period.  In order to implement this behaviour,
+ from the beginning of the period.  In order to implement this behavior,
  every time the task wakes up, the scheduler computes a "scheduling deadline"
  consistent with the guarantee (using the CBS[2,3] algorithm). Tasks are then
  scheduled using EDF[1] on these scheduling deadlines (the task with the
@@ -52,7 +56,7 @@ CONTENTS
  "admission control" strategy (see Section "4. Bandwidth management") is used
  (clearly, if the system is overloaded this guarantee cannot be respected).
 
- Summing up, the CBS[2,3] algorithms assigns scheduling deadlines to tasks so
+ Summing up, the CBS[2,3] algorithm assigns scheduling deadlines to tasks so
  that each task runs for at most its runtime every period, avoiding any
  interference between different tasks (bandwidth isolation), while the EDF[1]
  algorithm selects the task with the earliest scheduling deadline as the one
@@ -63,7 +67,7 @@ CONTENTS
  In more details, the CBS algorithm assigns scheduling deadlines to
  tasks in the following way:
 
-  - Each SCHED_DEADLINE task is characterised by the "runtime",
+  - Each SCHED_DEADLINE task is characterized by the "runtime",
     "deadline", and "period" parameters;
 
   - The state of the task is described by a "scheduling deadline", and
@@ -78,7 +82,7 @@ CONTENTS
 
     then, if the scheduling deadline is smaller than the current time, or
     this condition is verified, the scheduling deadline and the
-    remaining runtime are re-initialised as
+    remaining runtime are re-initialized as
 
          scheduling deadline = current time + deadline
          remaining runtime = runtime
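+
+    In C-like pseudo-code (variable names assumed; this is not the actual
+    kernel implementation), the wakeup rule above reads:
+
+         if (scheduling_deadline < now ||
+             remaining_runtime * deadline >
+                         (scheduling_deadline - now) * runtime) {
+                 scheduling_deadline = now + deadline;
+                 remaining_runtime = runtime;
+         }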
@@ -126,31 +130,37 @@ CONTENTS
  suited for periodic or sporadic real-time tasks that need guarantees on their
  timing behavior, e.g., multimedia, streaming, control applications, etc.
 
+3.1 Definitions
+------------------------
+
  A typical real-time task is composed of a repetition of computation phases
 (task instances, or jobs) which are activated in a periodic or sporadic
  fashion.
- Each job J_j (where J_j is the j^th job of the task) is characterised by an
+ Each job J_j (where J_j is the j^th job of the task) is characterized by an
  arrival time r_j (the time when the job starts), an amount of computation
  time c_j needed to finish the job, and a job absolute deadline d_j, which
  is the time within which the job should be finished. The maximum execution
- time max_j{c_j} is called "Worst Case Execution Time" (WCET) for the task.
+ time max{c_j} is called "Worst Case Execution Time" (WCET) for the task.
  A real-time task can be periodic with period P if r_{j+1} = r_j + P, or
 sporadic with minimum inter-arrival time P if r_{j+1} >= r_j + P. Finally,
  d_j = r_j + D, where D is the task's relative deadline.
- The utilisation of a real-time task is defined as the ratio between its
+ Summing up, a real-time task can be described as
+       Task = (WCET, D, P)
+
+ The utilization of a real-time task is defined as the ratio between its
  WCET and its period (or minimum inter-arrival time), and represents
  the fraction of CPU time needed to execute the task.
 
- If the total utilisation sum_i(WCET_i/P_i) is larger than M (with M equal
+ If the total utilization U=sum(WCET_i/P_i) is larger than M (with M equal
  to the number of CPUs), then the scheduler is unable to respect all the
  deadlines.
- Note that total utilisation is defined as the sum of the utilisations
+ Note that total utilization is defined as the sum of the utilizations
  WCET_i/P_i over all the real-time tasks in the system. When considering
  multiple real-time tasks, the parameters of the i-th task are indicated
  with the "_i" suffix.
- Moreover, if the total utilisation is larger than M, then we risk starving
+ Moreover, if the total utilization is larger than M, then we risk starving
 non-real-time tasks by real-time tasks.
- If, instead, the total utilisation is smaller than M, then non real-time
+ If, instead, the total utilization is smaller than M, then non real-time
  tasks will not be starved and the system might be able to respect all the
  deadlines.
  As a matter of fact, in this case it is possible to provide an upper bound
@@ -159,38 +169,119 @@ CONTENTS
  More precisely, it can be proven that using a global EDF scheduler the
 maximum tardiness of each task is smaller than or equal to
       ((M − 1) · WCET_max − WCET_min)/(M − (M − 2) · U_max) + WCET_max
- where WCET_max = max_i{WCET_i} is the maximum WCET, WCET_min=min_i{WCET_i}
- is the minimum WCET, and U_max = max_i{WCET_i/P_i} is the maximum utilisation.
+ where WCET_max = max{WCET_i} is the maximum WCET, WCET_min=min{WCET_i}
+ is the minimum WCET, and U_max = max{WCET_i/P_i} is the maximum
+ utilization[12].
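+ As an illustration (numbers assumed, not taken from [12]): with M = 2,
+ WCET_max = 10ms, WCET_min = 1ms and U_max = 0.5, the bound evaluates to
+       ((2 − 1) · 10 − 1)/(2 − (2 − 2) · 0.5) + 10 = 9/2 + 10 = 14.5ms.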
+
+3.2 Schedulability Analysis for Uniprocessor Systems
+------------------------
 
  If M=1 (uniprocessor system), or in case of partitioned scheduling (each
  real-time task is statically assigned to one and only one CPU), it is
  possible to formally check if all the deadlines are respected.
  If D_i = P_i for all tasks, then EDF is able to respect all the deadlines
- of all the tasks executing on a CPU if and only if the total utilisation
+ of all the tasks executing on a CPU if and only if the total utilization
 of the tasks running on such a CPU is smaller than or equal to 1.
  If D_i != P_i for some task, then it is possible to define the density of
- a task as C_i/min{D_i,T_i}, and EDF is able to respect all the deadlines
- of all the tasks running on a CPU if the sum sum_i C_i/min{D_i,T_i} of the
- densities of the tasks running on such a CPU is smaller or equal than 1
- (notice that this condition is only sufficient, and not necessary).
+ a task as WCET_i/min{D_i,P_i}, and EDF is able to respect all the deadlines
+ of all the tasks running on a CPU if the sum of the densities of the tasks
+ running on such a CPU is smaller than or equal to 1:
+       sum(WCET_i / min{D_i, P_i}) <= 1
+ It is important to notice that this condition is only sufficient, and not
+ necessary: there are task sets that are schedulable, but do not respect the
+ condition. For example, consider the task set {Task_1,Task_2} composed of
+ Task_1=(50ms,50ms,100ms) and Task_2=(10ms,100ms,100ms).
+ EDF is clearly able to schedule the two tasks without missing any deadline
+ (Task_1 is scheduled as soon as it is released, and finishes just in time
+ to respect its deadline; Task_2 is scheduled immediately after Task_1, hence
+ its response time cannot be larger than 50ms + 10ms = 60ms) even if
+       50 / min{50,100} + 10 / min{100, 100} = 50 / 50 + 10 / 100 = 1.1
+ Of course it is possible to test the exact schedulability of tasks with
+ D_i != P_i (checking a condition that is both sufficient and necessary),
+ but this cannot be done by comparing the total utilization or density with
+ a constant. Instead, the so-called "processor demand" approach can be used,
+ computing the total amount of CPU time h(t) needed by all the tasks to
+ respect all of their deadlines in a time interval of size t, and comparing
+ such a time with the interval size t. If h(t) is smaller than t (that is,
+ the amount of time needed by the tasks in a time interval of size t is
+ smaller than the size of the interval) for all the possible values of t, then
+ EDF is able to schedule the tasks respecting all of their deadlines. Since
+ performing this check for all possible values of t is impossible, it has been
+ proven[4,5,6] that it is sufficient to perform the test for values of t
+ between 0 and a maximum value L. The cited papers contain all of the
+ mathematical details and explain how to compute h(t) and L.
+ In any case, this kind of analysis is too complex and too time-consuming
+ to be performed on-line. Hence, as explained in Section 4, Linux uses an
+ admission test based on the tasks' utilizations.
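+
+ As an illustrative sketch (plain C, not kernel code; names assumed), a
+ brute-force version of this test over integer time points, using the
+ standard demand bound function
+ h(t) = sum over tasks with d[i] <= t of ((t - d[i]) / p[i] + 1) * wcet[i]:
+
+       /* returns 1 if h(t) <= t for all t in 1..L, 0 otherwise */
+       int demand_test(const unsigned long *wcet, const unsigned long *d,
+                       const unsigned long *p, int n, unsigned long L)
+       {
+               unsigned long t, h;
+               int i;
+
+               for (t = 1; t <= L; t++) {
+                       h = 0;
+                       for (i = 0; i < n; i++)
+                               if (t >= d[i])
+                                       h += ((t - d[i]) / p[i] + 1) * wcet[i];
+                       if (h > t)
+                               return 0;
+               }
+               return 1;
+       }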
+
+3.3 Schedulability Analysis for Multiprocessor Systems
+------------------------
 
 On multiprocessor systems with global EDF scheduling (non-partitioned
 systems), a sufficient test for schedulability cannot be based on the
- utilisations (it can be shown that task sets with utilisations slightly
- larger than 1 can miss deadlines regardless of the number of CPUs M).
- However, as previously stated, enforcing that the total utilisation is smaller
- than M is enough to guarantee that non real-time tasks are not starved and
- that the tardiness of real-time tasks has an upper bound.
+ utilizations or densities: it can be shown that even if D_i = P_i task
+ sets with utilizations slightly larger than 1 can miss deadlines regardless
+ of the number of CPUs.
+
+ Consider a set {Task_1,...Task_{M+1}} of M+1 tasks on a system with M
+ CPUs, with the first task Task_1=(P,P,P) having period, relative deadline
+ and WCET equal to P. The remaining M tasks Task_i=(e,P-1,P-1) have an
+ arbitrarily small worst case execution time (indicated as "e" here) and a
+ period smaller than the one of the first task. Hence, if all the tasks
+ activate at the same time t, global EDF schedules these M tasks first
+ (because their absolute deadlines are equal to t + P - 1, hence they are
+ smaller than the absolute deadline of Task_1, which is t + P). As a
+ result, Task_1 can be scheduled only at time t + e, and will finish at
+ time t + e + P, after its absolute deadline. The total utilization of the
+ task set is U = M · e / (P - 1) + P / P = M · e / (P - 1) + 1, and for small
+ values of e this can become very close to 1. This is known as "Dhall's
+ effect"[7]. Note: the example in the original paper by Dhall has been
+ slightly simplified here (for example, Dhall more correctly computed
+ lim_{e->0}U).
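+ To make the construction concrete: with M = 2 CPUs, P = 10ms and
+ e = 0.1ms (illustrative numbers), U = 2 · 0.1 / 9 + 1, which is about
+ 1.022, yet Task_1 misses its deadline.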
+
+ More complex schedulability tests for global EDF have been developed in
+ real-time literature[8,9], but they are not based on a simple comparison
+ between total utilization (or density) and a fixed constant. If all tasks
+ have D_i = P_i, a sufficient schedulability condition can be expressed in
+ a simple way:
+       sum(WCET_i / P_i) <= M - (M - 1) · U_max
+ where U_max = max{WCET_i / P_i}[10]. Notice that for U_max = 1,
+ M - (M - 1) · U_max becomes M - M + 1 = 1 and this schedulability condition
+ just confirms Dhall's effect. A more complete survey of the literature
+ about schedulability tests for multi-processor real-time scheduling can be
+ found in [11].
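+ For example, on M = 4 CPUs with U_max = 0.5 (illustrative numbers), the
+ condition admits task sets with total utilization up to 4 − 3 · 0.5 = 2.5.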
+
+ As seen, enforcing that the total utilization is smaller than M does not
+ guarantee that global EDF schedules the tasks without missing any deadline
+ (in other words, global EDF is not an optimal scheduling algorithm). However,
+ a total utilization smaller than M is enough to guarantee that non real-time
+ tasks are not starved and that the tardiness of real-time tasks has an upper
+ bound[12] (as previously noted). Different bounds on the maximum tardiness
+ experienced by real-time tasks have been developed in various papers[13,14],
+ but the theoretical result that is important for SCHED_DEADLINE is that if
+ the total utilization is smaller than or equal to M, then the response
+ times of the tasks are limited.
+
+3.4 Relationship with SCHED_DEADLINE Parameters
+------------------------
 
- SCHED_DEADLINE can be used to schedule real-time tasks guaranteeing that
- the jobs' deadlines of a task are respected. In order to do this, a task
- must be scheduled by setting:
+ Finally, it is important to understand the relationship between the
+ SCHED_DEADLINE scheduling parameters described in Section 2 (runtime,
+ deadline and period) and the real-time task parameters (WCET, D, P)
+ described in this section. Note that a task's temporal constraints are
+ represented by its absolute deadlines d_j = r_j + D described above, while
+ SCHED_DEADLINE schedules the tasks according to scheduling deadlines (see
+ Section 2).
+ If an admission test is used to guarantee that the scheduling deadlines
+ are respected, then SCHED_DEADLINE can be used to schedule real-time tasks
+ guaranteeing that all the jobs' deadlines of a task are respected.
+ In order to do this, a task must be scheduled by setting:
 
   - runtime >= WCET
   - deadline = D
   - period <= P
 
- IOW, if runtime >= WCET and if period is >= P, then the scheduling deadlines
+ IOW, if runtime >= WCET and if period is <= P, then the scheduling deadlines
  and the absolute deadlines (d_j) coincide, so a proper admission control
 allows one to respect the jobs' absolute deadlines for this task (this is what is
  called "hard schedulability property" and is an extension of Lemma 1 of [2]).
@@ -206,6 +297,39 @@ CONTENTS
       Symposium, 1998. http://retis.sssup.it/~giorgio/paps/1998/rtss98-cbs.pdf
   3 - L. Abeni. Server Mechanisms for Multimedia Applications. ReTiS Lab
       Technical Report. http://disi.unitn.it/~abeni/tr-98-01.pdf
+  4 - J. Y. Leung and M. L. Merrill. A Note on Preemptive Scheduling of
+      Periodic, Real-Time Tasks. Information Processing Letters, vol. 11,
+      no. 3, pp. 115-118, 1980.
+  5 - S. K. Baruah, A. K. Mok and L. E. Rosier. Preemptively Scheduling
+      Hard-Real-Time Sporadic Tasks on One Processor. Proceedings of the
+      11th IEEE Real-time Systems Symposium, 1990.
+  6 - S. K. Baruah, L. E. Rosier and R. R. Howell. Algorithms and Complexity
+      Concerning the Preemptive Scheduling of Periodic Real-Time tasks on
+      One Processor. Real-Time Systems Journal, vol. 4, no. 2, pp 301-324,
+      1990.
+  7 - S. J. Dhall and C. L. Liu. On a real-time scheduling problem. Operations
+      research, vol. 26, no. 1, pp 127-140, 1978.
+  8 - T. Baker. Multiprocessor EDF and Deadline Monotonic Schedulability
+      Analysis. Proceedings of the 24th IEEE Real-Time Systems Symposium, 2003.
+  9 - T. Baker. An Analysis of EDF Schedulability on a Multiprocessor.
+      IEEE Transactions on Parallel and Distributed Systems, vol. 16, no. 8,
+      pp 760-768, 2005.
+  10 - J. Goossens, S. Funk and S. Baruah, Priority-Driven Scheduling of
+       Periodic Task Systems on Multiprocessors. Real-Time Systems Journal,
+       vol. 25, no. 2–3, pp. 187–205, 2003.
+  11 - R. Davis and A. Burns. A Survey of Hard Real-Time Scheduling for
+       Multiprocessor Systems. ACM Computing Surveys, vol. 43, no. 4, 2011.
+       http://www-users.cs.york.ac.uk/~robdavis/papers/MPSurveyv5.0.pdf
+  12 - U. C. Devi and J. H. Anderson. Tardiness Bounds under Global EDF
+       Scheduling on a Multiprocessor. Real-Time Systems Journal, vol. 32,
+       no. 2, pp 133-189, 2008.
+  13 - P. Valente and G. Lipari. An Upper Bound to the Lateness of Soft
+       Real-Time Tasks Scheduled by EDF on Multiprocessors. Proceedings of
+       the 26th IEEE Real-Time Systems Symposium, 2005.
+  14 - J. Erickson, U. Devi and S. Baruah. Improved tardiness bounds for
+       Global EDF. Proceedings of the 22nd Euromicro Conference on
+       Real-Time Systems, 2010.
+
 
 4. Bandwidth management
 =======================
@@ -218,10 +342,10 @@ CONTENTS
  no guarantee can be given on the actual scheduling of the -deadline tasks.
 
  As already stated in Section 3, a necessary condition to be respected to
- correctly schedule a set of real-time tasks is that the total utilisation
+ correctly schedule a set of real-time tasks is that the total utilization
  is smaller than M. When talking about -deadline tasks, this requires that
  the sum of the ratio between runtime and period for all tasks is smaller
- than M. Notice that the ratio runtime/period is equivalent to the utilisation
+ than M. Notice that the ratio runtime/period is equivalent to the utilization
  of a "traditional" real-time task, and is also often referred to as
  "bandwidth".
  The interface used to control the CPU bandwidth that can be allocated
@@ -251,7 +375,7 @@ CONTENTS
  The system wide settings are configured under the /proc virtual file system.
 
  For now the -rt knobs are used for -deadline admission control and the
- -deadline runtime is accounted against the -rt runtime. We realise that this
+ -deadline runtime is accounted against the -rt runtime. We realize that this
  isn't entirely desirable; however, it is better to have a small interface for
  now, and be able to change it easily later. The ideal situation (see 5.) is to
  run -rt tasks from a -deadline server; in which case the -rt bandwidth is a
index 1e52d67d0abf5c8ccb86b6aa5d1170f509d73132..dbe6623fed1c286bd49277486492b2960428eff6 100644 (file)
@@ -198,6 +198,9 @@ TTY_IO_ERROR                If set, causes all subsequent userspace read/write
 
 TTY_OTHER_CLOSED       Device is a pty and the other side has closed.
 
+TTY_OTHER_DONE         Device is a pty and the other side has closed and
+                       all pending input processing has been completed.
+
 TTY_NO_WRITE_SPLIT     Prevent driver from splitting up writes into
                        smaller chunks.
 
index 43e94ea6d2cad8d0e17715a7ecf4d1e31b9b9e0e..263b907517ac2cd14e3b8472f4bc23f4aa8aae07 100644 (file)
@@ -15,8 +15,7 @@ Contents:
   a) Discovering and configuring TCMU uio devices
   b) Waiting for events on the device(s)
   c) Managing the command ring
-3) Command filtering and pass_level
-4) A final note
+3) A final note
 
 
 TCM Userspace Design
@@ -324,7 +323,7 @@ int handle_device_events(int fd, void *map)
   /* Process events from cmd ring until we catch up with cmd_head */
   while (ent != (void *)mb + mb->cmdr_off + mb->cmd_head) {
 
-    if (tcmu_hdr_get_op(&ent->hdr) == TCMU_OP_CMD) {
+    if (tcmu_hdr_get_op(ent->hdr.len_op) == TCMU_OP_CMD) {
       uint8_t *cdb = (void *)mb + ent->req.cdb_off;
       bool success = true;
 
@@ -339,8 +338,12 @@ int handle_device_events(int fd, void *map)
         ent->rsp.scsi_status = SCSI_CHECK_CONDITION;
       }
     }
+    else if (tcmu_hdr_get_op(ent->hdr.len_op) != TCMU_OP_PAD) {
+      /* Tell the kernel we didn't handle unknown opcodes */
+      ent->hdr.uflags |= TCMU_UFLAG_UNKNOWN_OP;
+    }
     else {
-      /* Do nothing for PAD entries */
+      /* Do nothing for PAD entries except update cmd_tail */
     }
 
     /* update cmd_tail */
@@ -360,28 +363,6 @@ int handle_device_events(int fd, void *map)
 }
 
 
-Command filtering and pass_level
---------------------------------
-
-TCMU supports a "pass_level" option with valid values of 0 or 1.  When
-the value is 0 (the default), nearly all SCSI commands received for
-the device are passed through to the handler. This allows maximum
-flexibility but increases the amount of code required by the handler,
-to support all mandatory SCSI commands. If pass_level is set to 1,
-then only IO-related commands are presented, and the rest are handled
-by LIO's in-kernel command emulation. The commands presented at level
-1 include all versions of:
-
-READ
-WRITE
-WRITE_VERIFY
-XDWRITEREAD
-WRITE_SAME
-COMPARE_AND_WRITE
-SYNCHRONIZE_CACHE
-UNMAP
-
-
 A final note
 ------------
 
index 53838d9c6295792501f2825175ac9c314c1a2fc7..c59bd9bc41efa984cfd5f0e17ac5fd6156acfca6 100644 (file)
@@ -169,6 +169,10 @@ Shadow pages contain the following information:
     Contains the value of cr4.smep && !cr0.wp for which the page is valid
     (pages for which this is true are different from other pages; see the
     treatment of cr0.wp=0 below).
+  role.smap_andnot_wp:
+    Contains the value of cr4.smap && !cr0.wp for which the page is valid
+    (pages for which this is true are different from other pages; see the
+    treatment of cr0.wp=0 below).
   gfn:
     Either the guest page table containing the translations shadowed by this
     page, or the base page frame for linear translations.  See role.direct.
@@ -344,10 +348,16 @@ on fault type:
 
 (user write faults generate a #PF)
 
-In the first case there is an additional complication if CR4.SMEP is
-enabled: since we've turned the page into a kernel page, the kernel may now
-execute it.  We handle this by also setting spte.nx.  If we get a user
-fetch or read fault, we'll change spte.u=1 and spte.nx=gpte.nx back.
+In the first case there are two additional complications:
+- if CR4.SMEP is enabled: since we've turned the page into a kernel page,
+  the kernel may now execute it.  We handle this by also setting spte.nx.
+  If we get a user fetch or read fault, we'll change spte.u=1 and
+  spte.nx=gpte.nx back.
+- if CR4.SMAP is disabled: since the page has been changed to a kernel
+  page, it cannot be reused when CR4.SMAP is enabled. We set
+  CR4.SMAP && !CR0.WP into the shadow page's role to avoid this case. Note
+  that we do not care about the case where CR4.SMAP is enabled, since KVM
+  will directly inject a #PF into the guest due to the failed permission
+  check.
 
 To prevent an spte that was converted into a kernel page with cr0.wp=0
 from being written by the kernel after cr0.wp has changed to 1, we make
index 88b85899d30953a6be096a9e045fcf54be3676b4..7c1f9fad667460ff143b867ca5ce5d629560e876 100644 (file)
@@ -1124,7 +1124,6 @@ The boot loader *must* fill out the following fields in bp,
 
     o hdr.code32_start
     o hdr.cmd_line_ptr
-    o hdr.cmdline_size
     o hdr.ramdisk_image (if applicable)
     o hdr.ramdisk_size  (if applicable)
 
index 9132b86176a3899b6ad8bd7f4bd5b630dd6fa031..33884d15612599805f922f386474109ab44998ed 100644 (file)
@@ -18,10 +18,10 @@ Some of these entries are:
 
  - system_call: syscall instruction from 64-bit code.
 
- - ia32_syscall: int 0x80 from 32-bit or 64-bit code; compat syscall
+ - entry_INT80_compat: int 0x80 from 32-bit or 64-bit code; compat syscall
    either way.
 
- - ia32_syscall, ia32_sysenter: syscall and sysenter from 32-bit
+ - entry_INT80_compat, ia32_sysenter: syscall and sysenter from 32-bit
    code
 
  - interrupt: An array of entries.  Every IDT vector that doesn't
diff --git a/Documentation/x86/kernel-stacks b/Documentation/x86/kernel-stacks
new file mode 100644 (file)
index 0000000..0f3a6c2
--- /dev/null
@@ -0,0 +1,141 @@
+Kernel stacks on x86-64
+-----------------------
+
+Most of the text from Keith Owens, hacked by AK
+
+x86_64 page size (PAGE_SIZE) is 4K.
+
+Like all other architectures, x86_64 has a kernel stack for every
+active thread.  These thread stacks are THREAD_SIZE (2*PAGE_SIZE) big.
+These stacks contain useful data as long as a thread is alive or a
+zombie. While the thread is in user space the kernel stack is empty
+except for the thread_info structure at the bottom.
+
+In addition to the per thread stacks, there are specialized stacks
+associated with each CPU.  These stacks are only used while the kernel
+is in control on that CPU; when a CPU returns to user space the
+specialized stacks contain no useful data.  The main CPU stacks are:
+
+* Interrupt stack.  IRQSTACKSIZE
+
+  Used for external hardware interrupts.  If this is the first external
+  hardware interrupt (i.e. not a nested hardware interrupt) then the
+  kernel switches from the current task to the interrupt stack.  Like
+  the split thread and interrupt stacks on i386, this gives more room
+  for kernel interrupt processing without having to increase the size
+  of every per thread stack.
+
+  The interrupt stack is also used when processing a softirq.
+
+Switching to the kernel interrupt stack is done by software based on a
+per CPU interrupt nest counter. This is needed because x86-64 "IST"
+hardware stacks cannot nest without races.
+
+x86_64 also has a feature which is not available on i386, the ability
+to automatically switch to a new stack for designated events such as
+double fault or NMI, which makes it easier to handle these unusual
+events on x86_64.  This feature is called the Interrupt Stack Table
+(IST).  There can be up to 7 IST entries per CPU. The IST code is an
+index into the Task State Segment (TSS). The IST entries in the TSS
+point to dedicated stacks; each stack can be a different size.
+
+An IST is selected by a non-zero value in the IST field of an
+interrupt-gate descriptor.  When an interrupt occurs and the hardware
+loads such a descriptor, the hardware automatically sets the new stack
+pointer based on the IST value, then invokes the interrupt handler.  If
+the interrupt came from user mode, then the interrupt handler prologue
+will switch back to the per-thread stack.  If software wants to allow
+nested IST interrupts then the handler must adjust the IST values on
+entry to and exit from the interrupt handler.  (This is occasionally
+done, e.g. for debug exceptions.)
+
+Events with different IST codes (i.e. with different stacks) can be
+nested.  For example, a debug interrupt can safely be interrupted by an
+NMI.  arch/x86_64/kernel/entry.S::paranoidentry adjusts the stack
+pointers on entry to and exit from all IST events, in theory allowing
+IST events with the same code to be nested.  However in most cases, the
+stack size allocated to an IST assumes no nesting for the same code.
+If that assumption is ever broken then the stacks will become corrupt.
+
+The currently assigned IST stacks are :-
+
+* DOUBLEFAULT_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
+
+  Used for interrupt 8 - Double Fault Exception (#DF).
+
+  Invoked when handling one exception causes another exception. Happens
+  when the kernel is very confused (e.g. kernel stack pointer corrupt).
+  Using a separate stack allows the kernel to recover from it well enough
+  in many cases to still output an oops.
+
+* NMI_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
+
+  Used for non-maskable interrupts (NMI).
+
+  NMI can be delivered at any time, including when the kernel is in the
+  middle of switching stacks.  Using IST for NMI events avoids making
+  assumptions about the previous state of the kernel stack.
+
+* DEBUG_STACK.  DEBUG_STKSZ
+
+  Used for hardware debug interrupts (interrupt 1) and for software
+  debug interrupts (INT3).
+
+  When debugging a kernel, debug interrupts (both hardware and
+  software) can occur at any time.  Using IST for these interrupts
+  avoids making assumptions about the previous state of the kernel
+  stack.
+
+* MCE_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
+
+  Used for interrupt 18 - Machine Check Exception (#MC).
+
+  MCE can be delivered at any time, including when the kernel is in the
+  middle of switching stacks.  Using IST for MCE events avoids making
+  assumptions about the previous state of the kernel stack.
+
+For more details see the Intel IA32 or AMD AMD64 architecture manuals.
+
+
+Printing backtraces on x86
+--------------------------
+
+The question about the '?' preceding function names in an x86 stacktrace
+keeps popping up; here's an in-depth explanation. It helps if the reader
+stares at print_context_stack() and the whole machinery in and around
+arch/x86/kernel/dumpstack.c.
+
+Adapted from Ingo's mail, Message-ID: <20150521101614.GA10889@gmail.com>:
+
+We always scan the full kernel stack for return addresses stored on
+the kernel stack(s) [*], from stack top to stack bottom, and print out
+anything that 'looks like' a kernel text address.
+
+If it fits into the frame pointer chain, we print it without a question
+mark, knowing that it's part of the real backtrace.
+
+If the address does not fit into our expected frame pointer chain we
+still print it, but we print a '?'. It can mean two things:
+
+ - either the address is not part of the call chain: it's just stale
+   values on the kernel stack, from earlier function calls. This is
+   the common case.
+
+ - or it is part of the call chain, but the frame pointer was not set
+   up properly within the function, so we don't recognize it.
+
+This way we will always print out the real call chain (plus a few more
+entries), regardless of whether the frame pointer was set up correctly
+or not - but in most cases we'll get the call chain right as well. The
+entries printed are strictly in stack order, so you can deduce more
+information from that as well.
+
+The most important property of this method is that we _never_ lose
+information: we always strive to print _all_ addresses on the stack(s)
+that look like kernel text addresses, so if debug information is wrong,
+we still print out the real call chain as well - just with more question
+marks than ideal.
+
+[*] For things like IRQ and IST stacks, we also scan those stacks, in
+    the right order, and try to cross from one stack into another
+    reconstructing the call chain. This works most of the time.
index cc071dc333c213676f2af659f318d7f836f2278a..860bc3adc223440df4f6a4aec7bc9fdf95e38c75 100644 (file)
@@ -1,7 +1,19 @@
 MTRR (Memory Type Range Register) control
-3 Jun 1999
-Richard Gooch
-<rgooch@atnf.csiro.au>
+
+Richard Gooch <rgooch@atnf.csiro.au> - 3 Jun 1999
+Luis R. Rodriguez <mcgrof@do-not-panic.com> - April 9, 2015
+
+===============================================================================
+Phasing out MTRR use
+
+MTRR use is replaced on modern x86 hardware with PAT. Over time, the only
+type of MTRR expected to remain supported is the write-combining type. As
+MTRR use is phased out, device drivers should use arch_phys_wc_add(), which
+adds an effective MTRR on non-PAT systems and is a no-op on PAT-enabled
+systems.
+
+For details refer to Documentation/x86/pat.txt.
+
+===============================================================================
 
   On Intel P6 family processors (Pentium Pro, Pentium II and later)
   the Memory Type Range Registers (MTRRs) may be used to control
index cf08c9fff3cdaa62b2217f8e4526d9a7cc88fbec..54944c71b819bd7b37aeb4d221ef02fffd201879 100644 (file)
@@ -12,7 +12,7 @@ virtual addresses.
 
 PAT allows for different types of memory attributes. The most commonly used
 ones that will be supported at this time are Write-back, Uncached,
-Write-combined and Uncached Minus.
+Write-combined, Write-through and Uncached Minus.
 
 
 PAT APIs
@@ -34,16 +34,23 @@ ioremap                |    --    |    UC-     |       UC-        |
                        |          |            |                  |
 ioremap_cache          |    --    |    WB      |       WB         |
                        |          |            |                  |
+ioremap_uc             |    --    |    UC      |       UC         |
+                       |          |            |                  |
 ioremap_nocache        |    --    |    UC-     |       UC-        |
                        |          |            |                  |
 ioremap_wc             |    --    |    --      |       WC         |
                        |          |            |                  |
+ioremap_wt             |    --    |    --      |       WT         |
+                       |          |            |                  |
 set_memory_uc          |    UC-   |    --      |       --         |
  set_memory_wb         |          |            |                  |
                        |          |            |                  |
 set_memory_wc          |    WC    |    --      |       --         |
  set_memory_wb         |          |            |                  |
                        |          |            |                  |
+set_memory_wt          |    WT    |    --      |       --         |
+ set_memory_wb         |          |            |                  |
+                       |          |            |                  |
 pci sysfs resource     |    --    |    --      |       UC-        |
                        |          |            |                  |
 pci sysfs resource_wc  |    --    |    --      |       WC         |
@@ -102,7 +109,38 @@ wants to export a RAM region, it has to do set_memory_uc() or set_memory_wc()
 as step 0 above and also track the usage of those pages and use set_memory_wb()
 before the page is freed to free pool.
 
-
+MTRR effects on PAT / non-PAT systems
+-------------------------------------
+
+The following table provides the effects of using write-combining MTRRs when
+using ioremap*() calls on x86 for both non-PAT and PAT systems. Ideally
+mtrr_add() usage will be phased out in favor of arch_phys_wc_add(), which is
+a no-op on PAT-enabled systems. The region over which arch_phys_wc_add() is
+made should already have been ioremapped with WC attributes or PAT entries;
+this can be done by using ioremap_wc() / set_memory_wc().  Devices which
+combine areas of IO memory desired to remain uncacheable with areas where
+write-combining is desirable should consider use of ioremap_uc() followed by
+set_memory_wc() to white-list effective write-combined areas.  Such use is
+nevertheless discouraged as the effective memory type is considered
+implementation defined; this strategy can only be used as a last resort on
+devices with size-constrained regions where MTRR write-combining would
+otherwise not be effective.
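+
+A minimal sketch of the preferred pattern for a driver mapping a
+write-combinable region ('bar_start' and 'bar_len' are assumed names):
+
+       void __iomem *fb;
+       int wc_cookie;
+
+       fb = ioremap_wc(bar_start, bar_len);    /* WC via PAT if available */
+       wc_cookie = arch_phys_wc_add(bar_start, bar_len); /* no-op on PAT */
+       /* ... use the mapping ... */
+       arch_phys_wc_del(wc_cookie);
+       iounmap(fb);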
+
+----------------------------------------------------------------------
+MTRR Non-PAT   PAT    Linux ioremap value        Effective memory type
+----------------------------------------------------------------------
+                                                  Non-PAT |  PAT
+     PAT
+     |PCD
+     ||PWT
+     |||
+WC   000      WB      _PAGE_CACHE_MODE_WB            WC   |   WC
+WC   001      WC      _PAGE_CACHE_MODE_WC            WC*  |   WC
+WC   010      UC-     _PAGE_CACHE_MODE_UC_MINUS      WC*  |   UC
+WC   011      UC      _PAGE_CACHE_MODE_UC            UC   |   UC
+----------------------------------------------------------------------
+
+(*) denotes implementation defined and is discouraged
 
 Notes:
 
@@ -115,8 +153,8 @@ can be more restrictive, in case of any existing aliasing for that address.
 For example: If there is an existing uncached mapping, a new ioremap_wc can
 return uncached mapping in place of write-combine requested.
 
-set_memory_[uc|wc] and set_memory_wb should be used in pairs, where driver will
-first make a region uc or wc and switch it back to wb after use.
+set_memory_[uc|wc|wt] and set_memory_wb should be used in pairs, where the
+driver will first make a region uc, wc or wt and switch it back to wb after
+use.
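+
+A minimal sketch of the pairing (assuming 'vaddr' and 'nr_pages' describe a
+RAM range owned by the driver):
+
+       set_memory_wc((unsigned long)vaddr, nr_pages);
+       /* ... use the write-combined range ... */
+       set_memory_wb((unsigned long)vaddr, nr_pages);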
 
 Over time writes to /proc/mtrr will be deprecated in favor of using PAT based
 interfaces. Users writing to /proc/mtrr are suggested to use above interfaces.
@@ -124,7 +162,7 @@ interfaces. Users writing to /proc/mtrr are suggested to use above interfaces.
 Drivers should use ioremap_[uc|wc] to access PCI BARs with [uc|wc] access
 types.
 
-Drivers should use set_memory_[uc|wc] to set access type for RAM ranges.
+Drivers should use set_memory_[uc|wc|wt] to set access type for RAM ranges.
 
 
 PAT debugging
index 5223479291a295e7b248661d28c14ea8a33b3eaa..68ed3114c363bf7332b2515b156bf0953ca88d24 100644 (file)
@@ -31,6 +31,9 @@ Machine check
                (e.g. BIOS or hardware monitoring applications), conflicting
                with OS's error handling, and you cannot deactivate the agent,
                then this option will be a help.
+   mce=no_lmce
+               Do not opt-in to Local MCE delivery. Use legacy method
+               to broadcast MCEs.
    mce=bootlog
                Enable logging of machine checks left over from booting.
                Disabled by default on AMD because some BIOS leave bogus ones.
diff --git a/Documentation/x86/x86_64/kernel-stacks b/Documentation/x86/x86_64/kernel-stacks
deleted file mode 100644 (file)
index e3c8a49..0000000
+++ /dev/null
@@ -1,101 +0,0 @@
-Most of the text from Keith Owens, hacked by AK
-
-x86_64 page size (PAGE_SIZE) is 4K.
-
-Like all other architectures, x86_64 has a kernel stack for every
-active thread.  These thread stacks are THREAD_SIZE (2*PAGE_SIZE) big.
-These stacks contain useful data as long as a thread is alive or a
-zombie. While the thread is in user space the kernel stack is empty
-except for the thread_info structure at the bottom.
-
-In addition to the per thread stacks, there are specialized stacks
-associated with each CPU.  These stacks are only used while the kernel
-is in control on that CPU; when a CPU returns to user space the
-specialized stacks contain no useful data.  The main CPU stacks are:
-
-* Interrupt stack.  IRQSTACKSIZE
-
-  Used for external hardware interrupts.  If this is the first external
-  hardware interrupt (i.e. not a nested hardware interrupt) then the
-  kernel switches from the current task to the interrupt stack.  Like
-  the split thread and interrupt stacks on i386, this gives more room
-  for kernel interrupt processing without having to increase the size
-  of every per thread stack.
-
-  The interrupt stack is also used when processing a softirq.
-
-Switching to the kernel interrupt stack is done by software based on a
-per CPU interrupt nest counter. This is needed because x86-64 "IST"
-hardware stacks cannot nest without races.
-
-x86_64 also has a feature which is not available on i386, the ability
-to automatically switch to a new stack for designated events such as
-double fault or NMI, which makes it easier to handle these unusual
-events on x86_64.  This feature is called the Interrupt Stack Table
-(IST).  There can be up to 7 IST entries per CPU. The IST code is an
-index into the Task State Segment (TSS). The IST entries in the TSS
-point to dedicated stacks; each stack can be a different size.
-
-An IST is selected by a non-zero value in the IST field of an
-interrupt-gate descriptor.  When an interrupt occurs and the hardware
-loads such a descriptor, the hardware automatically sets the new stack
-pointer based on the IST value, then invokes the interrupt handler.  If
-the interrupt came from user mode, then the interrupt handler prologue
-will switch back to the per-thread stack.  If software wants to allow
-nested IST interrupts then the handler must adjust the IST values on
-entry to and exit from the interrupt handler.  (This is occasionally
-done, e.g. for debug exceptions.)
-
-Events with different IST codes (i.e. with different stacks) can be
-nested.  For example, a debug interrupt can safely be interrupted by an
-NMI.  arch/x86_64/kernel/entry.S::paranoidentry adjusts the stack
-pointers on entry to and exit from all IST events, in theory allowing
-IST events with the same code to be nested.  However in most cases, the
-stack size allocated to an IST assumes no nesting for the same code.
-If that assumption is ever broken then the stacks will become corrupt.
-
-The currently assigned IST stacks are :-
-
-* STACKFAULT_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
-
-  Used for interrupt 12 - Stack Fault Exception (#SS).
-
-  This allows the CPU to recover from invalid stack segments. Rarely
-  happens.
-
-* DOUBLEFAULT_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
-
-  Used for interrupt 8 - Double Fault Exception (#DF).
-
-  Invoked when handling one exception causes another exception. Happens
-  when the kernel is very confused (e.g. kernel stack pointer corrupt).
-  Using a separate stack allows the kernel to recover from it well enough
-  in many cases to still output an oops.
-
-* NMI_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
-
-  Used for non-maskable interrupts (NMI).
-
-  NMI can be delivered at any time, including when the kernel is in the
-  middle of switching stacks.  Using IST for NMI events avoids making
-  assumptions about the previous state of the kernel stack.
-
-* DEBUG_STACK.  DEBUG_STKSZ
-
-  Used for hardware debug interrupts (interrupt 1) and for software
-  debug interrupts (INT3).
-
-  When debugging a kernel, debug interrupts (both hardware and
-  software) can occur at any time.  Using IST for these interrupts
-  avoids making assumptions about the previous state of the kernel
-  stack.
-
-* MCE_STACK.  EXCEPTION_STKSZ (PAGE_SIZE).
-
-  Used for interrupt 18 - Machine Check Exception (#MC).
-
-  MCE can be delivered at any time, including when the kernel is in the
-  middle of switching stacks.  Using IST for MCE events avoids making
-  assumptions about the previous state of the kernel stack.
-
-For more details see the Intel IA32 or AMD AMD64 architecture manuals.
diff --git a/Kbuild b/Kbuild
index 6f0d82a9245d897c89a63819e857f474ab93066e..df99a5f53beb880482871e99453bf04ef2f0fb06 100644 (file)
--- a/Kbuild
+++ b/Kbuild
@@ -2,8 +2,9 @@
 # Kbuild for top-level directory of the kernel
 # This file takes care of the following:
 # 1) Generate bounds.h
-# 2) Generate asm-offsets.h (may need bounds.h)
-# 3) Check for missing system calls
+# 2) Generate timeconst.h
+# 3) Generate asm-offsets.h (may need bounds.h and timeconst.h)
+# 4) Check for missing system calls
 
 # Default sed regexp - multiline due to syntax constraints
 define sed-y
@@ -47,7 +48,26 @@ $(obj)/$(bounds-file): kernel/bounds.s FORCE
        $(call filechk,offsets,__LINUX_BOUNDS_H__)
 
 #####
-# 2) Generate asm-offsets.h
+# 2) Generate timeconst.h
+
+timeconst-file := include/generated/timeconst.h
+
+#always  += $(timeconst-file)
+targets += $(timeconst-file)
+
+quiet_cmd_gentimeconst = GEN     $@
+define cmd_gentimeconst
+       (echo $(CONFIG_HZ) | bc -q $< ) > $@
+endef
+define filechk_gentimeconst
+       (echo $(CONFIG_HZ) | bc -q $< )
+endef
+
+$(obj)/$(timeconst-file): kernel/time/timeconst.bc FORCE
+       $(call filechk,gentimeconst)
+
+#####
+# 3) Generate asm-offsets.h
 #
 
 offsets-file := include/generated/asm-offsets.h
@@ -57,7 +77,7 @@ targets += arch/$(SRCARCH)/kernel/asm-offsets.s
 
 # We use internal kbuild rules to avoid the "is up to date" message from make
 arch/$(SRCARCH)/kernel/asm-offsets.s: arch/$(SRCARCH)/kernel/asm-offsets.c \
-                                      $(obj)/$(bounds-file) FORCE
+                                      $(obj)/$(timeconst-file) $(obj)/$(bounds-file) FORCE
        $(Q)mkdir -p $(dir $@)
        $(call if_changed_dep,cc_s_c)
 
@@ -65,7 +85,7 @@ $(obj)/$(offsets-file): arch/$(SRCARCH)/kernel/asm-offsets.s FORCE
        $(call filechk,offsets,__ASM_OFFSETS_H__)
 
 #####
-# 3) Check for missing system calls
+# 4) Check for missing system calls
 #
 
 always += missing-syscalls
@@ -77,5 +97,5 @@ quiet_cmd_syscalls = CALL    $<
 missing-syscalls: scripts/checksyscalls.sh $(offsets-file) FORCE
        $(call cmd,syscalls)
 
-# Keep these two files during make clean
-no-clean-files := $(bounds-file) $(offsets-file)
+# Keep these three files during make clean
+no-clean-files := $(bounds-file) $(offsets-file) $(timeconst-file)
index b399b34a24969a82e6d56fa7265dc8f724a36365..a655435705aa47563a630a3736d486033eae4e7b 100644 (file)
@@ -51,9 +51,9 @@ trivial patch so apply some common sense.
        or does something very odd once a month document it.
 
        PLEASE remember that submissions must be made under the terms
-       of the OSDL certificate of contribution and should include a
-       Signed-off-by: line.  The current version of this "Developer's
-       Certificate of Origin" (DCO) is listed in the file
+       of the Linux Foundation certificate of contribution and should
+       include a Signed-off-by: line.  The current version of this
+       "Developer's Certificate of Origin" (DCO) is listed in the file
        Documentation/SubmittingPatches.
 
 6.     Make sure you have the right to send any changes you make. If you
@@ -892,11 +892,10 @@ S:        Maintained
 F:     arch/arm/mach-alpine/
 
 ARM/ATMEL AT91RM9200 AND AT91SAM ARM ARCHITECTURES
-M:     Andrew Victor <linux@maxim.org.za>
 M:     Nicolas Ferre <nicolas.ferre@atmel.com>
+M:     Alexandre Belloni <alexandre.belloni@free-electrons.com>
 M:     Jean-Christophe Plagniol-Villard <plagnioj@jcrosoft.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-W:     http://maxim.org.za/at91_26.html
 W:     http://www.linux4sam.org
 S:     Supported
 F:     arch/arm/mach-at91/
@@ -975,7 +974,7 @@ S:  Maintained
 ARM/CORTINA SYSTEMS GEMINI ARM ARCHITECTURE
 M:     Hans Ulli Kroll <ulli.kroll@googlemail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
-T:     git git://git.berlios.de/gemini-board
+T:     git git://github.com/ulli-kroll/linux.git
 S:     Maintained
 F:     arch/arm/mach-gemini/
 
@@ -990,6 +989,12 @@ F: drivers/clocksource/timer-prima2.c
 F:     drivers/clocksource/timer-atlas7.c
 N:     [^a-z]sirf
 
+ARM/CONEXANT DIGICOLOR MACHINE SUPPORT
+M:     Baruch Siach <baruch@tkos.co.il>
+L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
+S:     Maintained
+N:     digicolor
+
 ARM/EBSA110 MACHINE SUPPORT
 M:     Russell King <linux@arm.linux.org.uk>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
@@ -1188,7 +1193,7 @@ ARM/MAGICIAN MACHINE SUPPORT
 M:     Philipp Zabel <philipp.zabel@gmail.com>
 S:     Maintained
 
-ARM/Marvell Armada 370 and Armada XP SOC support
+ARM/Marvell Kirkwood and Armada 370, 375, 38x, XP SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
 M:     Andrew Lunn <andrew@lunn.ch>
 M:     Gregory Clement <gregory.clement@free-electrons.com>
@@ -1197,12 +1202,17 @@ L:      linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-mvebu/
 F:     drivers/rtc/rtc-armada38x.c
+F:     arch/arm/boot/dts/armada*
+F:     arch/arm/boot/dts/kirkwood*
+
 
 ARM/Marvell Berlin SoC support
 M:     Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 S:     Maintained
 F:     arch/arm/mach-berlin/
+F:     arch/arm/boot/dts/berlin*
+
 
 ARM/Marvell Dove/MV78xx0/Orion SOC support
 M:     Jason Cooper <jason@lakedaemon.net>
@@ -1215,6 +1225,9 @@ F:        arch/arm/mach-dove/
 F:     arch/arm/mach-mv78xx0/
 F:     arch/arm/mach-orion5x/
 F:     arch/arm/plat-orion/
+F:     arch/arm/boot/dts/dove*
+F:     arch/arm/boot/dts/orion5x*
+
 
 ARM/Orion SoC/Technologic Systems TS-78xx platform support
 M:     Alexander Clouter <alex@digriz.org.uk>
@@ -1366,6 +1379,7 @@ N:        rockchip
 
 ARM/SAMSUNG EXYNOS ARM ARCHITECTURES
 M:     Kukjin Kim <kgene@kernel.org>
+M:     Krzysztof Kozlowski <k.kozlowski@samsung.com>
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
 S:     Maintained
@@ -1439,9 +1453,10 @@ ARM/SOCFPGA ARCHITECTURE
 M:     Dinh Nguyen <dinguyen@opensource.altera.com>
 S:     Maintained
 F:     arch/arm/mach-socfpga/
+F:     arch/arm/boot/dts/socfpga*
+F:     arch/arm/configs/socfpga_defconfig
 W:     http://www.rocketboards.org
-T:     git://git.rocketboards.org/linux-socfpga.git
-T:     git://git.rocketboards.org/linux-socfpga-next.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dinguyen/linux.git
 
 ARM/SOCFPGA CLOCK FRAMEWORK SUPPORT
 M:     Dinh Nguyen <dinguyen@opensource.altera.com>
@@ -1929,7 +1944,7 @@ S:        Maintained
 F:     drivers/net/wireless/b43legacy/
 
 BACKLIGHT CLASS/SUBSYSTEM
-M:     Jingoo Han <jg1.han@samsung.com>
+M:     Jingoo Han <jingoohan1@gmail.com>
 M:     Lee Jones <lee.jones@linaro.org>
 S:     Maintained
 F:     drivers/video/backlight/
@@ -2116,8 +2131,9 @@ S:        Supported
 F:     drivers/net/ethernet/broadcom/bnx2x/
 
 BROADCOM BCM281XX/BCM11XXX/BCM216XX ARM ARCHITECTURE
-M:     Christian Daudt <bcm@fixthebug.org>
 M:     Florian Fainelli <f.fainelli@gmail.com>
+M:     Ray Jui <rjui@broadcom.com>
+M:     Scott Branden <sbranden@broadcom.com>
 L:     bcm-kernel-feedback-list@broadcom.com
 T:     git git://github.com/broadcom/mach-bcm
 S:     Maintained
@@ -2168,7 +2184,6 @@ S:        Maintained
 F:     drivers/usb/gadget/udc/bcm63xx_udc.*
 
 BROADCOM BCM7XXX ARM ARCHITECTURE
-M:     Marc Carino <marc.ceeeee@gmail.com>
 M:     Brian Norris <computersforpeace@gmail.com>
 M:     Gregory Fong <gregory.0xf0@gmail.com>
 M:     Florian Fainelli <f.fainelli@gmail.com>
@@ -2412,7 +2427,6 @@ L:        linux-security-module@vger.kernel.org
 S:     Supported
 F:     include/linux/capability.h
 F:     include/uapi/linux/capability.h
-F:     security/capability.c
 F:     security/commoncap.c
 F:     kernel/capability.c
 
@@ -3810,10 +3824,11 @@ M:      David Woodhouse <dwmw2@infradead.org>
 L:     linux-embedded@vger.kernel.org
 S:     Maintained
 
-EMULEX LPFC FC SCSI DRIVER
-M:     James Smart <james.smart@emulex.com>
+EMULEX/AVAGO LPFC FC/FCOE SCSI DRIVER
+M:     James Smart <james.smart@avagotech.com>
+M:     Dick Kennedy <dick.kennedy@avagotech.com>
 L:     linux-scsi@vger.kernel.org
-W:     http://sourceforge.net/projects/lpfcxxxx
+W:     http://www.avagotech.com
 S:     Supported
 F:     drivers/scsi/lpfc/
 
@@ -3912,7 +3927,7 @@ F:        drivers/extcon/
 F:     Documentation/extcon/
 
 EXYNOS DP DRIVER
-M:     Jingoo Han <jg1.han@samsung.com>
+M:     Jingoo Han <jingoohan1@gmail.com>
 L:     dri-devel@lists.freedesktop.org
 S:     Maintained
 F:     drivers/gpu/drm/exynos/exynos_dp*
@@ -4371,11 +4386,10 @@ F:      fs/gfs2/
 F:     include/uapi/linux/gfs2_ondisk.h
 
 GIGASET ISDN DRIVERS
-M:     Hansjoerg Lipp <hjlipp@web.de>
-M:     Tilman Schmidt <tilman@imap.cc>
+M:     Paul Bolle <pebolle@tiscali.nl>
 L:     gigaset307x-common@lists.sourceforge.net
 W:     http://gigaset307x.sourceforge.net/
-S:     Maintained
+S:     Odd Fixes
 F:     Documentation/isdn/README.gigaset
 F:     drivers/isdn/gigaset/
 F:     include/uapi/linux/gigaset_dev.h
@@ -4522,7 +4536,7 @@ M:        Jean Delvare <jdelvare@suse.de>
 M:     Guenter Roeck <linux@roeck-us.net>
 L:     lm-sensors@lm-sensors.org
 W:     http://www.lm-sensors.org/
-T:     quilt kernel.org/pub/linux/kernel/people/jdelvare/linux-2.6/jdelvare-hwmon/
+T:     quilt http://jdelvare.nerim.net/devel/linux/jdelvare-hwmon/
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/groeck/linux-staging.git
 S:     Maintained
 F:     Documentation/hwmon/
@@ -5042,17 +5056,19 @@ S:      Orphan
 F:     drivers/video/fbdev/imsttfb.c
 
 INFINIBAND SUBSYSTEM
-M:     Roland Dreier <roland@kernel.org>
+M:     Doug Ledford <dledford@redhat.com>
 M:     Sean Hefty <sean.hefty@intel.com>
 M:     Hal Rosenstock <hal.rosenstock@gmail.com>
 L:     linux-rdma@vger.kernel.org
 W:     http://www.openfabrics.org/
 Q:     http://patchwork.kernel.org/project/linux-rdma/list/
-T:     git git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband.git
+T:     git git://git.kernel.org/pub/scm/linux/kernel/git/dledford/rdma.git
 S:     Supported
 F:     Documentation/infiniband/
 F:     drivers/infiniband/
 F:     include/uapi/linux/if_infiniband.h
+F:     include/uapi/rdma/
+F:     include/rdma/
 
 INOTIFY
 M:     John McCutchan <john@johnmccutchan.com>
@@ -6951,6 +6967,17 @@ T:       git git://git.rocketboards.org/linux-socfpga-next.git
 S:     Maintained
 F:     arch/nios2/
 
+NOKIA N900 POWER SUPPLY DRIVERS
+M:     Pali Rohár <pali.rohar@gmail.com>
+S:     Maintained
+F:     include/linux/power/bq2415x_charger.h
+F:     include/linux/power/bq27x00_battery.h
+F:     include/linux/power/isp1704_charger.h
+F:     drivers/power/bq2415x_charger.c
+F:     drivers/power/bq27x00_battery.c
+F:     drivers/power/isp1704_charger.c
+F:     drivers/power/rx51_battery.c
+
 NTB DRIVER
 M:     Jon Mason <jdmason@kudzu.us>
 M:     Dave Jiang <dave.jiang@intel.com>
@@ -7539,7 +7566,7 @@ S:        Maintained
 F:     drivers/pci/host/*rcar*
 
 PCI DRIVER FOR SAMSUNG EXYNOS
-M:     Jingoo Han <jg1.han@samsung.com>
+M:     Jingoo Han <jingoohan1@gmail.com>
 L:     linux-pci@vger.kernel.org
 L:     linux-arm-kernel@lists.infradead.org (moderated for non-subscribers)
 L:     linux-samsung-soc@vger.kernel.org (moderated for non-subscribers)
@@ -7547,7 +7574,8 @@ S:        Maintained
 F:     drivers/pci/host/pci-exynos.c
 
 PCI DRIVER FOR SYNOPSIS DESIGNWARE
-M:     Jingoo Han <jg1.han@samsung.com>
+M:     Jingoo Han <jingoohan1@gmail.com>
+M:     Pratyush Anand <pratyush.anand@gmail.com>
 L:     linux-pci@vger.kernel.org
 S:     Maintained
 F:     drivers/pci/host/*designware*
@@ -7561,8 +7589,9 @@ F:        Documentation/devicetree/bindings/pci/host-generic-pci.txt
 F:     drivers/pci/host/pci-host-generic.c
 
 PCIE DRIVER FOR ST SPEAR13XX
+M:     Pratyush Anand <pratyush.anand@gmail.com>
 L:     linux-pci@vger.kernel.org
-S:     Orphan
+S:     Maintained
 F:     drivers/pci/host/*spear*
 
 PCMCIA SUBSYSTEM
@@ -7605,7 +7634,6 @@ F:        kernel/delayacct.c
 
 PERFORMANCE EVENTS SUBSYSTEM
 M:     Peter Zijlstra <a.p.zijlstra@chello.nl>
-M:     Paul Mackerras <paulus@samba.org>
 M:     Ingo Molnar <mingo@redhat.com>
 M:     Arnaldo Carvalho de Melo <acme@kernel.org>
 L:     linux-kernel@vger.kernel.org
@@ -8503,7 +8531,7 @@ S:        Supported
 F:     sound/soc/samsung/
 
 SAMSUNG FRAMEBUFFER DRIVER
-M:     Jingoo Han <jg1.han@samsung.com>
+M:     Jingoo Han <jingoohan1@gmail.com>
 L:     linux-fbdev@vger.kernel.org
 S:     Maintained
 F:     drivers/video/fbdev/s3c-fb.c
@@ -8802,16 +8830,19 @@ F:      drivers/misc/phantom.c
 F:     include/uapi/linux/phantom.h
 
 SERVER ENGINES 10Gbps iSCSI - BladeEngine 2 DRIVER
-M:     Jayamohan Kallickal <jayamohan.kallickal@emulex.com>
+M:     Jayamohan Kallickal <jayamohan.kallickal@avagotech.com>
+M:     Minh Tran <minh.tran@avagotech.com>
+M:     John Soni Jose <sony.john-n@avagotech.com>
 L:     linux-scsi@vger.kernel.org
-W:     http://www.emulex.com
+W:     http://www.avagotech.com
 S:     Supported
 F:     drivers/scsi/be2iscsi/
 
-SERVER ENGINES 10Gbps NIC - BladeEngine 2 DRIVER
-M:     Sathya Perla <sathya.perla@emulex.com>
-M:     Subbu Seetharaman <subbu.seetharaman@emulex.com>
-M:     Ajit Khaparde <ajit.khaparde@emulex.com>
+Emulex 10Gbps NIC BE2, BE3-R, Lancer, Skyhawk-R DRIVER
+M:     Sathya Perla <sathya.perla@avagotech.com>
+M:     Ajit Khaparde <ajit.khaparde@avagotech.com>
+M:     Padmanabh Ratnakar <padmanabh.ratnakar@avagotech.com>
+M:     Sriharsha Basavapatna <sriharsha.basavapatna@avagotech.com>
 L:     netdev@vger.kernel.org
 W:     http://www.emulex.com
 S:     Supported
@@ -10557,8 +10588,7 @@ F:      drivers/virtio/virtio_input.c
 F:     include/uapi/linux/virtio_input.h
 
 VIA RHINE NETWORK DRIVER
-M:     Roger Luethi <rl@hellgate.ch>
-S:     Maintained
+S:     Orphan
 F:     drivers/net/ethernet/via/via-rhine.c
 
 VIA SD/MMC CARD CONTROLLER DRIVER
@@ -10864,7 +10894,7 @@ M:      Andy Lutomirski <luto@amacapital.net>
 L:     linux-kernel@vger.kernel.org
 T:     git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/vdso
 S:     Maintained
-F:     arch/x86/vdso/
+F:     arch/x86/entry/vdso/
 
 XC2028/3028 TUNER DRIVER
 M:     Mauro Carvalho Chehab <mchehab@osg.samsung.com>
index 2da553fd7fc36146a36458612a21343277261495..6c6f14628f329d0ba10f5632fb362c818c437ff5 100644 (file)
--- a/Makefile
+++ b/Makefile
@@ -1,7 +1,7 @@
 VERSION = 4
 PATCHLEVEL = 1
 SUBLEVEL = 0
-EXTRAVERSION = -rc2
+EXTRAVERSION =
 NAME = Hurr durr I'ma sheep
 
 # *DOCUMENTATION*
@@ -215,7 +215,6 @@ VPATH               := $(srctree)$(if $(KBUILD_EXTMOD),:$(KBUILD_EXTMOD))
 
 export srctree objtree VPATH
 
-
 # SUBARCH tells the usermode build what the underlying arch is.  That is set
 # first, and if a usermode build is happening, the "ARCH=um" on the command
 # line overrides the setting of ARCH below.  If a native build is happening,
@@ -1497,11 +1496,11 @@ image_name:
 # Clear a bunch of variables before executing the submake
 tools/: FORCE
        $(Q)mkdir -p $(objtree)/tools
-       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/
+       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/
 
 tools/%: FORCE
        $(Q)mkdir -p $(objtree)/tools
-       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(objtree) subdir=tools -C $(src)/tools/ $*
+       $(Q)$(MAKE) LDFLAGS= MAKEFLAGS="$(filter --j% -j,$(MAKEFLAGS))" O=$(O) subdir=tools -C $(src)/tools/ $*
 
 # Single targets
 # ---------------------------------------------------------------------------
index cd143887380a26da88e8372828dc586ce3ff7b31..8399bd0e68e8e5cb7aba078cd40864d89b971eed 100644 (file)
@@ -14,6 +14,9 @@ targets               := vmlinux.gz vmlinux \
                   tools/bootpzh bootloader bootpheader bootpzheader 
 OBJSTRIP       := $(obj)/tools/objstrip
 
+HOSTCFLAGS     := -Wall -I$(objtree)/usr/include
+BOOTCFLAGS     += -I$(obj) -I$(srctree)/$(obj)
+
 # SRM bootable image.  Copy to offset 512 of a partition.
 $(obj)/bootimage: $(addprefix $(obj)/tools/,mkbb lxboot bootlx) $(obj)/vmlinux.nh
        ( cat $(obj)/tools/lxboot $(obj)/tools/bootlx $(obj)/vmlinux.nh ) > $@ 
@@ -96,13 +99,13 @@ $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE
 $(obj)/tools/bootpzh: $(obj)/bootpzheader $(OBJSTRIP) FORCE
        $(call if_changed,objstrip)
 
-LDFLAGS_bootloader   := -static -uvsprintf -T  #-N -relax
-LDFLAGS_bootpheader  := -static -uvsprintf -T  #-N -relax
-LDFLAGS_bootpzheader := -static -uvsprintf -T  #-N -relax
+LDFLAGS_bootloader   := -static -T # -N -relax
+LDFLAGS_bootpheader  := -static -T # -N -relax
+LDFLAGS_bootpzheader := -static -T # -N -relax
 
-OBJ_bootlx   := $(obj)/head.o $(obj)/main.o
-OBJ_bootph   := $(obj)/head.o $(obj)/bootp.o
-OBJ_bootpzh  := $(obj)/head.o $(obj)/bootpz.o $(obj)/misc.o
+OBJ_bootlx   := $(obj)/head.o $(obj)/stdio.o $(obj)/main.o
+OBJ_bootph   := $(obj)/head.o $(obj)/stdio.o $(obj)/bootp.o
+OBJ_bootpzh  := $(obj)/head.o $(obj)/stdio.o $(obj)/bootpz.o $(obj)/misc.o
 
 $(obj)/bootloader: $(obj)/bootloader.lds $(OBJ_bootlx) $(LIBS_Y) FORCE
        $(call if_changed,ld)
index 3baf2d1e908df5760f1304bab309ae70877eee03..dd6eb4a33582e63def4b015c0a1ad496889feacd 100644 (file)
@@ -19,7 +19,6 @@
 
 #include "ksize.h"
 
-extern int vsprintf(char *, const char *, va_list);
 extern unsigned long switch_to_osf_pal(unsigned long nr,
        struct pcb_struct * pcb_va, struct pcb_struct * pcb_pa,
        unsigned long *vptb);
diff --git a/arch/alpha/boot/stdio.c b/arch/alpha/boot/stdio.c
new file mode 100644 (file)
index 0000000..f844dae
--- /dev/null
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) Paul Mackerras 1997.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+#include <stdarg.h>
+#include <stddef.h>
+
+size_t strnlen(const char * s, size_t count)
+{
+       const char *sc;
+
+       for (sc = s; count-- && *sc != '\0'; ++sc)
+               /* nothing */;
+       return sc - s;
+}
+
+# define do_div(n, base) ({                                            \
+       unsigned int __base = (base);                                   \
+       unsigned int __rem;                                             \
+       __rem = ((unsigned long long)(n)) % __base;                     \
+       (n) = ((unsigned long long)(n)) / __base;                       \
+       __rem;                                                          \
+})
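+/*
+ * do_div() divides n in place by base and returns the remainder:
+ * for n == 1234, do_div(n, 10) leaves n == 123 and returns 4.
+ */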
+
+
+static int skip_atoi(const char **s)
+{
+       int i, c;
+
+       for (i = 0; '0' <= (c = **s) && c <= '9'; ++*s)
+               i = i*10 + c - '0';
+       return i;
+}
+
+#define ZEROPAD        1               /* pad with zero */
+#define SIGN   2               /* unsigned/signed long */
+#define PLUS   4               /* show plus */
+#define SPACE  8               /* space if plus */
+#define LEFT   16              /* left justified */
+#define SPECIAL        32              /* 0x */
+#define LARGE  64              /* use 'ABCDEF' instead of 'abcdef' */
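+/*
+ * Worked example: "%#010x" applied to 0xdead parses to SPECIAL|ZEROPAD
+ * with a field width of 10, and number() below then emits "0x0000dead".
+ */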
+
+static char * number(char * str, unsigned long long num, int base, int size, int precision, int type)
+{
+       char c,sign,tmp[66];
+       const char *digits="0123456789abcdefghijklmnopqrstuvwxyz";
+       int i;
+
+       if (type & LARGE)
+               digits = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ";
+       if (type & LEFT)
+               type &= ~ZEROPAD;
+       if (base < 2 || base > 36)
+               return 0;
+       c = (type & ZEROPAD) ? '0' : ' ';
+       sign = 0;
+       if (type & SIGN) {
+               if ((signed long long)num < 0) {
+                       sign = '-';
+                       num = - (signed long long)num;
+                       size--;
+               } else if (type & PLUS) {
+                       sign = '+';
+                       size--;
+               } else if (type & SPACE) {
+                       sign = ' ';
+                       size--;
+               }
+       }
+       if (type & SPECIAL) {
+               if (base == 16)
+                       size -= 2;
+               else if (base == 8)
+                       size--;
+       }
+       i = 0;
+       if (num == 0)
+               tmp[i++]='0';
+       else while (num != 0) {
+               tmp[i++] = digits[do_div(num, base)];
+       }
+       if (i > precision)
+               precision = i;
+       size -= precision;
+       if (!(type&(ZEROPAD+LEFT)))
+               while(size-->0)
+                       *str++ = ' ';
+       if (sign)
+               *str++ = sign;
+       if (type & SPECIAL) {
+               if (base==8)
+                       *str++ = '0';
+               else if (base==16) {
+                       *str++ = '0';
+                       *str++ = digits[33];
+               }
+       }
+       if (!(type & LEFT))
+               while (size-- > 0)
+                       *str++ = c;
+       while (i < precision--)
+               *str++ = '0';
+       while (i-- > 0)
+               *str++ = tmp[i];
+       while (size-- > 0)
+               *str++ = ' ';
+       return str;
+}
+
+int vsprintf(char *buf, const char *fmt, va_list args)
+{
+       int len;
+       unsigned long long num;
+       int i, base;
+       char * str;
+       const char *s;
+
+       int flags;              /* flags to number() */
+
+       int field_width;        /* width of output field */
+       int precision;          /* min. # of digits for integers; max
+                                  number of chars from string */
+       int qualifier;          /* 'h', 'l', or 'L' for integer fields */
+                               /* 'z' support added 23/7/1999 S.H.    */
+                               /* 'z' changed to 'Z' --davidm 1/25/99 */
+
+
+       for (str=buf ; *fmt ; ++fmt) {
+               if (*fmt != '%') {
+                       *str++ = *fmt;
+                       continue;
+               }
+
+               /* process flags */
+               flags = 0;
+               repeat:
+                       ++fmt;          /* this also skips first '%' */
+                       switch (*fmt) {
+                               case '-': flags |= LEFT; goto repeat;
+                               case '+': flags |= PLUS; goto repeat;
+                               case ' ': flags |= SPACE; goto repeat;
+                               case '#': flags |= SPECIAL; goto repeat;
+                               case '0': flags |= ZEROPAD; goto repeat;
+                               }
+
+               /* get field width */
+               field_width = -1;
+               if ('0' <= *fmt && *fmt <= '9')
+                       field_width = skip_atoi(&fmt);
+               else if (*fmt == '*') {
+                       ++fmt;
+                       /* it's the next argument */
+                       field_width = va_arg(args, int);
+                       if (field_width < 0) {
+                               field_width = -field_width;
+                               flags |= LEFT;
+                       }
+               }
+
+               /* get the precision */
+               precision = -1;
+               if (*fmt == '.') {
+                       ++fmt;
+                       if ('0' <= *fmt && *fmt <= '9')
+                               precision = skip_atoi(&fmt);
+                       else if (*fmt == '*') {
+                               ++fmt;
+                               /* it's the next argument */
+                               precision = va_arg(args, int);
+                       }
+                       if (precision < 0)
+                               precision = 0;
+               }
+
+               /* get the conversion qualifier */
+               qualifier = -1;
+               if (*fmt == 'l' && *(fmt + 1) == 'l') {
+                       qualifier = 'q';
+                       fmt += 2;
+               } else if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L'
+                       || *fmt == 'Z') {
+                       qualifier = *fmt;
+                       ++fmt;
+               }
+
+               /* default base */
+               base = 10;
+
+               switch (*fmt) {
+               case 'c':
+                       if (!(flags & LEFT))
+                               while (--field_width > 0)
+                                       *str++ = ' ';
+                       *str++ = (unsigned char) va_arg(args, int);
+                       while (--field_width > 0)
+                               *str++ = ' ';
+                       continue;
+
+               case 's':
+                       s = va_arg(args, char *);
+                       if (!s)
+                               s = "<NULL>";
+
+                       len = strnlen(s, precision);
+
+                       if (!(flags & LEFT))
+                               while (len < field_width--)
+                                       *str++ = ' ';
+                       for (i = 0; i < len; ++i)
+                               *str++ = *s++;
+                       while (len < field_width--)
+                               *str++ = ' ';
+                       continue;
+
+               case 'p':
+                       if (field_width == -1) {
+                               field_width = 2*sizeof(void *);
+                               flags |= ZEROPAD;
+                       }
+                       str = number(str,
+                               (unsigned long) va_arg(args, void *), 16,
+                               field_width, precision, flags);
+                       continue;
+
+
+               case 'n':
+                       if (qualifier == 'l') {
+                               long * ip = va_arg(args, long *);
+                               *ip = (str - buf);
+                       } else if (qualifier == 'Z') {
+                               size_t * ip = va_arg(args, size_t *);
+                               *ip = (str - buf);
+                       } else {
+                               int * ip = va_arg(args, int *);
+                               *ip = (str - buf);
+                       }
+                       continue;
+
+               case '%':
+                       *str++ = '%';
+                       continue;
+
+               /* integer number formats - set up the flags and "break" */
+               case 'o':
+                       base = 8;
+                       break;
+
+               case 'X':
+                       flags |= LARGE;
+               case 'x':
+                       base = 16;
+                       break;
+
+               case 'd':
+               case 'i':
+                       flags |= SIGN;
+               case 'u':
+                       break;
+
+               default:
+                       *str++ = '%';
+                       if (*fmt)
+                               *str++ = *fmt;
+                       else
+                               --fmt;
+                       continue;
+               }
+               if (qualifier == 'l') {
+                       num = va_arg(args, unsigned long);
+                       if (flags & SIGN)
+                               num = (signed long) num;
+               } else if (qualifier == 'q') {
+                       num = va_arg(args, unsigned long long);
+                       if (flags & SIGN)
+                               num = (signed long long) num;
+               } else if (qualifier == 'Z') {
+                       num = va_arg(args, size_t);
+               } else if (qualifier == 'h') {
+                       num = (unsigned short) va_arg(args, int);
+                       if (flags & SIGN)
+                               num = (signed short) num;
+               } else {
+                       num = va_arg(args, unsigned int);
+                       if (flags & SIGN)
+                               num = (signed int) num;
+               }
+               str = number(str, num, base, field_width, precision, flags);
+       }
+       *str = '\0';
+       return str-buf;
+}
+
+int sprintf(char * buf, const char *fmt, ...)
+{
+       va_list args;
+       int i;
+
+       va_start(args, fmt);
+       i=vsprintf(buf,fmt,args);
+       va_end(args);
+       return i;
+}
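
A minimal usage sketch (illustrative, not part of the patch): the boot code can layer a printf-style helper on the vsprintf() added above, with srm_puts() standing in for whatever console-output primitive the boot loader provides.

#include <stdarg.h>

extern int vsprintf(char *buf, const char *fmt, va_list args);
extern void srm_puts(const char *s);	/* assumed console primitive */

void boot_printk(const char *fmt, ...)
{
	static char buf[1024];		/* boot code runs single-threaded */
	va_list args;

	va_start(args, fmt);
	vsprintf(buf, fmt, args);	/* format into the static buffer */
	va_end(args);
	srm_puts(buf);			/* hand the result to the console */
}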
index 367d53d031fc04d51af471273a0256a5a08432c7..dee82695f48bad69b0d9cf81457196e321d2ff55 100644 (file)
@@ -27,6 +27,9 @@
 #include <linux/param.h>
 #ifdef __ELF__
 # include <linux/elf.h>
+# define elfhdr elf64_hdr
+# define elf_phdr elf64_phdr
+# define elf_check_arch(x) ((x)->e_machine == EM_ALPHA)
 #endif
 
 /* bootfile size must be multiple of BLOCK_SIZE: */
index 429e8cd0d78e5a8f7aaeafacc3f36fae0c6a49c4..e5117766529e87ff99170fd4a53fde8d89bba4ff 100644 (file)
@@ -66,6 +66,4 @@
 #undef __ASM__MB
 #undef ____cmpxchg
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #endif /* _ALPHA_CMPXCHG_H */
index f61e1a56c3787bcbd4a2ab093d1c58c7715fa6c4..4cb4b6d3452c0b3439c3aa3c0f928f74de09fb3a 100644 (file)
@@ -2,6 +2,5 @@
 #define _ALPHA_TYPES_H
 
 #include <asm-generic/int-ll64.h>
-#include <uapi/asm/types.h>
 
 #endif /* _ALPHA_TYPES_H */
index c509d306db4561ea65a40703b42e7f9bd078d352..a56e608db2f9e4aad716b96669de02c7571dc1df 100644 (file)
@@ -3,7 +3,7 @@
 
 #include <uapi/asm/unistd.h>
 
-#define NR_SYSCALLS                    511
+#define NR_SYSCALLS                    514
 
 #define __ARCH_WANT_OLD_READDIR
 #define __ARCH_WANT_STAT64
index d214a0358100b6ad82a63fce68bc6016eb9ddaa4..aa33bf5aacb6c1666203e38700939750c90cb5c5 100644 (file)
 #define __NR_sched_setattr             508
 #define __NR_sched_getattr             509
 #define __NR_renameat2                 510
+#define __NR_getrandom                 511
+#define __NR_memfd_create              512
+#define __NR_execveat                  513
 
 #endif /* _UAPI_ALPHA_UNISTD_H */
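
A quick user-space smoke test for the newly wired numbers (illustrative; alpha only, using raw syscall(2) since libc wrappers may not exist yet):

#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>

int main(void)
{
	char buf[16];
	/* 511 == __NR_getrandom per the hunk above */
	long n = syscall(511, buf, sizeof(buf), 0);

	printf("getrandom returned %ld\n", n);
	return n == (long)sizeof(buf) ? 0 : 1;
}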
index 253cf1a87481e815ad9a724dde1fef51b5616d09..51267ac5729b9c7276a0e838357bfb8ffd29e7db 100644 (file)
@@ -6,7 +6,6 @@
  *     Error handling code supporting Alpha systems
  */
 
-#include <linux/init.h>
 #include <linux/sched.h>
 
 #include <asm/io.h>
index 7b2be251c30fb92981d4aef8cf4f8951bed24728..51f2c8654253f2bd6667ccff24c0db09a7f80ccc 100644 (file)
@@ -19,7 +19,6 @@
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/random.h>
-#include <linux/init.h>
 #include <linux/irq.h>
 #include <linux/proc_fs.h>
 #include <linux/seq_file.h>
index e51f578636a5718d4f0e438b90b4b78a12b6b7da..36dc91ace83ae97069df82f5e3923a24275c6a9b 100644 (file)
@@ -1019,14 +1019,13 @@ SYSCALL_DEFINE2(osf_settimeofday, struct timeval32 __user *, tv,
        if (tv) {
                if (get_tv32((struct timeval *)&kts, tv))
                        return -EFAULT;
+               kts.tv_nsec *= 1000;
        }
        if (tz) {
                if (copy_from_user(&ktz, tz, sizeof(*tz)))
                        return -EFAULT;
        }
 
-       kts.tv_nsec *= 1000;
-
        return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
 }
 
index 1941a07b5811f925aed82e853aab4efb081f74ca..84d13263ce46f193ef0b223466cea2f522ca109d 100644 (file)
@@ -236,12 +236,11 @@ release_thread(struct task_struct *dead_task)
 }
 
 /*
- * Copy an alpha thread..
+ * Copy architecture-specific thread state
  */
-
 int
 copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long arg,
+           unsigned long kthread_arg,
            struct task_struct *p)
 {
        extern void ret_from_fork(void);
@@ -262,7 +261,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
                        sizeof(struct switch_stack) + sizeof(struct pt_regs));
                childstack->r26 = (unsigned long) ret_from_kernel_thread;
                childstack->r9 = usp;   /* function */
-               childstack->r10 = arg;
+               childstack->r10 = kthread_arg;
                childregs->hae = alpha_mv.hae_cache,
                childti->pcb.usp = 0;
                return 0;
index 99ac36d5de4efd10832804e82509e062606720e2..2f24447fef92071b0ba9b94d09f8ed1fdc25d2d1 100644 (file)
@@ -63,7 +63,6 @@ static struct {
 enum ipi_message_type {
        IPI_RESCHEDULE,
        IPI_CALL_FUNC,
-       IPI_CALL_FUNC_SINGLE,
        IPI_CPU_STOP,
 };
 
@@ -506,7 +505,6 @@ setup_profiling_timer(unsigned int multiplier)
        return -EINVAL;
 }
 
-\f
 static void
 send_ipi_message(const struct cpumask *to_whom, enum ipi_message_type operation)
 {
@@ -552,10 +550,6 @@ handle_ipi(struct pt_regs *regs)
                        generic_smp_call_function_interrupt();
                        break;
 
-               case IPI_CALL_FUNC_SINGLE:
-                       generic_smp_call_function_single_interrupt();
-                       break;
-
                case IPI_CPU_STOP:
                        halt();
 
@@ -606,7 +600,7 @@ void arch_send_call_function_ipi_mask(const struct cpumask *mask)
 
 void arch_send_call_function_single_ipi(int cpu)
 {
-       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
+       send_ipi_message(cpumask_of(cpu), IPI_CALL_FUNC);
 }
 
 static void
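
Dropping IPI_CALL_FUNC_SINGLE works because the generic smp_call_function queues were unified earlier; a sketch of the aliasing in include/linux/smp.h that makes a single IPI type sufficient (shown for context, not part of this hunk):

void generic_smp_call_function_single_interrupt(void);
#define generic_smp_call_function_interrupt \
	generic_smp_call_function_single_interrupt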
index 6f01d9ad7b814700d8bd56094b13d3af3474cc11..72b59511e59aa350cc58d568cf896edba4f53602 100644 (file)
@@ -237,8 +237,7 @@ srmcons_init(void)
 
        return -ENODEV;
 }
-
-module_init(srmcons_init);
+device_initcall(srmcons_init);
 
 \f
 /*
index f21d61fab6787331d21571958185b637fc601bb7..24e41bd7d3c99060a7411c1c5774941249c89d72 100644 (file)
@@ -331,7 +331,7 @@ marvel_map_irq(const struct pci_dev *cdev, u8 slot, u8 pin)
        pci_read_config_byte(dev, PCI_INTERRUPT_LINE, &intline);
        irq = intline;
 
-       msi_loc = pci_find_capability(dev, PCI_CAP_ID_MSI);
+       msi_loc = dev->msi_cap;
        msg_ctl = 0;
        if (msi_loc) 
                pci_read_config_word(dev, msi_loc + PCI_MSI_FLAGS, &msg_ctl);
index 24789713f1eafb4757ec1084c32225ce88bf4ad4..9b62e3fd4f038a925657beb15de3de89f8548473 100644 (file)
@@ -529,6 +529,9 @@ sys_call_table:
        .quad sys_sched_setattr
        .quad sys_sched_getattr
        .quad sys_renameat2                     /* 510 */
+       .quad sys_getrandom
+       .quad sys_memfd_create
+       .quad sys_execveat
 
        .size sys_call_table, . - sys_call_table
        .type sys_call_table, @object
index 9c4c189eb22f5a9db2d2ae678756a5241b3e1ee5..74aceead06e98a391a1f0fc49f5486ef2562844c 100644 (file)
@@ -14,7 +14,6 @@
 #include <linux/tty.h>
 #include <linux/delay.h>
 #include <linux/module.h>
-#include <linux/init.h>
 #include <linux/kallsyms.h>
 #include <linux/ratelimit.h>
 
index 9d0ac091a52a7d16cf1f78f402ab48c511924a24..4a905bd667e2ef71542e2585469404478860bce5 100644 (file)
@@ -23,8 +23,7 @@
 #include <linux/smp.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
-
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 
 extern void die_if_kernel(char *,struct pt_regs *,long, unsigned long *);
 
@@ -107,7 +106,7 @@ do_page_fault(unsigned long address, unsigned long mmcsr,
 
        /* If we're in an interrupt context, or have no user context,
           we must not take the fault.  */
-       if (!mm || in_atomic())
+       if (!mm || faulthandler_disabled())
                goto no_context;
 
 #ifdef CONFIG_ALPHA_LARGE_VMALLOC
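
For context, a sketch of the predicate the fault path now tests, per the pagefault_disable() rework in include/linux/uaccess.h (illustrative):

static inline bool faulthandler_disabled(void)
{
	/* bail out if pagefaults were explicitly disabled
	 * or we are in atomic context */
	return pagefault_disabled() || in_atomic();
}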
index 18aa9b4f94f1822be3e01ea0906fd2cf234c1205..086a0d5445c528b631cec10fd48c5643a4101f86 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index c32f8a0ad92543a0d6e6767e698f51da0972c17e..c300f5ef3482b82330d41c0b4d318362d76d4092 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 1c84cc257fc7ec7a6c3df970722b381f0ed17ff3..02edf59716144e0939eb2933cfb303fa457ecbd7 100644 (file)
@@ -8,7 +8,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index 34a57a12655377727930f8abba88082f3afde149..adb1744d20f3845efb48a314e56784a5c5470a0a 100644 (file)
@@ -9,7 +9,6 @@
  */
 
 #include <linux/oprofile.h>
-#include <linux/init.h>
 #include <linux/smp.h>
 #include <asm/ptrace.h>
 
index a7fc0da25650ef8920e5cfc9f25d417a36081617..ff6a4b5ce92781442aa10f37537d0d6afb1d15f3 100644 (file)
@@ -2,19 +2,6 @@ menu "Kernel hacking"
 
 source "lib/Kconfig.debug"
 
-config EARLY_PRINTK
-       bool "Early printk" if EMBEDDED
-       default y
-       help
-         Write kernel log output directly into the VGA buffer or to a serial
-         port.
-
-         This is useful for kernel debugging when your machine crashes very
-         early before the console code is initialized. For normal operation
-         it is not recommended because it looks ugly and doesn't cooperate
-         with klogd/syslogd or the X server. You should normally say N here,
-         unless you want to debug such a crash.
-
 config 16KSTACKS
        bool "Use 16Kb for kernel stacks instead of 8Kb"
        help
index 067551b6920af99fe733f1f13d4aee8b1903a77b..9917a45fc430d042a4f59006abf84ceedad1bca7 100644 (file)
@@ -99,7 +99,7 @@ static inline void atomic_##op(int i, atomic_t *v)                    \
        atomic_ops_unlock(flags);                                       \
 }
 
-#define ATOMIC_OP_RETURN(op, c_op)                                     \
+#define ATOMIC_OP_RETURN(op, c_op, asm_op)                             \
 static inline int atomic_##op##_return(int i, atomic_t *v)             \
 {                                                                      \
        unsigned long flags;                                            \
index 4dc64ddebecebe354ef90b14fe28bc8804569a52..05b5aaf5b0f91e5580395e08ae778f5ddace5b3c 100644 (file)
@@ -53,7 +53,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       pagefault_disable();    /* implies preempt_disable() */
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -75,7 +75,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
                ret = -ENOSYS;
        }
 
-       pagefault_enable();     /* subsumes preempt_enable() */
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
@@ -104,7 +104,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        return ret;
 }
 
-/* Compare-xchg with preemption disabled.
+/* Compare-xchg with pagefaults disabled.
  *  Notes:
  *      -Best-Effort: Exchg happens only if compare succeeds.
  *          If compare fails, returns; leaving retry/looping to upper layers
@@ -121,7 +121,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
                return -EFAULT;
 
-       pagefault_disable();    /* implies preempt_disable() */
+       pagefault_disable();
 
        /* TBD : can use llock/scond */
        __asm__ __volatile__(
@@ -142,7 +142,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, u32 oldval,
        : "r"(oldval), "r"(newval), "r"(uaddr), "ir"(-EFAULT)
        : "cc", "memory");
 
-       pagefault_enable();     /* subsumes preempt_enable() */
+       pagefault_enable();
 
        *uval = val;
        return val;
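
The "implies preempt_disable()" comments go away because pagefault_disable()/pagefault_enable() now maintain a per-task counter instead of piggybacking on the preempt count; a simplified sketch of the reworked helpers (illustrative):

static inline void pagefault_disable(void)
{
	current->pagefault_disabled++;	/* per-task counter, not preempt count */
	barrier();			/* order the store before any fault */
}

static inline void pagefault_enable(void)
{
	barrier();			/* finish user accesses first */
	current->pagefault_disabled--;
}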
index cabd518cb253d95e3bf532d07bfbc5c2214d19fc..7cc4ced5dbf4e4894c6b7d594a9810df74ab26d0 100644 (file)
@@ -20,6 +20,7 @@ extern void iounmap(const void __iomem *addr);
 
 #define ioremap_nocache(phy, sz)       ioremap(phy, sz)
 #define ioremap_wc(phy, sz)            ioremap(phy, sz)
+#define ioremap_wt(phy, sz)            ioremap(phy, sz)
 
 /* Change struct page to physical address */
 #define page_to_phys(page)             (page_to_pfn(page) << PAGE_SHIFT)
index 8c3a3e02ba92c8adbc368dba9a3cd283f32c3e72..12b2100db0731a2a9ce99ce1398ae4599eaca87a 100644 (file)
@@ -266,7 +266,7 @@ static inline void __cache_line_loop(unsigned long paddr, unsigned long vaddr,
  * Machine specific helpers for Entire D-Cache or Per Line ops
  */
 
-static unsigned int __before_dc_op(const int op)
+static inline unsigned int __before_dc_op(const int op)
 {
        unsigned int reg = reg;
 
@@ -284,7 +284,7 @@ static unsigned int __before_dc_op(const int op)
        return reg;
 }
 
-static void __after_dc_op(const int op, unsigned int reg)
+static inline void __after_dc_op(const int op, unsigned int reg)
 {
        if (op & OP_FLUSH)      /* flush / flush-n-inv both wait */
                while (read_aux_reg(ARC_REG_DC_CTRL) & DC_CTRL_FLUSH_STATUS);
index 6a2e006cbcce1f1cd69866e0f0f9f94463d73dcb..d948e4e9d89c4ebe7e5676f449c9377b4fbe3535 100644 (file)
@@ -86,7 +86,7 @@ void do_page_fault(unsigned long address, struct pt_regs *regs)
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index 86217db2937ab331666b710c1bd639c68caad516..992736b5229ba7bd06497feb35ecff5fc36ab232 100644 (file)
@@ -223,7 +223,7 @@ dtb-$(CONFIG_SOC_IMX25) += \
        imx25-eukrea-mbimxsd25-baseboard-dvi-vga.dtb \
        imx25-karo-tx25.dtb \
        imx25-pdk.dtb
-dtb-$(CONFIG_SOC_IMX31) += \
+dtb-$(CONFIG_SOC_IMX27) += \
        imx27-apf27.dtb \
        imx27-apf27dev.dtb \
        imx27-eukrea-mbimxsd27-baseboard.dtb \
index c3255e0c90aa829fc792f02d1265d413f3c6e624..dbb3f4d2bf84ebf4565555949053c94619ea161d 100644 (file)
 /include/ "tps65217.dtsi"
 
 &tps {
+       /*
+        * Configure pmic to enter OFF-state instead of SLEEP-state ("RTC-only
+        * mode") at poweroff.  Most BeagleBone versions do not support RTC-only
+        * mode and risk hardware damage if this mode is entered.
+        *
+        * For details, see linux-omap mailing list May 2015 thread
+        *      [PATCH] ARM: dts: am335x-bone* enable pmic-shutdown-controller
+        * In particular, messages:
+        *      http://www.spinics.net/lists/linux-omap/msg118585.html
+        *      http://www.spinics.net/lists/linux-omap/msg118615.html
+        *
+        * You can override this later with
+        *      &tps {  /delete-property/ ti,pmic-shutdown-controller;  }
+        * if you want to use RTC-only mode and have made sure you are not affected
+        * by the hardware problems. (Tip: double-check by performing a current
+        * measurement after shutdown: it should be less than 1 mA.)
+        */
+       ti,pmic-shutdown-controller;
+
        regulators {
                dcdc1_reg: regulator@0 {
                        regulator-name = "vdds_dpr";
index 5c42d259fa68fbf29c98439306badd1d44987ee0..901739fcb85a37abba32822463caea432637d274 100644 (file)
@@ -80,7 +80,3 @@
                status = "okay";
        };
 };
-
-&rtc {
-       system-power-controller;
-};
index 87fc7a35e80261cad03261fd9cf8b047606fc286..156d05efcb70bf5af737cf67892691d41b84fb2b 100644 (file)
        wlcore: wlcore@2 {
                compatible = "ti,wl1271";
                reg = <2>;
-               interrupt-parent = <&gpio1>;
+               interrupt-parent = <&gpio0>;
                interrupts = <31 IRQ_TYPE_LEVEL_HIGH>; /* gpio 31 */
                ref-clock-frequency = <38400000>;
        };
index 518b8fde88b0c87005fe68e413cfee769befac07..18cc826e9db534714a1b4d8a3cfc497e43ffcc85 100644 (file)
@@ -12,7 +12,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&ipss_ick>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <1>;
        };
 
@@ -20,7 +20,7 @@
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
                clocks = <&rmii_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <9>;
        };
 
@@ -28,7 +28,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&ipss_ick>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <2>;
        };
 
@@ -36,7 +36,7 @@
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
                clocks = <&pclk_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <10>;
        };
 
@@ -44,7 +44,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&ipss_ick>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <0>;
        };
 
@@ -52,7 +52,7 @@
                #clock-cells = <0>;
                compatible = "ti,gate-clock";
                clocks = <&sys_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <8>;
        };
 
@@ -60,7 +60,7 @@
                #clock-cells = <0>;
                compatible = "ti,am35xx-gate-clock";
                clocks = <&sys_ck>;
-               reg = <0x059c>;
+               reg = <0x032c>;
                ti,bit-shift = <3>;
        };
 };
index 8ae29c955c11d7f83f4423184a8ed61412b515dd..c17097d2c167d4718f76200f51e599d6605d144e 100644 (file)
@@ -49,7 +49,7 @@
                pinctrl-0 = <&matrix_keypad_pins>;
 
                debounce-delay-ms = <5>;
-               col-scan-delay-us = <1500>;
+               col-scan-delay-us = <5>;
 
                row-gpios = <&gpio5 5 GPIO_ACTIVE_HIGH          /* Bank5, pin5 */
                                &gpio5 6 GPIO_ACTIVE_HIGH>;     /* Bank5, pin6 */
                interrupt-parent = <&gpio0>;
                interrupts = <31 0>;
 
-               wake-gpios = <&gpio1 28 GPIO_ACTIVE_HIGH>;
+               reset-gpios = <&gpio1 28 GPIO_ACTIVE_LOW>;
 
                touchscreen-size-x = <480>;
                touchscreen-size-y = <272>;
index 15f198e4864d308196640f9ce5a8a190efe8ab9a..7128fad991ac3459ac5349b0219e893fab1830c8 100644 (file)
@@ -18,6 +18,7 @@
        aliases {
                rtc0 = &mcp_rtc;
                rtc1 = &tps659038_rtc;
+               rtc2 = &rtc;
        };
 
        memory {
@@ -83,7 +84,7 @@
        gpio_fan: gpio_fan {
                /* Based on 5v 500mA AFB02505HHB */
                compatible = "gpio-fan";
-               gpios =  <&tps659038_gpio 1 GPIO_ACTIVE_HIGH>;
+               gpios =  <&tps659038_gpio 2 GPIO_ACTIVE_HIGH>;
                gpio-fan,speed-map = <0     0>,
                                     <13000 1>;
                #cooling-cells = <2>;
 
        uart3_pins_default: uart3_pins_default {
                pinctrl-single,pins = <
-                       0x248 (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_rxd.rxd */
-                       0x24c (PIN_INPUT_SLEW | MUX_MODE0) /* uart3_txd.txd */
+                       0x3f8 (PIN_INPUT_SLEW | MUX_MODE2) /* uart2_ctsn.uart3_rxd */
+                       0x3fc (PIN_INPUT_SLEW | MUX_MODE1) /* uart2_rtsn.uart3_txd */
                >;
        };
 
        mcp_rtc: rtc@6f {
                compatible = "microchip,mcp7941x";
                reg = <0x6f>;
-               interrupts = <GIC_SPI 2 IRQ_TYPE_LEVEL_LOW>;  /* IRQ_SYS_1N */
+               interrupts = <GIC_SPI 2 IRQ_TYPE_EDGE_RISING>;  /* IRQ_SYS_1N */
 
                pinctrl-names = "default";
                pinctrl-0 = <&mcp79410_pins_default>;
 &uart3 {
        status = "okay";
        interrupts-extended = <&crossbar_mpu GIC_SPI 69 IRQ_TYPE_LEVEL_HIGH>,
-                             <&dra7_pmx_core 0x248>;
+                             <&dra7_pmx_core 0x3f8>;
 
        pinctrl-names = "default";
        pinctrl-0 = <&uart3_pins_default>;
index c675257f2377f8797565871e38e77ba50fe7678b..f076ff856d8b8223466f3ec8536caaa1974186ca 100644 (file)
@@ -69,7 +69,7 @@
                mainpll: mainpll {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <2000000000>;
+                       clock-frequency = <1000000000>;
                };
                /* 25 MHz reference crystal */
                refclk: oscillator {
index ed2dd8ba4080df7a7f85db89068de44cd693446b..218a2acd36e509b0de8e22dc207e06253b1163fe 100644 (file)
                mainpll: mainpll {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <2000000000>;
+                       clock-frequency = <1000000000>;
                };
 
                /* 25 MHz reference crystal */
index 0e85fc15cedad84c1ea5ebea6db033a2ace60703..ecd1318109bac8fb5d1e29c96f7c30779d217fda 100644 (file)
                mainpll: mainpll {
                        compatible = "fixed-clock";
                        #clock-cells = <0>;
-                       clock-frequency = <2000000000>;
+                       clock-frequency = <1000000000>;
                };
        };
 };
index a2cf2154dcdb68d8374c2bea4b136fccaccb7aa2..fdd187c55aa5f78b5ab61d15dc12c1ad001990d2 100644 (file)
 
                internal-regs {
 
+                       rtc@10300 {
+                               /* No crystal connected to the internal RTC */
+                               status = "disabled";
+                       };
+
                        /* J10: VCC, NC, RX, NC, TX, GND  */
                        serial@12000 {
                                status = "okay";
index e3b08fb959e5f8ffc27977fa85d05c70a4d60792..990e8a2100f0f3cff6c50989761d6b78838e4bd8 100644 (file)
                };
 
                internal-regs {
+                       rtc@10300 {
+                               /* No crystal connected to the internal RTC */
+                               status = "disabled";
+                       };
                        serial@12000 {
                                status = "okay";
                        };
index de8427be830a32e24a01ace97f11303435528b7b..289806adb343806aefce22e63b6caa1d558741fb 100644 (file)
                        ti,hwmods = "usb_otg_hs";
 
                        usb0: usb@47401000 {
-                               compatible = "ti,musb-am33xx";
+                               compatible = "ti,musb-dm816";
                                reg = <0x47401400 0x400
                                       0x47401000 0x200>;
                                reg-names = "mc", "control";
                        };
 
                        usb1: usb@47401800 {
-                               compatible = "ti,musb-am33xx";
+                               compatible = "ti,musb-dm816";
                                reg = <0x47401c00 0x400
                                       0x47401800 0x200>;
                                reg-names = "mc", "control";
index aae7efc09b0bd1ecd66e72193c6aefe107008b9e..e6fa251e17b93b54f67bcd39b8da57476ddda519 100644 (file)
@@ -87,6 +87,7 @@
 
                /* connect xtal input to 25MHz reference */
                clocks = <&ref25>;
+               clock-names = "xtal";
 
                /* connect xtal input as source of pll0 and pll1 */
                silabs,pll-source = <0 0>, <1 0>;
index 5332b57b4950dbd61fac6ef7cb112c4cd44cffa7..f03a091cd0766b606d0f3a61a4ab03c870f3dfcf 100644 (file)
                        ti,clock-cycles = <16>;
 
                        reg = <0x4ae07ddc 0x4>, <0x4ae07de0 0x4>,
-                             <0x4ae06014 0x4>, <0x4a003b20 0x8>,
+                             <0x4ae06014 0x4>, <0x4a003b20 0xc>,
                              <0x4ae0c158 0x4>;
                        reg-names = "setup-address", "control-address",
                                    "int-address", "efuse-address",
                        ti,clock-cycles = <16>;
 
                        reg = <0x4ae07e34 0x4>, <0x4ae07e24 0x4>,
-                             <0x4ae06010 0x4>, <0x4a0025cc 0x8>,
+                             <0x4ae06010 0x4>, <0x4a0025cc 0xc>,
                              <0x4a002470 0x4>;
                        reg-names = "setup-address", "control-address",
                                    "int-address", "efuse-address",
                        ti,clock-cycles = <16>;
 
                        reg = <0x4ae07e30 0x4>, <0x4ae07e20 0x4>,
-                             <0x4ae06010 0x4>, <0x4a0025e0 0x8>,
+                             <0x4ae06010 0x4>, <0x4a0025e0 0xc>,
                              <0x4a00246c 0x4>;
                        reg-names = "setup-address", "control-address",
                                    "int-address", "efuse-address",
                        ti,clock-cycles = <16>;
 
                        reg = <0x4ae07de4 0x4>, <0x4ae07de8 0x4>,
-                             <0x4ae06010 0x4>, <0x4a003b08 0x8>,
+                             <0x4ae06010 0x4>, <0x4a003b08 0xc>,
                              <0x4ae0c154 0x4>;
                        reg-names = "setup-address", "control-address",
                                    "int-address", "efuse-address",
                        status = "disabled";
                };
 
-               rtc@48838000 {
+               rtc: rtc@48838000 {
                        compatible = "ti,am3352-rtc";
                        reg = <0x48838000 0x100>;
                        interrupts = <GIC_SPI 217 IRQ_TYPE_LEVEL_HIGH>,
index 8de12af7c276f427c7a8901b5411326b5fd8f20e..d6b49e5b32e9f35ed2a1378e1f7933997c5d4e85 100644 (file)
@@ -9,6 +9,7 @@
 
 #include <dt-bindings/sound/samsung-i2s.h>
 #include <dt-bindings/input/input.h>
+#include <dt-bindings/clock/maxim,max77686.h>
 #include "exynos4412.dtsi"
 
 / {
 
        rtc@10070000 {
                status = "okay";
+               clocks = <&clock CLK_RTC>, <&max77686 MAX77686_CLK_AP>;
+               clock-names = "rtc", "rtc_src";
        };
 
        g2d@10800000 {
index 173ffa479ad3cb03eb6e6742663fafaccacf9d53..792394dd0f2ab3ebf347af1e3f08d7ad330ee997 100644 (file)
 
                        display-timings {
                                timing-0 {
-                                       clock-frequency = <0>;
+                                       clock-frequency = <57153600>;
                                        hactive = <720>;
                                        vactive = <1280>;
                                        hfront-porch = <5>;
index 2657e842e5a5b68f6d840c70d5953e046608344f..1eca97ee4bd6320a3a3a44ede264e269041cd088 100644 (file)
        num-slots = <1>;
        broken-cd;
        cap-sdio-irq;
+       keep-power-in-suspend;
        card-detect-delay = <200>;
        samsung,dw-mshc-ciu-div = <3>;
        samsung,dw-mshc-sdr-timing = <2 3>;
index 0788d08fb43edd00b65f64864ab4a4476f7aaff6..146e71118a72b4eef98faf47f644c0216bbb5894 100644 (file)
        num-slots = <1>;
        broken-cd;
        cap-sdio-irq;
+       keep-power-in-suspend;
        card-detect-delay = <200>;
        clock-frequency = <400000000>;
        samsung,dw-mshc-ciu-div = <1>;
index 5d31fc14082360b41cc742ad3f549dc5d4f9013c..2180a0152c9bf9d78b8772af284d6445ad0b6ebd 100644 (file)
@@ -28,7 +28,7 @@ trips {
                type = "active";
        };
        cpu-crit-0 {
-               temperature = <1200000>; /* millicelsius */
+               temperature = <120000>; /* millicelsius */
                hysteresis = <0>; /* millicelsius */
                type = "critical";
        };
index f67b23f303c3904bfbf834e790a4bfd01f127ecd..45317538bbaeb48309d1b16c0e18beb8a7376639 100644 (file)
                clock-names = "dp";
                phys = <&dp_phy>;
                phy-names = "dp";
+               power-domains = <&disp_pd>;
        };
 
        mipi_phy: video-phy@10040714 {
index 48adfa8f4300b5c17d6bd43377568952c01b9593..356e963edf11e58f95c57f70be0661a6277cddd8 100644 (file)
@@ -18,7 +18,7 @@ trips {
                type = "active";
        };
        cpu-crit-0 {
-               temperature = <1050000>; /* millicelsius */
+               temperature = <105000>; /* millicelsius */
                hysteresis = <0>; /* millicelsius */
                type = "critical";
        };
index 412f41d62686f3ae966df4640d98988dc489c65f..02eb8b15374f3c84ef4b215f4d7da85d2e9f342a 100644 (file)
        num-slots = <1>;
        broken-cd;
        cap-sdio-irq;
+       keep-power-in-suspend;
        card-detect-delay = <200>;
        clock-frequency = <400000000>;
        samsung,dw-mshc-ciu-div = <1>;
index 7e6eef2488e807c12c36aaebfd3e64b076f7622f..82045398bf1fabdf47664290d636d78a38b23b14 100644 (file)
@@ -12,6 +12,7 @@
  */
 
 /dts-v1/;
+#include <dt-bindings/gpio/gpio.h>
 #include "imx23.dtsi"
 
 / {
@@ -93,6 +94,7 @@
 
        ahb@80080000 {
                usb0: usb@80080000 {
+                       dr_mode = "host";
                        vbus-supply = <&reg_usb0_vbus>;
                        status = "okay";
                };
 
                user {
                        label = "green";
-                       gpios = <&gpio2 1 1>;
+                       gpios = <&gpio2 1 GPIO_ACTIVE_HIGH>;
                };
        };
 };
index e4d3aecc4ed2c0fd61b1c68a93f20a24b808bd8c..677f81d9dcd529f92b6cf856464ec6595ef00509 100644 (file)
 
                        pwm4: pwm@53fc8000 {
                                compatible = "fsl,imx25-pwm", "fsl,imx27-pwm";
+                               #pwm-cells = <2>;
                                reg = <0x53fc8000 0x4000>;
                                clocks = <&clks 108>, <&clks 52>;
                                clock-names = "ipg", "per";
index 6951b66d1ab7b4cbe37dcf8944a7626213979827..bc215e4b75fd52c6e5b2e271b4a9e6265d442205 100644 (file)
 
                        fec: ethernet@1002b000 {
                                compatible = "fsl,imx27-fec";
-                               reg = <0x1002b000 0x4000>;
+                               reg = <0x1002b000 0x1000>;
                                interrupts = <50>;
                                clocks = <&clks IMX27_CLK_FEC_IPG_GATE>,
                                         <&clks IMX27_CLK_FEC_AHB_GATE>;
index 25e25f82fbaea4d9cc5bafa62ec93d1e0ba3f1c4..4e073e8547425ee189a6c91331ade03052e1d999 100644 (file)
                                              80 81 68 69
                                              70 71 72 73
                                              74 75 76 77>;
-                               interrupt-names = "auart4-rx", "aurat4-tx", "spdif-tx", "empty",
+                               interrupt-names = "auart4-rx", "auart4-tx", "spdif-tx", "empty",
                                                  "saif0", "saif1", "i2c0", "i2c1",
                                                  "auart0-rx", "auart0-tx", "auart1-rx", "auart1-tx",
                                                  "auart2-rx", "auart2-tx", "auart3-rx", "auart3-tx";
index 19cc269a08d4d7acdc3572821b32399ee584f824..1ce6133b67f5c65fefd2fe85d368ac455b199950 100644 (file)
@@ -31,6 +31,7 @@
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio4 15 0>;
+                       enable-active-high;
                };
 
                reg_usb_h1_vbus: regulator@1 {
@@ -40,6 +41,7 @@
                        regulator-min-microvolt = <5000000>;
                        regulator-max-microvolt = <5000000>;
                        gpio = <&gpio1 0 0>;
+                       enable-active-high;
                };
        };
 
index 46b2fed7c319c891dde01d415521b7207702b45b..3b24b12651b2b86ee1a74d5baccca435778ddd3e 100644 (file)
 &i2c3 {
        pinctrl-names = "default";
        pinctrl-0 = <&pinctrl_i2c3>;
-       pinctrl-assert-gpios = <&gpio5 4 GPIO_ACTIVE_HIGH>;
        status = "okay";
 
        max7310_a: gpio@30 {
index 134d3f27a8ec5ae1f4f68982da4047dfcff13e13..921de6605f075d878f407d925a5652195bd2b41d 100644 (file)
        nand@0,0 {
                reg = <0 0 4>; /* CS0, offset 0, IO size 4 */
                nand-bus-width = <16>;
+               gpmc,device-width = <2>;
+               ti,nand-ecc-opt = "sw";
 
                gpmc,sync-clk-ps = <0>;
                gpmc,cs-on-ns = <0>;
index a29315833ecd3bfc1f1a7f86f7f3c5734453a00f..5f5e0f3d5b64fcb2283f72b9df923c793be7f75c 100644 (file)
                DRVDD-supply = <&vmmc2>;
                IOVDD-supply = <&vio>;
                DVDD-supply = <&vio>;
+
+               ai3x-micbias-vg = <1>;
        };
 
        tlv320aic3x_aux: tlv320aic3x@19 {
                DRVDD-supply = <&vmmc2>;
                IOVDD-supply = <&vio>;
                DVDD-supply = <&vio>;
+
+               ai3x-micbias-vg = <2>;
        };
 
        tsl2563: tsl2563@29 {
                touchscreen-fuzz-x = <4>;
                touchscreen-fuzz-y = <7>;
                touchscreen-fuzz-pressure = <2>;
-               touchscreen-max-x = <4096>;
-               touchscreen-max-y = <4096>;
+               touchscreen-size-x = <4096>;
+               touchscreen-size-y = <4096>;
                touchscreen-max-pressure = <2048>;
 
                ti,x-plate-ohms = <280>;
index d18a90f5eca3145e527926a5d2bbbb44c2d8b1c0..69a40cfc1f29dddbd515b4f821459442787d401a 100644 (file)
                };
 
                mmu_isp: mmu@480bd400 {
+                       #iommu-cells = <0>;
                        compatible = "ti,omap2-iommu";
                        reg = <0x480bd400 0x80>;
                        interrupts = <24>;
                };
 
                mmu_iva: mmu@5d000000 {
+                       #iommu-cells = <0>;
                        compatible = "ti,omap2-iommu";
                        reg = <0x5d000000 0x80>;
                        interrupts = <28>;
index efe5f737f39b42b7f01710abdd4e72d90bd53514..7d24ae0306b56845c80c2c783dd818033a3a6cd1 100644 (file)
         * hierarchy.
         */
        ocp {
-               compatible = "ti,omap4-l3-noc", "simple-bus";
+               compatible = "ti,omap5-l3-noc", "simple-bus";
                #address-cells = <1>;
                #size-cells = <1>;
                ranges;
index 74c3212f1f11e47e8319ab67a252853400b2c910..824ddab9c3adcbde11ac8ea62f339af6aa1b3ec6 100644 (file)
                compatible = "adi,adv7511w";
                reg = <0x39>;
                interrupt-parent = <&gpio3>;
-               interrupts = <29 IRQ_TYPE_EDGE_FALLING>;
+               interrupts = <29 IRQ_TYPE_LEVEL_LOW>;
 
                adi,input-depth = <8>;
                adi,input-colorspace = "rgb";
index bfd3f1c734b8d84dec4a622032e1b29efa07b83d..2201cd5da3bb95843278b27c8f855d540d154041 100644 (file)
                        status = "disabled";
                };
 
-               vmmci: regulator-gpio {
-                       compatible = "regulator-gpio";
-
-                       regulator-min-microvolt = <1800000>;
-                       regulator-max-microvolt = <2900000>;
-                       regulator-name = "mmci-reg";
-                       regulator-type = "voltage";
-
-                       startup-delay-us = <100>;
-                       enable-active-high;
-
-                       states = <1800000 0x1
-                                 2900000 0x0>;
-
-                       status = "disabled";
-               };
-
                mcde@a0350000 {
                        compatible = "stericsson,mcde";
                        reg = <0xa0350000 0x1000>, /* MCDE */
index bf8f0eddc2c020d03dda99adfdc32d453e254c14..744c1e3a744df1530ba53cd2a56029a9e9ac5d37 100644 (file)
                        pinctrl-1 = <&i2c3_sleep_mode>;
                };
 
+               vmmci: regulator-gpio {
+                       compatible = "regulator-gpio";
+
+                       regulator-min-microvolt = <1800000>;
+                       regulator-max-microvolt = <2900000>;
+                       regulator-name = "mmci-reg";
+                       regulator-type = "voltage";
+
+                       startup-delay-us = <100>;
+                       enable-active-high;
+
+                       states = <1800000 0x1
+                                 2900000 0x0>;
+               };
+
                // External Micro SD slot
                sdi0_per1@80126000 {
                        arm,primecell-periphid = <0x10480180>;
index 206826a855c0dc0e5025539e5a8930405c8cdd56..1bc84ebdccaa2fe119ebb9e542f88b64f017cea9 100644 (file)
                };
 
                vmmci: regulator-gpio {
+                       compatible = "regulator-gpio";
+
                        gpios = <&gpio7 4 0x4>;
                        enable-gpio = <&gpio6 25 0x4>;
+
+                       regulator-min-microvolt = <1800000>;
+                       regulator-max-microvolt = <2900000>;
+                       regulator-name = "mmci-reg";
+                       regulator-type = "voltage";
+
+                       startup-delay-us = <100>;
+                       enable-active-high;
+
+                       states = <1800000 0x1
+                                 2900000 0x0>;
                };
 
                // External Micro SD slot
index cf01c818b8ea41999f231f07211aec46a86e505c..13cc7ca5e031e5f1814697e8ead9546079f6d04e 100644 (file)
                         <&tegra_car TEGRA124_CLK_PLL_U>,
                         <&tegra_car TEGRA124_CLK_USBD>;
                clock-names = "reg", "pll_u", "utmi-pads";
-               resets = <&tegra_car 59>, <&tegra_car 22>;
+               resets = <&tegra_car 22>, <&tegra_car 22>;
                reset-names = "usb", "utmi-pads";
                nvidia,hssync-start-delay = <0>;
                nvidia,idle-wait-delay = <17>;
                nvidia,hssquelch-level = <2>;
                nvidia,hsdiscon-level = <5>;
                nvidia,xcvr-hsslew = <12>;
+               nvidia,has-utmi-pad-registers;
                status = "disabled";
        };
 
                         <&tegra_car TEGRA124_CLK_PLL_U>,
                         <&tegra_car TEGRA124_CLK_USBD>;
                clock-names = "reg", "pll_u", "utmi-pads";
-               resets = <&tegra_car 22>, <&tegra_car 22>;
+               resets = <&tegra_car 58>, <&tegra_car 22>;
                reset-names = "usb", "utmi-pads";
                nvidia,hssync-start-delay = <0>;
                nvidia,idle-wait-delay = <17>;
                nvidia,hssquelch-level = <2>;
                nvidia,hsdiscon-level = <5>;
                nvidia,xcvr-hsslew = <12>;
-               nvidia,has-utmi-pad-registers;
                status = "disabled";
        };
 
                         <&tegra_car TEGRA124_CLK_PLL_U>,
                         <&tegra_car TEGRA124_CLK_USBD>;
                clock-names = "reg", "pll_u", "utmi-pads";
-               resets = <&tegra_car 58>, <&tegra_car 22>;
+               resets = <&tegra_car 59>, <&tegra_car 22>;
                reset-names = "usb", "utmi-pads";
                nvidia,hssync-start-delay = <0>;
                nvidia,idle-wait-delay = <17>;
index 7a2aeacd62c0c2cb23b1247ce3b5ca6c55225d24..107395c32d8265863fecb711311def376ab500b6 100644 (file)
                compatible = "arm,cortex-a15-pmu";
                interrupts = <0 68 4>,
                             <0 69 4>;
+               interrupt-affinity = <&cpu0>, <&cpu1>;
        };
 
        oscclk6a: oscclk6a {
index 23662b5a5e9d84554f34eb0fc3edad1ef029f4e6..d949facba37641b3b36f337cd3eefce7932280d9 100644 (file)
                #address-cells = <1>;
                #size-cells = <0>;
 
-               cpu@0 {
+               A9_0: cpu@0 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <0>;
                        next-level-cache = <&L2>;
                };
 
-               cpu@1 {
+               A9_1: cpu@1 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <1>;
                        next-level-cache = <&L2>;
                };
 
-               cpu@2 {
+               A9_2: cpu@2 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <2>;
                        next-level-cache = <&L2>;
                };
 
-               cpu@3 {
+               A9_3: cpu@3 {
                        device_type = "cpu";
                        compatible = "arm,cortex-a9";
                        reg = <3>;
                compatible = "arm,pl310-cache";
                reg = <0x1e00a000 0x1000>;
                interrupts = <0 43 4>;
+               cache-unified;
                cache-level = <2>;
                arm,data-latency = <1 1 1>;
                arm,tag-latency = <1 1 1>;
                             <0 61 4>,
                             <0 62 4>,
                             <0 63 4>;
+               interrupt-affinity = <&A9_0>, <&A9_1>, <&A9_2>, <&A9_3>;
+
        };
 
        dcc {
index a5cd2eda3edf4fdb4d616999532d8f94da2d7bd3..9ea54b3dba09b7de9be7936ae0a755c4ed115a97 100644 (file)
                };
 
                gem0: ethernet@e000b000 {
-                       compatible = "cdns,gem";
+                       compatible = "cdns,zynq-gem";
                        reg = <0xe000b000 0x1000>;
                        status = "disabled";
                        interrupts = <0 22 4>;
                };
 
                gem1: ethernet@e000c000 {
-                       compatible = "cdns,gem";
+                       compatible = "cdns,zynq-gem";
                        reg = <0xe000c000 0x1000>;
                        status = "disabled";
                        interrupts = <0 45 4>;
index ab86655c1f4be22869742d274b157e5470a2bb0c..fbbb1915c6a95a81ac3edc58a6725f96c3c8b890 100644 (file)
@@ -39,11 +39,14 @@ CONFIG_ARCH_HIP04=y
 CONFIG_ARCH_KEYSTONE=y
 CONFIG_ARCH_MESON=y
 CONFIG_ARCH_MXC=y
+CONFIG_SOC_IMX50=y
 CONFIG_SOC_IMX51=y
 CONFIG_SOC_IMX53=y
 CONFIG_SOC_IMX6Q=y
 CONFIG_SOC_IMX6SL=y
+CONFIG_SOC_IMX6SX=y
 CONFIG_SOC_VF610=y
+CONFIG_SOC_LS1021A=y
 CONFIG_ARCH_OMAP3=y
 CONFIG_ARCH_OMAP4=y
 CONFIG_SOC_OMAP5=y
@@ -426,7 +429,7 @@ CONFIG_USB_EHCI_EXYNOS=y
 CONFIG_USB_EHCI_TEGRA=y
 CONFIG_USB_EHCI_HCD_STI=y
 CONFIG_USB_EHCI_HCD_PLATFORM=y
-CONFIG_USB_ISP1760_HCD=y
+CONFIG_USB_ISP1760=y
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_OHCI_HCD_STI=y
 CONFIG_USB_OHCI_HCD_PLATFORM=y
index 9ff7b54b2a837aa509b46d71b38741b7ded2c25d..3743ca221d402ce5d89df72baca5b1500ebb799b 100644 (file)
@@ -393,7 +393,7 @@ CONFIG_TI_EDMA=y
 CONFIG_DMA_OMAP=y
 # CONFIG_IOMMU_SUPPORT is not set
 CONFIG_EXTCON=m
-CONFIG_EXTCON_GPIO=m
+CONFIG_EXTCON_USB_GPIO=m
 CONFIG_EXTCON_PALMAS=m
 CONFIG_TI_EMIF=m
 CONFIG_PWM=y
index d2f81e6b8c1cc5adb914ce38a7ab991b25801137..6c2327e1c7323d79831af30bb296c55a9409e9bd 100644 (file)
@@ -81,7 +81,7 @@ do {                                                                  \
 #define read_barrier_depends()         do { } while(0)
 #define smp_read_barrier_depends()     do { } while(0)
 
-#define set_mb(var, value)     do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_mb__before_atomic()        smp_mb()
 #define smp_mb__after_atomic() smp_mb()
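
set_mb() becomes smp_store_mb() and now uses WRITE_ONCE() for the store; its canonical caller is set_current_state(). A sketch of the sleeper pattern the store-then-barrier keeps correct (illustrative; wakeup_pending is a stand-in condition):

for (;;) {
	/* state store, full barrier, then the condition load:
	 * a concurrent waker cannot be missed */
	smp_store_mb(current->state, TASK_UNINTERRUPTIBLE);
	if (READ_ONCE(wakeup_pending))
		break;
	schedule();
}
__set_current_state(TASK_RUNNING);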
index 8e3fcb924db6f13fcf0c5c08f6c8bb6cdc28a36b..2ef282f96651fbac9daa8b614c697c8020d1715f 100644 (file)
@@ -25,7 +25,7 @@ struct dma_iommu_mapping {
 };
 
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size);
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size);
 
 void arm_iommu_release_mapping(struct dma_iommu_mapping *mapping);
 
index 4e78065a16aa3c6a3dae5db147f36fda00dcf18a..5eed82809d82b7aa9c74670fd9c9624bbd930803 100644 (file)
@@ -93,6 +93,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
+       preempt_disable();
        __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n"
        "1:     " TUSER(ldr) "  %1, [%4]\n"
        "       teq     %1, %2\n"
@@ -104,6 +105,8 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
        : "cc", "memory");
 
        *uval = val;
+       preempt_enable();
+
        return ret;
 }
 
@@ -124,7 +127,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       pagefault_disable();    /* implies preempt_disable() */
+#ifndef CONFIG_SMP
+       preempt_disable();
+#endif
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -146,7 +152,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
                ret = -ENOSYS;
        }
 
-       pagefault_enable();     /* subsumes preempt_enable() */
+       pagefault_enable();
+#ifndef CONFIG_SMP
+       preempt_enable();
+#endif
 
        if (!ret) {
                switch (cmp) {
index db58deb00aa74c8176380075d06b60e6b0858009..1b7677d1e5e12063167dbbd05745d7ea82506678 100644 (file)
@@ -336,6 +336,7 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
 #define ioremap_nocache(cookie,size)   __arm_ioremap((cookie), (size), MT_DEVICE)
 #define ioremap_cache(cookie,size)     __arm_ioremap((cookie), (size), MT_DEVICE_CACHED)
 #define ioremap_wc(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE_WC)
+#define ioremap_wt(cookie,size)                __arm_ioremap((cookie), (size), MT_DEVICE)
 #define iounmap                                __arm_iounmap
 
 /*
index 2fe85fff5ccacd3dfe67d72c4b6fd5d65342e5e9..370f7a732900ae12e8831e6f3ce7390d16455fc8 100644 (file)
@@ -18,7 +18,7 @@ extern struct cputopo_arm cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)      (cpu_topology[cpu].socket_id)
 #define topology_core_id(cpu)          (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)     (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)   (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
index f8ccc21fa032354facead9735abf9f4eb0cb7eb2..4e7f40c577e6e4fe9df3dd677b0d146b1ceb13c2 100644 (file)
@@ -33,7 +33,9 @@ ret_fast_syscall:
  UNWIND(.fnstart       )
  UNWIND(.cantunwind    )
        disable_irq                             @ disable interrupts
-       ldr     r1, [tsk, #TI_FLAGS]
+       ldr     r1, [tsk, #TI_FLAGS]            @ re-check for syscall tracing
+       tst     r1, #_TIF_SYSCALL_WORK
+       bne     __sys_trace_return
        tst     r1, #_TIF_WORK_MASK
        bne     fast_work_pending
        asm_trace_hardirqs_on
index 91c7ba182dcdd9b9e84ce8f5222181b32922deaf..3b8c2833c5379aa36ca3a0a384bb740df3f3284a 100644 (file)
@@ -303,9 +303,15 @@ static int probe_current_pmu(struct arm_pmu *pmu)
 
 static int of_pmu_irq_cfg(struct platform_device *pdev)
 {
-       int i;
-       int *irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+       int i, irq;
+       int *irqs;
 
+       /* Don't bother with PPIs; they're already affine */
+       irq = platform_get_irq(pdev, 0);
+       if (irq >= 0 && irq_is_percpu(irq))
+               return 0;
+
+       irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
        if (!irqs)
                return -ENOMEM;
 
@@ -317,7 +323,7 @@ static int of_pmu_irq_cfg(struct platform_device *pdev)
                                      i);
                if (!dn) {
                        pr_warn("Failed to parse %s/interrupt-affinity[%d]\n",
-                               of_node_full_name(dn), i);
+                               of_node_full_name(pdev->dev.of_node), i);
                        break;
                }
 
index acd5b560b72801c6e2e4b51baa5ca4c129f44b2d..5f5cd562c593ef5f6a9224254c146e7d3d0136a3 100644 (file)
@@ -159,6 +159,8 @@ extern void exynos_enter_aftr(void);
 
 extern struct cpuidle_exynos_data cpuidle_coupled_exynos_data;
 
+extern void exynos_set_delayed_reset_assertion(bool enable);
+
 extern void s5p_init_cpu(void __iomem *cpuid_addr);
 extern unsigned int samsung_rev(void);
 extern void __iomem *cpu_boot_reg_base(void);
index bcde0dd668df950f1918a66179715a56ec9578fa..5917a30eee33f286c80708dfb84cdd610bd2dd9d 100644 (file)
@@ -166,6 +166,33 @@ static void __init exynos_init_io(void)
        exynos_map_io();
 }
 
+/*
+ * Set or clear the USE_DELAYED_RESET_ASSERTION option. Used by smp code
+ * and suspend.
+ *
+ * This is necessary only on Exynos4 SoCs. While the system is running,
+ * USE_DELAYED_RESET_ASSERTION should be set so the ARM CLK clock-down
+ * feature can properly detect the global idle state when a secondary
+ * CPU is powered down.
+ *
+ * However, it should not be set while the system is entering suspend.
+ */
+void exynos_set_delayed_reset_assertion(bool enable)
+{
+       if (of_machine_is_compatible("samsung,exynos4")) {
+               unsigned int tmp, core_id;
+
+               for (core_id = 0; core_id < num_possible_cpus(); core_id++) {
+                       tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
+                       if (enable)
+                               tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
+                       else
+                               tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
+                       pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
+               }
+       }
+}
+
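
[Editor's note] For orientation, the call sites added elsewhere in this series pair up as follows (a summary of later hunks in this diff, not new API):

	exynos_set_delayed_reset_assertion(true);   /* smp_prepare_cpus: normal run */
	exynos_set_delayed_reset_assertion(false);  /* exynos_pm_prepare: suspend   */
	exynos_set_delayed_reset_assertion(true);   /* resume path, after INFORM1   */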
 /*
  * Apparently, these SoCs are not able to wake-up from suspend using
  * the PMU. Too bad. Should they suddenly become capable of such a
index ebd135bb0995611517c1bce6d3b70deeeee37de4..a825bca2a2b699490809676cef6f66d4eb86fd21 100644 (file)
 
 extern void exynos4_secondary_startup(void);
 
-/*
- * Set or clear the USE_DELAYED_RESET_ASSERTION option, set on Exynos4 SoCs
- * during hot-(un)plugging CPUx.
- *
- * The feature can be cleared safely during first boot of secondary CPU.
- *
- * Exynos4 SoCs require setting USE_DELAYED_RESET_ASSERTION during powering
- * down a CPU so the CPU idle clock down feature could properly detect global
- * idle state when CPUx is off.
- */
-static void exynos_set_delayed_reset_assertion(u32 core_id, bool enable)
-{
-       if (soc_is_exynos4()) {
-               unsigned int tmp;
-
-               tmp = pmu_raw_readl(EXYNOS_ARM_CORE_OPTION(core_id));
-               if (enable)
-                       tmp |= S5P_USE_DELAYED_RESET_ASSERTION;
-               else
-                       tmp &= ~(S5P_USE_DELAYED_RESET_ASSERTION);
-               pmu_raw_writel(tmp, EXYNOS_ARM_CORE_OPTION(core_id));
-       }
-}
-
 #ifdef CONFIG_HOTPLUG_CPU
 static inline void cpu_leave_lowpower(u32 core_id)
 {
@@ -73,8 +49,6 @@ static inline void cpu_leave_lowpower(u32 core_id)
          : "=&r" (v)
          : "Ir" (CR_C), "Ir" (0x40)
          : "cc");
-
-        exynos_set_delayed_reset_assertion(core_id, false);
 }
 
 static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
@@ -87,14 +61,6 @@ static inline void platform_do_lowpower(unsigned int cpu, int *spurious)
                /* Turn the CPU off on next WFI instruction. */
                exynos_cpu_power_down(core_id);
 
-               /*
-                * Exynos4 SoCs require setting
-                * USE_DELAYED_RESET_ASSERTION so the CPU idle
-                * clock down feature could properly detect
-                * global idle state when CPUx is off.
-                */
-               exynos_set_delayed_reset_assertion(core_id, true);
-
                wfi();
 
                if (pen_release == core_id) {
@@ -371,9 +337,6 @@ static int exynos_boot_secondary(unsigned int cpu, struct task_struct *idle)
                udelay(10);
        }
 
-       /* No harm if this is called during first boot of secondary CPU */
-       exynos_set_delayed_reset_assertion(core_id, false);
-
        /*
         * now the secondary core is starting up let it run its
         * calibrations, then wait for it to finish
@@ -420,6 +383,8 @@ static void __init exynos_smp_prepare_cpus(unsigned int max_cpus)
 
        exynos_sysram_init();
 
+       exynos_set_delayed_reset_assertion(true);
+
        if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
                scu_enable(scu_base_addr());
 
index cbe56b35aea000fe2ad5841851b5eb9a57a307d5..a9686535f9ed460706a41a99dc60d6d4f575ab3b 100644 (file)
@@ -188,7 +188,7 @@ no_clk:
                args.np = np;
                args.args_count = 0;
                child_domain = of_genpd_get_from_provider(&args);
-               if (!child_domain)
+               if (IS_ERR(child_domain))
                        continue;
 
                if (of_parse_phandle_with_args(np, "power-domains",
@@ -196,7 +196,7 @@ no_clk:
                        continue;
 
                parent_domain = of_genpd_get_from_provider(&args);
-               if (!parent_domain)
+               if (IS_ERR(parent_domain))
                        continue;
 
                if (pm_genpd_add_subdomain(parent_domain, child_domain))
index 3e6aea7f83af199d7c624372fb96549430504cc2..7d23ce04cad5201919a58aefccafc88e882dd844 100644 (file)
@@ -87,8 +87,8 @@ static unsigned int exynos_pmu_spare3;
 static u32 exynos_irqwake_intmask = 0xffffffff;
 
 static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
-       { 105, BIT(1) }, /* RTC alarm */
-       { 106, BIT(2) }, /* RTC tick */
+       { 73, BIT(1) }, /* RTC alarm */
+       { 74, BIT(2) }, /* RTC tick */
        { /* sentinel */ },
 };
 
@@ -342,6 +342,8 @@ static void exynos_pm_enter_sleep_mode(void)
 
 static void exynos_pm_prepare(void)
 {
+       exynos_set_delayed_reset_assertion(false);
+
        /* Set wake-up mask registers */
        exynos_pm_set_wakeup_mask();
 
@@ -482,6 +484,7 @@ early_wakeup:
 
        /* Clear SLEEP mode set in INFORM1 */
        pmu_raw_writel(0x0, S5P_INFORM1);
+       exynos_set_delayed_reset_assertion(true);
 }
 
 static void exynos3250_pm_resume(void)
@@ -723,8 +726,10 @@ void __init exynos_pm_init(void)
                return;
        }
 
-       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL)))
+       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
                pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+               return;
+       }
 
        pm_data = (const struct exynos_pm_data *) match->data;
 
index 38a45260a7c8b522dc3b8c163a19d4debe711ca9..dd883698ff7eafe977d2a220468d5aad876ce5ce 100644 (file)
@@ -12,6 +12,8 @@
 #ifndef __GEMINI_COMMON_H__
 #define __GEMINI_COMMON_H__
 
+#include <linux/reboot.h>
+
 struct mtd_partition;
 
 extern void gemini_map_io(void);
@@ -26,6 +28,6 @@ extern int platform_register_pflash(unsigned int size,
                                    struct mtd_partition *parts,
                                    unsigned int nr_parts);
 
-extern void gemini_restart(char mode, const char *cmd);
+extern void gemini_restart(enum reboot_mode mode, const char *cmd);
 
 #endif /* __GEMINI_COMMON_H__ */
index b26659759e2750bfef5f171b7d770905019e58ed..21a6d6d4f9c43d1ab8a608962761098bc847e35f 100644 (file)
@@ -14,7 +14,9 @@
 #include <mach/hardware.h>
 #include <mach/global_reg.h>
 
-void gemini_restart(char mode, const char *cmd)
+#include "common.h"
+
+void gemini_restart(enum reboot_mode mode, const char *cmd)
 {
        __raw_writel(RESET_GLOBAL | RESET_CPU1,
                     IO_ADDRESS(GEMINI_GLOBAL_BASE) + GLOBAL_RESET);
index fb8d4a2ad48c0629b3cfbbe7c2b76c6646c9b436..a5edd7d60266985472ac9de11e6fb9801f8eadeb 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2010 Pengutronix, Wolfram Sang <w.sang@pengutronix.de>
+ * Copyright (C) 2010 Pengutronix, Wolfram Sang <kernel@pengutronix.de>
  *
  * This program is free software; you can redistribute it and/or modify it under
  * the terms of the GNU General Public License version 2 as published by the
index 4d60005e9277ce8f33307f411dd5c0baa8a6ac7a..6d0893a3828eb6b57322ddd0e0df26ac7e32bd85 100644 (file)
@@ -280,9 +280,15 @@ void __init imx_gpc_check_dt(void)
        struct device_node *np;
 
        np = of_find_compatible_node(NULL, NULL, "fsl,imx6q-gpc");
-       if (WARN_ON(!np ||
-                   !of_find_property(np, "interrupt-controller", NULL)))
-               pr_warn("Outdated DT detected, system is about to crash!!!\n");
+       if (WARN_ON(!np))
+               return;
+
+       if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
+               pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
+
+               /* map GPC, so that at least CPUidle and WARs keep working */
+               gpc_base = of_iomap(np, 0);
+       }
 }
 
 #ifdef CONFIG_PM_GENERIC_DOMAINS
@@ -443,6 +449,10 @@ static int imx_gpc_probe(struct platform_device *pdev)
        struct regulator *pu_reg;
        int ret;
 
+       /* bail out if DT too old and doesn't provide the necessary info */
+       if (!of_property_read_bool(pdev->dev.of_node, "#power-domain-cells"))
+               return 0;
+
        pu_reg = devm_regulator_get_optional(&pdev->dev, "pu");
        if (PTR_ERR(pu_reg) == -ENODEV)
                pu_reg = NULL;
index 355b089368715427627dd39f1014ed7024ae8459..752969ff9de04f95ef8b9a695325901b6297cf78 100644 (file)
  */
 #define LINKS_PER_OCP_IF               2
 
+/*
+ * Address offset (in bytes) between the reset control and the reset
+ * status registers: 4 bytes on OMAP4
+ */
+#define OMAP4_RST_CTRL_ST_OFFSET       4
+
 /**
  * struct omap_hwmod_soc_ops - fn ptrs for some SoC-specific operations
  * @enable_module: function to enable a module (via MODULEMODE)
@@ -3016,10 +3022,12 @@ static int _omap4_deassert_hardreset(struct omap_hwmod *oh,
        if (ohri->st_shift)
                pr_err("omap_hwmod: %s: %s: hwmod data error: OMAP4 does not support st_shift\n",
                       oh->name, ohri->name);
-       return omap_prm_deassert_hardreset(ohri->rst_shift, 0,
+       return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->rst_shift,
                                           oh->clkdm->pwrdm.ptr->prcm_partition,
                                           oh->clkdm->pwrdm.ptr->prcm_offs,
-                                          oh->prcm.omap4.rstctrl_offs, 0);
+                                          oh->prcm.omap4.rstctrl_offs,
+                                          oh->prcm.omap4.rstctrl_offs +
+                                          OMAP4_RST_CTRL_ST_OFFSET);
 }
 
 /**
@@ -3047,27 +3055,6 @@ static int _omap4_is_hardreset_asserted(struct omap_hwmod *oh,
                                              oh->prcm.omap4.rstctrl_offs);
 }
 
-/**
- * _am33xx_assert_hardreset - call AM33XX PRM hardreset fn with hwmod args
- * @oh: struct omap_hwmod * to assert hardreset
- * @ohri: hardreset line data
- *
- * Call am33xx_prminst_assert_hardreset() with parameters extracted
- * from the hwmod @oh and the hardreset line data @ohri.  Only
- * intended for use as an soc_ops function pointer.  Passes along the
- * return value from am33xx_prminst_assert_hardreset().  XXX This
- * function is scheduled for removal when the PRM code is moved into
- * drivers/.
- */
-static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
-                                  struct omap_hwmod_rst_info *ohri)
-
-{
-       return omap_prm_assert_hardreset(ohri->rst_shift, 0,
-                                        oh->clkdm->pwrdm.ptr->prcm_offs,
-                                        oh->prcm.omap4.rstctrl_offs);
-}
-
 /**
  * _am33xx_deassert_hardreset - call AM33XX PRM hardreset fn with hwmod args
  * @oh: struct omap_hwmod * to deassert hardreset
@@ -3083,32 +3070,13 @@ static int _am33xx_assert_hardreset(struct omap_hwmod *oh,
 static int _am33xx_deassert_hardreset(struct omap_hwmod *oh,
                                     struct omap_hwmod_rst_info *ohri)
 {
-       return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift, 0,
+       return omap_prm_deassert_hardreset(ohri->rst_shift, ohri->st_shift,
+                                          oh->clkdm->pwrdm.ptr->prcm_partition,
                                           oh->clkdm->pwrdm.ptr->prcm_offs,
                                           oh->prcm.omap4.rstctrl_offs,
                                           oh->prcm.omap4.rstst_offs);
 }
 
-/**
- * _am33xx_is_hardreset_asserted - call AM33XX PRM hardreset fn with hwmod args
- * @oh: struct omap_hwmod * to test hardreset
- * @ohri: hardreset line data
- *
- * Call am33xx_prminst_is_hardreset_asserted() with parameters
- * extracted from the hwmod @oh and the hardreset line data @ohri.
- * Only intended for use as an soc_ops function pointer.  Passes along
- * the return value from am33xx_prminst_is_hardreset_asserted().  XXX
- * This function is scheduled for removal when the PRM code is moved
- * into drivers/.
- */
-static int _am33xx_is_hardreset_asserted(struct omap_hwmod *oh,
-                                       struct omap_hwmod_rst_info *ohri)
-{
-       return omap_prm_is_hardreset_asserted(ohri->rst_shift, 0,
-                                             oh->clkdm->pwrdm.ptr->prcm_offs,
-                                             oh->prcm.omap4.rstctrl_offs);
-}
-
 /* Public functions */
 
 u32 omap_hwmod_read(struct omap_hwmod *oh, u16 reg_offs)
@@ -3908,21 +3876,13 @@ void __init omap_hwmod_init(void)
                soc_ops.init_clkdm = _init_clkdm;
                soc_ops.update_context_lost = _omap4_update_context_lost;
                soc_ops.get_context_lost = _omap4_get_context_lost;
-       } else if (soc_is_am43xx()) {
+       } else if (cpu_is_ti816x() || soc_is_am33xx() || soc_is_am43xx()) {
                soc_ops.enable_module = _omap4_enable_module;
                soc_ops.disable_module = _omap4_disable_module;
                soc_ops.wait_target_ready = _omap4_wait_target_ready;
                soc_ops.assert_hardreset = _omap4_assert_hardreset;
-               soc_ops.deassert_hardreset = _omap4_deassert_hardreset;
-               soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
-               soc_ops.init_clkdm = _init_clkdm;
-       } else if (cpu_is_ti816x() || soc_is_am33xx()) {
-               soc_ops.enable_module = _omap4_enable_module;
-               soc_ops.disable_module = _omap4_disable_module;
-               soc_ops.wait_target_ready = _omap4_wait_target_ready;
-               soc_ops.assert_hardreset = _am33xx_assert_hardreset;
                soc_ops.deassert_hardreset = _am33xx_deassert_hardreset;
-               soc_ops.is_hardreset_asserted = _am33xx_is_hardreset_asserted;
+               soc_ops.is_hardreset_asserted = _omap4_is_hardreset_asserted;
                soc_ops.init_clkdm = _init_clkdm;
        } else {
                WARN(1, "omap_hwmod: unknown SoC type\n");
index e2223148ba4d37a0818fc65d19ddb00bdc49d56c..17e8004fc20f9e48c69722c32643f7684b7c9eb2 100644 (file)
@@ -544,6 +544,44 @@ static struct omap_hwmod am43xx_hdq1w_hwmod = {
        },
 };
 
+static struct omap_hwmod_class_sysconfig am43xx_vpfe_sysc = {
+       .rev_offs       = 0x0,
+       .sysc_offs      = 0x104,
+       .sysc_flags     = SYSC_HAS_MIDLEMODE | SYSC_HAS_SIDLEMODE,
+       .idlemodes      = (SIDLE_FORCE | SIDLE_NO | SIDLE_SMART |
+                               MSTANDBY_FORCE | MSTANDBY_SMART | MSTANDBY_NO),
+       .sysc_fields    = &omap_hwmod_sysc_type2,
+};
+
+static struct omap_hwmod_class am43xx_vpfe_hwmod_class = {
+       .name           = "vpfe",
+       .sysc           = &am43xx_vpfe_sysc,
+};
+
+static struct omap_hwmod am43xx_vpfe0_hwmod = {
+       .name           = "vpfe0",
+       .class          = &am43xx_vpfe_hwmod_class,
+       .clkdm_name     = "l3s_clkdm",
+       .prcm           = {
+               .omap4  = {
+                       .modulemode     = MODULEMODE_SWCTRL,
+                       .clkctrl_offs   = AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET,
+               },
+       },
+};
+
+static struct omap_hwmod am43xx_vpfe1_hwmod = {
+       .name           = "vpfe1",
+       .class          = &am43xx_vpfe_hwmod_class,
+       .clkdm_name     = "l3s_clkdm",
+       .prcm           = {
+               .omap4  = {
+                       .modulemode     = MODULEMODE_SWCTRL,
+                       .clkctrl_offs   = AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET,
+               },
+       },
+};
+
 /* Interfaces */
 static struct omap_hwmod_ocp_if am43xx_l3_main__l4_hs = {
        .master         = &am33xx_l3_main_hwmod,
@@ -825,6 +863,34 @@ static struct omap_hwmod_ocp_if am43xx_l4_ls__hdq1w = {
        .user           = OCP_USER_MPU | OCP_USER_SDMA,
 };
 
+static struct omap_hwmod_ocp_if am43xx_l3__vpfe0 = {
+       .master         = &am43xx_vpfe0_hwmod,
+       .slave          = &am33xx_l3_main_hwmod,
+       .clk            = "l3_gclk",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l3__vpfe1 = {
+       .master         = &am43xx_vpfe1_hwmod,
+       .slave          = &am33xx_l3_main_hwmod,
+       .clk            = "l3_gclk",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe0 = {
+       .master         = &am33xx_l4_ls_hwmod,
+       .slave          = &am43xx_vpfe0_hwmod,
+       .clk            = "l4ls_gclk",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
+static struct omap_hwmod_ocp_if am43xx_l4_ls__vpfe1 = {
+       .master         = &am33xx_l4_ls_hwmod,
+       .slave          = &am43xx_vpfe1_hwmod,
+       .clk            = "l4ls_gclk",
+       .user           = OCP_USER_MPU | OCP_USER_SDMA,
+};
+
 static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
        &am33xx_l4_wkup__synctimer,
        &am43xx_l4_ls__timer8,
@@ -925,6 +991,10 @@ static struct omap_hwmod_ocp_if *am43xx_hwmod_ocp_ifs[] __initdata = {
        &am43xx_l4_ls__dss_dispc,
        &am43xx_l4_ls__dss_rfbi,
        &am43xx_l4_ls__hdq1w,
+       &am43xx_l3__vpfe0,
+       &am43xx_l3__vpfe1,
+       &am43xx_l4_ls__vpfe0,
+       &am43xx_l4_ls__vpfe1,
        NULL,
 };
 
index 48df3b55057e4d346076ada24e6e7404a13a117a..d0261996db6d5e0e0f91f40e9429b4c9ea73a4c8 100644 (file)
 #define AM43XX_CM_PER_USBPHYOCP2SCP1_CLKCTRL_OFFSET    0x05C0
 #define AM43XX_CM_PER_DSS_CLKCTRL_OFFSET               0x0a20
 #define AM43XX_CM_PER_HDQ1W_CLKCTRL_OFFSET             0x04a0
-
+#define AM43XX_CM_PER_VPFE0_CLKCTRL_OFFSET             0x0068
+#define AM43XX_CM_PER_VPFE1_CLKCTRL_OFFSET             0x0070
 #endif
index cbefbd7cfdb5c02da2a9815f9662fe75879fe969..661d753df58469ecc84081ca3e2eef075de7d3cb 100644 (file)
 #define OMAP3430_VC_CMD_ONLP_SHIFT                     16
 #define OMAP3430_VC_CMD_RET_SHIFT                      8
 #define OMAP3430_VC_CMD_OFF_SHIFT                      0
+#define OMAP3430_SREN_MASK                             (1 << 4)
 #define OMAP3430_HSEN_MASK                             (1 << 3)
 #define OMAP3430_MCODE_MASK                            (0x7 << 0)
 #define OMAP3430_VALID_MASK                            (1 << 24)
index b1c7a33e00e74c36369b641a14c912110e17a23c..e794828dee553b2e943bc74b4ae819874c40455e 100644 (file)
@@ -35,6 +35,7 @@
 #define OMAP4430_GLOBAL_WARM_SW_RST_SHIFT                              1
 #define OMAP4430_GLOBAL_WUEN_MASK                                      (1 << 16)
 #define OMAP4430_HSMCODE_MASK                                          (0x7 << 0)
+#define OMAP4430_SRMODEEN_MASK                                         (1 << 4)
 #define OMAP4430_HSMODEEN_MASK                                         (1 << 3)
 #define OMAP4430_HSSCLL_SHIFT                                          24
 #define OMAP4430_ICEPICK_RST_SHIFT                                     9
index c4859c4d364692b575199f0287b4ee4751ccdee2..d0b15dbafa2efa01b1ba06531d1e163d0e70f16c 100644 (file)
@@ -87,12 +87,6 @@ u32 omap4_prminst_rmw_inst_reg_bits(u32 mask, u32 bits, u8 part, s16 inst,
        return v;
 }
 
-/*
- * Address offset (in bytes) between the reset control and the reset
- * status registers: 4 bytes on OMAP4
- */
-#define OMAP4_RST_CTRL_ST_OFFSET               4
-
 /**
  * omap4_prminst_is_hardreset_asserted - read the HW reset line state of
  * submodules contained in the hwmod module
@@ -141,11 +135,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
  * omap4_prminst_deassert_hardreset - deassert a submodule hardreset line and
  * wait
  * @shift: register bit shift corresponding to the reset line to deassert
- * @st_shift: status bit offset, not used for OMAP4+
+ * @st_shift: status bit offset corresponding to the reset line
  * @part: PRM partition
  * @inst: PRM instance offset
  * @rstctrl_offs: reset register offset
- * @st_offs: reset status register offset, not used for OMAP4+
+ * @rstst_offs: reset status register offset
  *
  * Some IPs like dsp, ipu or iva contain processors that require an HW
  * reset line to be asserted / deasserted in order to fully enable the
@@ -157,11 +151,11 @@ int omap4_prminst_assert_hardreset(u8 shift, u8 part, s16 inst,
  * of reset, or -EBUSY if the submodule did not exit reset promptly.
  */
 int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
-                                    u16 rstctrl_offs, u16 st_offs)
+                                    u16 rstctrl_offs, u16 rstst_offs)
 {
        int c;
        u32 mask = 1 << shift;
-       u16 rstst_offs = rstctrl_offs + OMAP4_RST_CTRL_ST_OFFSET;
+       u32 st_mask = 1 << st_shift;
 
        /* Check the current status to avoid de-asserting the line twice */
        if (omap4_prminst_is_hardreset_asserted(shift, part, inst,
@@ -169,13 +163,13 @@ int omap4_prminst_deassert_hardreset(u8 shift, u8 st_shift, u8 part, s16 inst,
                return -EEXIST;
 
        /* Clear the reset status by writing 1 to the status bit */
-       omap4_prminst_rmw_inst_reg_bits(0xffffffff, mask, part, inst,
+       omap4_prminst_rmw_inst_reg_bits(0xffffffff, st_mask, part, inst,
                                        rstst_offs);
        /* de-assert the reset control line */
        omap4_prminst_rmw_inst_reg_bits(mask, 0, part, inst, rstctrl_offs);
        /* wait the status to be set */
-       omap_test_timeout(omap4_prminst_is_hardreset_asserted(shift, part, inst,
-                                                             rstst_offs),
+       omap_test_timeout(omap4_prminst_is_hardreset_asserted(st_shift, part,
+                                                             inst, rstst_offs),
                          MAX_MODULE_HARDRESET_WAIT, c);
 
        return (c == MAX_MODULE_HARDRESET_WAIT) ? -EBUSY : 0;
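
[Editor's note] With the reworked signature, the control and status bits are independent, which is what lets AM33xx share this code: the OMAP4 hwmod layer now passes rst_shift for both, while AM33xx IPs may have st_shift != rst_shift. Illustrative values only:

	mask    = 1 << shift;      /* bit written in RSTCTRL           */
	st_mask = 1 << st_shift;   /* bit cleared and polled in RSTST  */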
index d1dedc8195ed2569508e0d522301bdf535aefda8..eafd120b53f1bc15c82f2cc47dc8033e31ca566e 100644 (file)
@@ -203,23 +203,8 @@ save_context_wfi:
         */
        ldr     r1, kernel_flush
        blx     r1
-       /*
-        * The kernel doesn't interwork: v7_flush_dcache_all in particluar will
-        * always return in Thumb state when CONFIG_THUMB2_KERNEL is enabled.
-        * This sequence switches back to ARM.  Note that .align may insert a
-        * nop: bx pc needs to be word-aligned in order to work.
-        */
- THUMB(        .thumb          )
- THUMB(        .align          )
- THUMB(        bx      pc      )
- THUMB(        nop             )
-       .arm
-
        b       omap3_do_wfi
-
-/*
- * Local variables
- */
+ENDPROC(omap34xx_cpu_suspend)
 omap3_do_wfi_sram_addr:
        .word omap3_do_wfi_sram
 kernel_flush:
@@ -364,10 +349,7 @@ exit_nonoff_modes:
  * ===================================
  */
        ldmfd   sp!, {r4 - r11, pc}     @ restore regs and return
-
-/*
- * Local variables
- */
+ENDPROC(omap3_do_wfi)
 sdrc_power:
        .word   SDRC_POWER_V
 cm_idlest1_core:
index cef67af9e9b88aa50416c46cf4aab66769964437..cac46d852da18003a21fa3d66278e11e6cf51afc 100644 (file)
@@ -298,14 +298,11 @@ static int __init omap_dm_timer_init_one(struct omap_dm_timer *timer,
        if (IS_ERR(src))
                return PTR_ERR(src);
 
-       if (clk_get_parent(timer->fclk) != src) {
-               r = clk_set_parent(timer->fclk, src);
-               if (r < 0) {
-                       pr_warn("%s: %s cannot set source\n", __func__,
-                               oh->name);
-                       clk_put(src);
-                       return r;
-               }
+       r = clk_set_parent(timer->fclk, src);
+       if (r < 0) {
+               pr_warn("%s: %s cannot set source\n", __func__, oh->name);
+               clk_put(src);
+               return r;
        }
 
        clk_put(src);
index be9ef834fa81d56666f247dae2a598ef422ee15d..076fd20d7e5aa03adfa9d14ec15293e374ceaace 100644 (file)
@@ -316,7 +316,8 @@ static void __init omap3_vc_init_pmic_signaling(struct voltagedomain *voltdm)
         * idle. And we can also scale voltages to zero for off-idle.
         * Note that no actual voltage scaling during off-idle will
         * happen unless the board specific twl4030 PMIC scripts are
-        * loaded.
+        * loaded. See also omap_vc_i2c_init for comments regarding
+        * erratum i531.
         */
        val = voltdm->read(OMAP3_PRM_VOLTCTRL_OFFSET);
        if (!(val & OMAP3430_PRM_VOLTCTRL_SEL_OFF)) {
@@ -704,9 +705,16 @@ static void __init omap_vc_i2c_init(struct voltagedomain *voltdm)
                return;
        }
 
+       /*
+        * Note that for omap3 OMAP3430_SREN_MASK clears SREN to work around
+        * erratum i531 "Extra Power Consumed When Repeated Start Operation
+        * Mode Is Enabled on I2C Interface Dedicated for Smart Reflex (I2C4)".
+        * Otherwise I2C4 eventually leads into about 23mW extra power being
+        * consumed even during off idle using VMODE.
+        */
        i2c_high_speed = voltdm->pmic->i2c_high_speed;
        if (i2c_high_speed)
-               voltdm->rmw(vc->common->i2c_cfg_hsen_mask,
+               voltdm->rmw(vc->common->i2c_cfg_clear_mask,
                            vc->common->i2c_cfg_hsen_mask,
                            vc->common->i2c_cfg_reg);
 
index cdbdd78e755e2c60347ca66ca378d5895a9ab87e..89b83b7ff3ec5c34d4906f1504700c45c6a00abc 100644 (file)
@@ -34,6 +34,7 @@ struct voltagedomain;
  * @cmd_ret_shift: RET field shift in PRM_VC_CMD_VAL_* register
  * @cmd_off_shift: OFF field shift in PRM_VC_CMD_VAL_* register
  * @i2c_cfg_reg: I2C configuration register offset
+ * @i2c_cfg_clear_mask: high-speed mode bit clear mask in I2C config register
  * @i2c_cfg_hsen_mask: high-speed mode bit field mask in I2C config register
  * @i2c_mcode_mask: MCODE field mask for I2C config register
  *
@@ -52,6 +53,7 @@ struct omap_vc_common {
        u8 cmd_ret_shift;
        u8 cmd_off_shift;
        u8 i2c_cfg_reg;
+       u8 i2c_cfg_clear_mask;
        u8 i2c_cfg_hsen_mask;
        u8 i2c_mcode_mask;
 };
index 75bc4aa22b3a0963d0e6ed468fecc5e17257eef4..71d74c9172c15f0169203be438f6756b0a164965 100644 (file)
@@ -40,6 +40,7 @@ static struct omap_vc_common omap3_vc_common = {
        .cmd_onlp_shift  = OMAP3430_VC_CMD_ONLP_SHIFT,
        .cmd_ret_shift   = OMAP3430_VC_CMD_RET_SHIFT,
        .cmd_off_shift   = OMAP3430_VC_CMD_OFF_SHIFT,
+       .i2c_cfg_clear_mask = OMAP3430_SREN_MASK | OMAP3430_HSEN_MASK,
        .i2c_cfg_hsen_mask = OMAP3430_HSEN_MASK,
        .i2c_cfg_reg     = OMAP3_PRM_VC_I2C_CFG_OFFSET,
        .i2c_mcode_mask  = OMAP3430_MCODE_MASK,
index 085e5d6a04fd088c5422235478bb012861a96078..2abd5fa8a6972d2a435735c6419f750fbb37b567 100644 (file)
@@ -42,6 +42,7 @@ static const struct omap_vc_common omap4_vc_common = {
        .cmd_ret_shift = OMAP4430_RET_SHIFT,
        .cmd_off_shift = OMAP4430_OFF_SHIFT,
        .i2c_cfg_reg = OMAP4_PRM_VC_CFG_I2C_MODE_OFFSET,
+       .i2c_cfg_clear_mask = OMAP4430_SRMODEEN_MASK | OMAP4430_HSMODEEN_MASK,
        .i2c_cfg_hsen_mask = OMAP4430_HSMODEEN_MASK,
        .i2c_mcode_mask  = OMAP4430_HSMCODE_MASK,
 };
index 8896e71586f5e2255149db0ac4eb5cf90ece9e6b..f096836879634fb87897574a1476e26a196fb349 100644 (file)
@@ -691,4 +691,13 @@ config SHARPSL_PM_MAX1111
 config PXA310_ULPI
        bool
 
+config PXA_SYSTEMS_CPLDS
+       tristate "Motherboard cplds"
+       default ARCH_LUBBOCK || MACH_MAINSTONE
+       help
+         This driver supports the Lubbock and Mainstone multifunction chips
+         found on the pxa25x (Lubbock) and pxa27x (Mainstone) development
+         platform systems. These IO boards provide interrupt handling, an
+         ethernet controller, flash chips, etc ...
+
 endif
index eb0bf7678a9909fdb473b791eb8f183877c73588..4087d334ecdfc6e36ace6ed23ce30fb9198ee6ef 100644 (file)
@@ -90,4 +90,5 @@ obj-$(CONFIG_MACH_RAUMFELD_CONNECTOR) += raumfeld.o
 obj-$(CONFIG_MACH_RAUMFELD_SPEAKER)    += raumfeld.o
 obj-$(CONFIG_MACH_ZIPIT2)      += z2.o
 
+obj-$(CONFIG_PXA_SYSTEMS_CPLDS)        += pxa_cplds_irqs.o
 obj-$(CONFIG_TOSA_BT)          += tosa-bt.o
index 958cd6af93842566308a33d11f1799dd78f332d9..1eecf794acd2e4ab0605f5904a6df23575322aad 100644 (file)
@@ -37,7 +37,9 @@
 #define LUB_GP                 __LUB_REG(LUBBOCK_FPGA_PHYS + 0x100)
 
 /* Board specific IRQs */
-#define LUBBOCK_IRQ(x)         (IRQ_BOARD_START + (x))
+#define LUBBOCK_NR_IRQS                IRQ_BOARD_START
+
+#define LUBBOCK_IRQ(x)         (LUBBOCK_NR_IRQS + (x))
 #define LUBBOCK_SD_IRQ         LUBBOCK_IRQ(0)
 #define LUBBOCK_SA1111_IRQ     LUBBOCK_IRQ(1)
 #define LUBBOCK_USB_IRQ                LUBBOCK_IRQ(2)  /* usb connect */
@@ -47,8 +49,7 @@
 #define LUBBOCK_USB_DISC_IRQ   LUBBOCK_IRQ(6)  /* usb disconnect */
 #define LUBBOCK_LAST_IRQ       LUBBOCK_IRQ(6)
 
-#define LUBBOCK_SA1111_IRQ_BASE        (IRQ_BOARD_START + 16)
-#define LUBBOCK_NR_IRQS                (IRQ_BOARD_START + 16 + 55)
+#define LUBBOCK_SA1111_IRQ_BASE        (LUBBOCK_NR_IRQS + 32)
 
 #ifndef __ASSEMBLY__
 extern void lubbock_set_misc_wr(unsigned int mask, unsigned int set);
index 1bfc4e822a4152439a1cd0220ee614147472edc6..e82a7d31104e02f5c21987d198fe229bad8afff5 100644 (file)
 #define MST_PCMCIA_PWR_VCC_50   0x4       /* voltage VCC = 5.0V */
 
 /* board specific IRQs */
-#define MAINSTONE_IRQ(x)       (IRQ_BOARD_START + (x))
+#define MAINSTONE_NR_IRQS      IRQ_BOARD_START
+
+#define MAINSTONE_IRQ(x)       (MAINSTONE_NR_IRQS + (x))
 #define MAINSTONE_MMC_IRQ      MAINSTONE_IRQ(0)
 #define MAINSTONE_USIM_IRQ     MAINSTONE_IRQ(1)
 #define MAINSTONE_USBC_IRQ     MAINSTONE_IRQ(2)
 #define MAINSTONE_S1_STSCHG_IRQ        MAINSTONE_IRQ(14)
 #define MAINSTONE_S1_IRQ       MAINSTONE_IRQ(15)
 
-#define MAINSTONE_NR_IRQS      (IRQ_BOARD_START + 16)
-
 #endif
index d8a1be619f21c7578bbde5fa37d00907fa80e353..4ac9ab80d24bdf147c23a4f31ab85d76cb4a82c5 100644 (file)
@@ -12,6 +12,7 @@
  *  published by the Free Software Foundation.
  */
 #include <linux/gpio.h>
+#include <linux/gpio/machine.h>
 #include <linux/module.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
@@ -123,84 +124,6 @@ void lubbock_set_misc_wr(unsigned int mask, unsigned int set)
 }
 EXPORT_SYMBOL(lubbock_set_misc_wr);
 
-static unsigned long lubbock_irq_enabled;
-
-static void lubbock_mask_irq(struct irq_data *d)
-{
-       int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
-       LUB_IRQ_MASK_EN = (lubbock_irq_enabled &= ~(1 << lubbock_irq));
-}
-
-static void lubbock_unmask_irq(struct irq_data *d)
-{
-       int lubbock_irq = (d->irq - LUBBOCK_IRQ(0));
-       /* the irq can be acknowledged only if deasserted, so it's done here */
-       LUB_IRQ_SET_CLR &= ~(1 << lubbock_irq);
-       LUB_IRQ_MASK_EN = (lubbock_irq_enabled |= (1 << lubbock_irq));
-}
-
-static struct irq_chip lubbock_irq_chip = {
-       .name           = "FPGA",
-       .irq_ack        = lubbock_mask_irq,
-       .irq_mask       = lubbock_mask_irq,
-       .irq_unmask     = lubbock_unmask_irq,
-};
-
-static void lubbock_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
-       unsigned long pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
-       do {
-               /* clear our parent irq */
-               desc->irq_data.chip->irq_ack(&desc->irq_data);
-               if (likely(pending)) {
-                       irq = LUBBOCK_IRQ(0) + __ffs(pending);
-                       generic_handle_irq(irq);
-               }
-               pending = LUB_IRQ_SET_CLR & lubbock_irq_enabled;
-       } while (pending);
-}
-
-static void __init lubbock_init_irq(void)
-{
-       int irq;
-
-       pxa25x_init_irq();
-
-       /* setup extra lubbock irqs */
-       for (irq = LUBBOCK_IRQ(0); irq <= LUBBOCK_LAST_IRQ; irq++) {
-               irq_set_chip_and_handler(irq, &lubbock_irq_chip,
-                                        handle_level_irq);
-               set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-       }
-
-       irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), lubbock_irq_handler);
-       irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
-}
-
-#ifdef CONFIG_PM
-
-static void lubbock_irq_resume(void)
-{
-       LUB_IRQ_MASK_EN = lubbock_irq_enabled;
-}
-
-static struct syscore_ops lubbock_irq_syscore_ops = {
-       .resume = lubbock_irq_resume,
-};
-
-static int __init lubbock_irq_device_init(void)
-{
-       if (machine_is_lubbock()) {
-               register_syscore_ops(&lubbock_irq_syscore_ops);
-               return 0;
-       }
-       return -ENODEV;
-}
-
-device_initcall(lubbock_irq_device_init);
-
-#endif
-
 static int lubbock_udc_is_connected(void)
 {
        return (LUB_MISC_RD & (1 << 9)) == 0;
@@ -383,11 +306,38 @@ static struct platform_device lubbock_flash_device[2] = {
        },
 };
 
+static struct resource lubbock_cplds_resources[] = {
+       [0] = {
+               .start  = LUBBOCK_FPGA_PHYS + 0xc0,
+               .end    = LUBBOCK_FPGA_PHYS + 0xe0 - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = PXA_GPIO_TO_IRQ(0),
+               .end    = PXA_GPIO_TO_IRQ(0),
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+       },
+       [2] = {
+               .start  = LUBBOCK_IRQ(0),
+               .end    = LUBBOCK_IRQ(6),
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device lubbock_cplds_device = {
+       .name           = "pxa_cplds_irqs",
+       .id             = -1,
+       .resource       = &lubbock_cplds_resources[0],
+       .num_resources  = 3,
+};
+
+
 static struct platform_device *devices[] __initdata = {
        &sa1111_device,
        &smc91x_device,
        &lubbock_flash_device[0],
        &lubbock_flash_device[1],
+       &lubbock_cplds_device,
 };
 
 static struct pxafb_mode_info sharp_lm8v31_mode = {
@@ -648,7 +598,7 @@ MACHINE_START(LUBBOCK, "Intel DBPXA250 Development Platform (aka Lubbock)")
        /* Maintainer: MontaVista Software Inc. */
        .map_io         = lubbock_map_io,
        .nr_irqs        = LUBBOCK_NR_IRQS,
-       .init_irq       = lubbock_init_irq,
+       .init_irq       = pxa25x_init_irq,
        .handle_irq     = pxa25x_handle_irq,
        .init_time      = pxa_timer_init,
        .init_machine   = lubbock_init,
index 78b84c0dfc79e63f173e3d299ea397d59bcf2115..2c0658cf6be261f7a7a6ea89409b0d3f52e5f0eb 100644 (file)
@@ -13,6 +13,7 @@
  *  published by the Free Software Foundation.
  */
 #include <linux/gpio.h>
+#include <linux/gpio/machine.h>
 #include <linux/init.h>
 #include <linux/platform_device.h>
 #include <linux/syscore_ops.h>
@@ -122,92 +123,6 @@ static unsigned long mainstone_pin_config[] = {
        GPIO1_GPIO | WAKEUP_ON_EDGE_BOTH,
 };
 
-static unsigned long mainstone_irq_enabled;
-
-static void mainstone_mask_irq(struct irq_data *d)
-{
-       int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
-       MST_INTMSKENA = (mainstone_irq_enabled &= ~(1 << mainstone_irq));
-}
-
-static void mainstone_unmask_irq(struct irq_data *d)
-{
-       int mainstone_irq = (d->irq - MAINSTONE_IRQ(0));
-       /* the irq can be acknowledged only if deasserted, so it's done here */
-       MST_INTSETCLR &= ~(1 << mainstone_irq);
-       MST_INTMSKENA = (mainstone_irq_enabled |= (1 << mainstone_irq));
-}
-
-static struct irq_chip mainstone_irq_chip = {
-       .name           = "FPGA",
-       .irq_ack        = mainstone_mask_irq,
-       .irq_mask       = mainstone_mask_irq,
-       .irq_unmask     = mainstone_unmask_irq,
-};
-
-static void mainstone_irq_handler(unsigned int irq, struct irq_desc *desc)
-{
-       unsigned long pending = MST_INTSETCLR & mainstone_irq_enabled;
-       do {
-               /* clear useless edge notification */
-               desc->irq_data.chip->irq_ack(&desc->irq_data);
-               if (likely(pending)) {
-                       irq = MAINSTONE_IRQ(0) + __ffs(pending);
-                       generic_handle_irq(irq);
-               }
-               pending = MST_INTSETCLR & mainstone_irq_enabled;
-       } while (pending);
-}
-
-static void __init mainstone_init_irq(void)
-{
-       int irq;
-
-       pxa27x_init_irq();
-
-       /* setup extra Mainstone irqs */
-       for(irq = MAINSTONE_IRQ(0); irq <= MAINSTONE_IRQ(15); irq++) {
-               irq_set_chip_and_handler(irq, &mainstone_irq_chip,
-                                        handle_level_irq);
-               if (irq == MAINSTONE_IRQ(10) || irq == MAINSTONE_IRQ(14))
-                       set_irq_flags(irq, IRQF_VALID | IRQF_PROBE | IRQF_NOAUTOEN);
-               else
-                       set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
-       }
-       set_irq_flags(MAINSTONE_IRQ(8), 0);
-       set_irq_flags(MAINSTONE_IRQ(12), 0);
-
-       MST_INTMSKENA = 0;
-       MST_INTSETCLR = 0;
-
-       irq_set_chained_handler(PXA_GPIO_TO_IRQ(0), mainstone_irq_handler);
-       irq_set_irq_type(PXA_GPIO_TO_IRQ(0), IRQ_TYPE_EDGE_FALLING);
-}
-
-#ifdef CONFIG_PM
-
-static void mainstone_irq_resume(void)
-{
-       MST_INTMSKENA = mainstone_irq_enabled;
-}
-
-static struct syscore_ops mainstone_irq_syscore_ops = {
-       .resume = mainstone_irq_resume,
-};
-
-static int __init mainstone_irq_device_init(void)
-{
-       if (machine_is_mainstone())
-               register_syscore_ops(&mainstone_irq_syscore_ops);
-
-       return 0;
-}
-
-device_initcall(mainstone_irq_device_init);
-
-#endif
-
-
 static struct resource smc91x_resources[] = {
        [0] = {
                .start  = (MST_ETH_PHYS + 0x300),
@@ -487,11 +402,37 @@ static struct platform_device mst_gpio_keys_device = {
        },
 };
 
+static struct resource mst_cplds_resources[] = {
+       [0] = {
+               .start  = MST_FPGA_PHYS + 0xc0,
+               .end    = MST_FPGA_PHYS + 0xe0 - 1,
+               .flags  = IORESOURCE_MEM,
+       },
+       [1] = {
+               .start  = PXA_GPIO_TO_IRQ(0),
+               .end    = PXA_GPIO_TO_IRQ(0),
+               .flags  = IORESOURCE_IRQ | IORESOURCE_IRQ_LOWEDGE,
+       },
+       [2] = {
+               .start  = MAINSTONE_IRQ(0),
+               .end    = MAINSTONE_IRQ(15),
+               .flags  = IORESOURCE_IRQ,
+       },
+};
+
+static struct platform_device mst_cplds_device = {
+       .name           = "pxa_cplds_irqs",
+       .id             = -1,
+       .resource       = &mst_cplds_resources[0],
+       .num_resources  = 3,
+};
+
 static struct platform_device *platform_devices[] __initdata = {
        &smc91x_device,
        &mst_flash_device[0],
        &mst_flash_device[1],
        &mst_gpio_keys_device,
+       &mst_cplds_device,
 };
 
 static struct pxaohci_platform_data mainstone_ohci_platform_data = {
@@ -718,7 +659,7 @@ MACHINE_START(MAINSTONE, "Intel HCDDBBVA0 Development Platform (aka Mainstone)")
        .atag_offset    = 0x100,        /* BLOB boot parameter setting */
        .map_io         = mainstone_map_io,
        .nr_irqs        = MAINSTONE_NR_IRQS,
-       .init_irq       = mainstone_init_irq,
+       .init_irq       = pxa27x_init_irq,
        .handle_irq     = pxa27x_handle_irq,
        .init_time      = pxa_timer_init,
        .init_machine   = mainstone_init,
diff --git a/arch/arm/mach-pxa/pxa_cplds_irqs.c b/arch/arm/mach-pxa/pxa_cplds_irqs.c
new file mode 100644 (file)
index 0000000..2385052
--- /dev/null
@@ -0,0 +1,200 @@
+/*
+ * Intel Reference Systems CPLDs
+ *
+ * Copyright (C) 2014 Robert Jarzmik
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * CPLDs motherboard driver, supporting the Lubbock and Mainstone SoC boards.
+ */
+
+#include <linux/bitops.h>
+#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/mfd/core.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+
+#define FPGA_IRQ_MASK_EN 0x0
+#define FPGA_IRQ_SET_CLR 0x10
+
+#define CPLDS_NB_IRQ   32
+
+struct cplds {
+       void __iomem *base;
+       int irq;
+       unsigned int irq_mask;
+       struct gpio_desc *gpio0;
+       struct irq_domain *irqdomain;
+};
+
+static irqreturn_t cplds_irq_handler(int in_irq, void *d)
+{
+       struct cplds *fpga = d;
+       unsigned long pending;
+       unsigned int bit;
+
+       pending = readl(fpga->base + FPGA_IRQ_SET_CLR) & fpga->irq_mask;
+       for_each_set_bit(bit, &pending, CPLDS_NB_IRQ)
+               generic_handle_irq(irq_find_mapping(fpga->irqdomain, bit));
+
+       return IRQ_HANDLED;
+}
+
+static void cplds_irq_mask_ack(struct irq_data *d)
+{
+       struct cplds *fpga = irq_data_get_irq_chip_data(d);
+       unsigned int cplds_irq = irqd_to_hwirq(d);
+       unsigned int set, bit = BIT(cplds_irq);
+
+       fpga->irq_mask &= ~bit;
+       writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+       set = readl(fpga->base + FPGA_IRQ_SET_CLR);
+       writel(set & ~bit, fpga->base + FPGA_IRQ_SET_CLR);
+}
+
+static void cplds_irq_unmask(struct irq_data *d)
+{
+       struct cplds *fpga = irq_data_get_irq_chip_data(d);
+       unsigned int cplds_irq = irqd_to_hwirq(d);
+       unsigned int bit = BIT(cplds_irq);
+
+       fpga->irq_mask |= bit;
+       writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+}
+
+static struct irq_chip cplds_irq_chip = {
+       .name           = "pxa_cplds",
+       .irq_mask_ack   = cplds_irq_mask_ack,
+       .irq_unmask     = cplds_irq_unmask,
+       .flags          = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int cplds_irq_domain_map(struct irq_domain *d, unsigned int irq,
+                                  irq_hw_number_t hwirq)
+{
+       struct cplds *fpga = d->host_data;
+
+       irq_set_chip_and_handler(irq, &cplds_irq_chip, handle_level_irq);
+       irq_set_chip_data(irq, fpga);
+
+       return 0;
+}
+
+static const struct irq_domain_ops cplds_irq_domain_ops = {
+       .xlate = irq_domain_xlate_twocell,
+       .map = cplds_irq_domain_map,
+};
+
+static int cplds_resume(struct platform_device *pdev)
+{
+       struct cplds *fpga = platform_get_drvdata(pdev);
+
+       writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+
+       return 0;
+}
+
+static int cplds_probe(struct platform_device *pdev)
+{
+       struct resource *res;
+       struct cplds *fpga;
+       int ret;
+       int base_irq;
+       unsigned long irqflags = 0;
+
+       fpga = devm_kzalloc(&pdev->dev, sizeof(*fpga), GFP_KERNEL);
+       if (!fpga)
+               return -ENOMEM;
+
+       res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
+       if (res) {
+               fpga->irq = (unsigned int)res->start;
+               irqflags = res->flags;
+       }
+       if (!fpga->irq)
+               return -ENODEV;
+
+       base_irq = platform_get_irq(pdev, 1);
+       if (base_irq < 0)
+               base_irq = 0;
+
+       res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+       fpga->base = devm_ioremap_resource(&pdev->dev, res);
+       if (IS_ERR(fpga->base))
+               return PTR_ERR(fpga->base);
+
+       platform_set_drvdata(pdev, fpga);
+
+       writel(fpga->irq_mask, fpga->base + FPGA_IRQ_MASK_EN);
+       writel(0, fpga->base + FPGA_IRQ_SET_CLR);
+
+       ret = devm_request_irq(&pdev->dev, fpga->irq, cplds_irq_handler,
+                              irqflags, dev_name(&pdev->dev), fpga);
+       if (ret == -ENOSYS)
+               return -EPROBE_DEFER;
+
+       if (ret) {
+               dev_err(&pdev->dev, "couldn't request main irq%d: %d\n",
+                       fpga->irq, ret);
+               return ret;
+       }
+
+       irq_set_irq_wake(fpga->irq, 1);
+       fpga->irqdomain = irq_domain_add_linear(pdev->dev.of_node,
+                                              CPLDS_NB_IRQ,
+                                              &cplds_irq_domain_ops, fpga);
+       if (!fpga->irqdomain)
+               return -ENODEV;
+
+       if (base_irq) {
+               ret = irq_create_strict_mappings(fpga->irqdomain, base_irq, 0,
+                                                CPLDS_NB_IRQ);
+               if (ret) {
+                       dev_err(&pdev->dev, "couldn't create the irq mapping %d..%d\n",
+                               base_irq, base_irq + CPLDS_NB_IRQ);
+                       return ret;
+               }
+       }
+
+       return 0;
+}
+
+static int cplds_remove(struct platform_device *pdev)
+{
+       struct cplds *fpga = platform_get_drvdata(pdev);
+
+       irq_set_chip_and_handler(fpga->irq, NULL, NULL);
+
+       return 0;
+}
+
+static const struct of_device_id cplds_id_table[] = {
+       { .compatible = "intel,lubbock-cplds-irqs", },
+       { .compatible = "intel,mainstone-cplds-irqs", },
+       { }
+};
+MODULE_DEVICE_TABLE(of, cplds_id_table);
+
+static struct platform_driver cplds_driver = {
+       .driver         = {
+               .name   = "pxa_cplds_irqs",
+               .of_match_table = of_match_ptr(cplds_id_table),
+       },
+       .probe          = cplds_probe,
+       .remove         = cplds_remove,
+       .resume         = cplds_resume,
+};
+
+module_platform_driver(cplds_driver);
+
+MODULE_DESCRIPTION("PXA Cplds interrupts driver");
+MODULE_AUTHOR("Robert Jarzmik <robert.jarzmik@free.fr>");
+MODULE_LICENSE("GPL");
index b07d8860207323e302a86234f9bf7cbbeac827a4..b0dcbe28f78cbd9fd73b399655b3b08b59938c8b 100644 (file)
@@ -83,6 +83,13 @@ static void rk3288_slp_mode_set(int level)
                     SGRF_PCLK_WDT_GATE | SGRF_FAST_BOOT_EN
                     | SGRF_PCLK_WDT_GATE_WRITE | SGRF_FAST_BOOT_EN_WRITE);
 
+       /*
+        * The dapswjdp cannot auto-reset before resume, which may cause it
+        * to access illegal addresses during resume. Disable it before
+        * suspend; the MASKROM will enable it again.
+        */
+       regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0, SGRF_DAPDEVICEEN_WRITE);
+
        /* booting address of resuming system is from this register value */
        regmap_write(sgrf_regmap, RK3288_SGRF_FAST_BOOT_ADDR,
                     rk3288_bootram_phy);
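
[Editor's note] On the SGRF_DAPDEVICEEN_WRITE-only write above: Rockchip GRF/SGRF registers use the upper 16 bits as a per-bit write-enable mask for the lower 16. Writing just BIT(16) therefore clears BIT(0) without touching any other field, a minimal sketch assuming that convention holds for RK3288_SGRF_CPU_CON0:

	/* value bits [15:0], write-enable bits [31:16] */
	regmap_write(sgrf_regmap, RK3288_SGRF_CPU_CON0,
		     SGRF_DAPDEVICEEN_WRITE);   /* enable bit 0, set value 0 */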
index 03ff31d8282d07bd4fcd54dcde67b6ee7d7069ef..3e8d39c0c3d5f09c78d38a830f56f5bfe1990c26 100644 (file)
@@ -55,6 +55,10 @@ static inline void rockchip_suspend_init(void)
 #define SGRF_FAST_BOOT_EN              BIT(8)
 #define SGRF_FAST_BOOT_EN_WRITE                BIT(24)
 
+#define RK3288_SGRF_CPU_CON0           (0x40)
+#define SGRF_DAPDEVICEEN               BIT(0)
+#define SGRF_DAPDEVICEEN_WRITE         BIT(16)
+
 #define RK3288_CRU_MODE_CON            0x50
 #define RK3288_CRU_SEL0_CON            0x60
 #define RK3288_CRU_SEL1_CON            0x64
index d360ec044b66e0b990c03f2569da3e07dbffc59d..b6cf3b449428960d28590f5cdad6e331d2239080 100644 (file)
 #include "pm.h"
 
 #define RK3288_GRF_SOC_CON0 0x244
+#define RK3288_TIMER6_7_PHYS 0xff810000
 
 static void __init rockchip_timer_init(void)
 {
        if (of_machine_is_compatible("rockchip,rk3288")) {
                struct regmap *grf;
+               void __iomem *reg_base;
+
+               /*
+                * Most/all uboot versions for rk3288 don't enable timer7
+                * which is needed for the architected timer to work.
+                * So make sure it is running during early boot.
+                */
+               reg_base = ioremap(RK3288_TIMER6_7_PHYS, SZ_16K);
+               if (reg_base) {
+                       writel(0, reg_base + 0x30);
+                       writel(0xffffffff, reg_base + 0x20);
+                       writel(0xffffffff, reg_base + 0x24);
+                       writel(1, reg_base + 0x30);
+                       dsb();
+                       iounmap(reg_base);
+               } else {
+                       pr_err("rockchip: could not map timer7 registers\n");
+               }
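
[Editor's note] The magic offsets above correspond, assuming the rk3288 TRM layout where each timer occupies 0x20 bytes within the block at 0xff810000, to timer7's registers: 0x20/0x24 are TIMER_LOAD_COUNT0/1 and 0x30 is TIMER_CONTROL_REG. Read as a sketch:

	writel(0, reg_base + 0x30);           /* stop timer7                 */
	writel(0xffffffff, reg_base + 0x20);  /* load count, low 32 bits     */
	writel(0xffffffff, reg_base + 0x24);  /* load count, high 32 bits    */
	writel(1, reg_base + 0x30);           /* enable, free-running mode   */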
 
                /*
                 * Disable auto jtag/sdmmc switching that causes issues
index 09c5fe3d30c2c220b6f17433111cff4898d0bd7c..7e7583ddd6076d7cc88fd7c8f9a53c71662263a2 100644 (file)
@@ -1878,7 +1878,7 @@ struct dma_map_ops iommu_coherent_ops = {
  * arm_iommu_attach_device function.
  */
 struct dma_iommu_mapping *
-arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
+arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, u64 size)
 {
        unsigned int bits = size >> PAGE_SHIFT;
        unsigned int bitmap_size = BITS_TO_LONGS(bits) * sizeof(long);
@@ -1886,6 +1886,10 @@ arm_iommu_create_mapping(struct bus_type *bus, dma_addr_t base, size_t size)
        int extensions = 1;
        int err = -ENOMEM;
 
+       /* currently only 32-bit DMA address space is supported */
+       if (size > DMA_BIT_MASK(32) + 1)
+               return ERR_PTR(-ERANGE);
+
        if (!bitmap_size)
                return ERR_PTR(-EINVAL);
 
@@ -2057,13 +2061,6 @@ static bool arm_setup_iommu_dma_ops(struct device *dev, u64 dma_base, u64 size,
        if (!iommu)
                return false;
 
-       /*
-        * currently arm_iommu_create_mapping() takes a max of size_t
-        * for size param. So check this limit for now.
-        */
-       if (size > SIZE_MAX)
-               return false;
-
        mapping = arm_iommu_create_mapping(dev->bus, dma_base, size);
        if (IS_ERR(mapping)) {
                pr_warn("Failed to create %llu-byte IOMMU mapping for device %s\n",
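
[Editor's note] The widened size parameter pairs with an explicit cap: DMA_BIT_MASK(32) + 1 is exactly 4 GiB, so that is the largest mapping accepted. An illustrative call (bus and base address hypothetical):

	struct dma_iommu_mapping *m;

	m = arm_iommu_create_mapping(&platform_bus_type, 0x80000000, SZ_2G);
	if (IS_ERR(m))          /* a size of 0x200000000ULL would yield -ERANGE */
		return PTR_ERR(m);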
index 6333d9c178757fe4f365b8e765b2a9ea75e2b80b..0d629b8f973fc2ca63aacb59e5baaf718b194543 100644 (file)
@@ -276,7 +276,7 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index b98895d9fe57cc4bde62392a3f7144b9ab23c70f..ee8dfa793989785488a306a9edd8b7899f3f1f3b 100644 (file)
@@ -59,6 +59,7 @@ void *kmap_atomic(struct page *page)
        void *kmap;
        int type;
 
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -121,6 +122,7 @@ void __kunmap_atomic(void *kvaddr)
                kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
        }
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -130,6 +132,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        int idx, type;
        struct page *page = pfn_to_page(pfn);
 
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
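
[Editor's note] Callers are unaffected by the preempt_disable() additions above; kmap_atomic() now pins the CPU itself and __kunmap_atomic() undoes it, so the usual pairing still holds. A minimal sketch:

	void *va = kmap_atomic(page);   /* preemption + pagefaults off */
	memcpy(va, buf, len);           /* must not sleep in between   */
	kunmap_atomic(va);              /* both re-enabled             */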
index 4e6ef896c6195db73f770957e9df619a0be05e06..7186382672b5eec605cba5ff491a7019914d304b 100644 (file)
@@ -1112,22 +1112,22 @@ void __init sanity_check_meminfo(void)
                        }
 
                        /*
-                        * Find the first non-section-aligned page, and point
+                        * Find the first non-pmd-aligned page, and point
                         * memblock_limit at it. This relies on rounding the
-                        * limit down to be section-aligned, which happens at
-                        * the end of this function.
+                        * limit down to be pmd-aligned, which happens at the
+                        * end of this function.
                         *
                         * With this algorithm, the start or end of almost any
-                        * bank can be non-section-aligned. The only exception
-                        * is that the start of the bank 0 must be section-
+                        * bank can be non-pmd-aligned. The only exception is
+                        * that the start of the bank 0 must be section-
                         * aligned, since otherwise memory would need to be
                         * allocated when mapping the start of bank 0, which
                         * occurs before any free memory is mapped.
                         */
                        if (!memblock_limit) {
-                               if (!IS_ALIGNED(block_start, SECTION_SIZE))
+                               if (!IS_ALIGNED(block_start, PMD_SIZE))
                                        memblock_limit = block_start;
-                               else if (!IS_ALIGNED(block_end, SECTION_SIZE))
+                               else if (!IS_ALIGNED(block_end, PMD_SIZE))
                                        memblock_limit = arm_lowmem_limit;
                        }
 
@@ -1137,12 +1137,12 @@ void __init sanity_check_meminfo(void)
        high_memory = __va(arm_lowmem_limit - 1) + 1;
 
        /*
-        * Round the memblock limit down to a section size.  This
+        * Round the memblock limit down to a pmd size.  This
         * helps to ensure that we will allocate memory from the
-        * last full section, which should be mapped.
+        * last full pmd, which should be mapped.
         */
        if (memblock_limit)
-               memblock_limit = round_down(memblock_limit, SECTION_SIZE);
+               memblock_limit = round_down(memblock_limit, PMD_SIZE);
        if (!memblock_limit)
                memblock_limit = arm_lowmem_limit;
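
Rounding the limit down to a pmd rather than a section matters because early page-table setup maps lowmem in pmd-sized blocks. round_down() here is the usual power-of-two mask (an equivalent form of the kernel macro):

    /* equivalent to round_down() in include/linux/kernel.h,
     * valid for power-of-two y */
    #define round_down(x, y)  ((x) & ~((__typeof__(x))(y) - 1))

    /* with 2 MiB pmds (illustrative values):
     *   round_down(0x2f900000UL, 0x200000UL) == 0x2f800000UL */
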
 
index aa0519eed6986c9af3b9b560663ea6e7b04f7c1a..774ef1323554bd54ad74a5caa14155921906560d 100644 (file)
@@ -22,8 +22,6 @@
  *
  * These are the low level assembler for performing cache and TLB
  * functions on the arm1020.
- *
- *  CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
index bff4c7f70fd6a992d0587b99f1303a08776988b7..ae3c27b71594d7d57cb40965efe9aecb07c82b9f 100644 (file)
@@ -22,8 +22,6 @@
  *
  * These are the low level assembler for performing cache and TLB
  * functions on the arm1020e.
- *
- *  CONFIG_CPU_ARM1020_CPU_IDLE -> nohlt
  */
 #include <linux/linkage.h>
 #include <linux/init.h>
index ede8c54ab4aa751d9619b6fec3248cac4255ee40..32a47cc19076c1eaef60ff4a523c89da81094455 100644 (file)
@@ -441,9 +441,6 @@ ENTRY(cpu_arm925_set_pte_ext)
        .type   __arm925_setup, #function
 __arm925_setup:
        mov     r0, #0
-#if defined(CONFIG_CPU_ICACHE_STREAMING_DISABLE)
-        orr     r0,r0,#1 << 7
-#endif
 
        /* Transparent on, D-cache clean & flush mode. See  NOTE2 above */
         orr     r0,r0,#1 << 1                  @ transparent mode on
index e494d6d6acbe8f38f316a586f474d7b7cfc3c0c0..92e08bf37aad940b8f7da40644ef88bcbf56aa31 100644 (file)
@@ -602,7 +602,6 @@ __\name\()_proc_info:
                PMD_SECT_AP_WRITE | \
                PMD_SECT_AP_READ
        initfn  __feroceon_setup, __\name\()_proc_info
-       .long __feroceon_setup
        .long   cpu_arch_name
        .long   cpu_elf_name
        .long   HWCAP_SWP|HWCAP_HALF|HWCAP_THUMB|HWCAP_FAST_MULT|HWCAP_EDSP
index e1268f90502682c75dfd7a98e6ed15272ea8d512..e0e23582c8b4e4687209a219c0d2bbc86b88bbd9 100644 (file)
@@ -54,6 +54,7 @@
 #define SEEN_DATA              (1 << (BPF_MEMWORDS + 3))
 
 #define FLAG_NEED_X_RESET      (1 << 0)
+#define FLAG_IMM_OVERFLOW      (1 << 1)
 
 struct jit_ctx {
        const struct bpf_prog *skf;
@@ -293,6 +294,15 @@ static u16 imm_offset(u32 k, struct jit_ctx *ctx)
        /* PC in ARM mode == address of the instruction + 8 */
        imm = offset - (8 + ctx->idx * 4);
 
+       if (imm & ~0xfff) {
+               /*
+                * The literal pool is too far away; record the overflow
+                * in the flags. Unfortunately we can only detect this on
+                * the second pass.
+                */
+               ctx->flags |= FLAG_IMM_OVERFLOW;
+               return 0;
+       }
+
        return imm;
 }
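
The ~0xfff test reflects the 12-bit immediate field of ARM's PC-relative load: the literal pool entry must sit within 4095 bytes of the instruction. A toy version of the range check (positive offsets only, which is all these hunks show the JIT emitting):

    #include <stdbool.h>
    #include <stdint.h>

    /* ldr rd, [pc, #imm] carries imm in 12 bits: 0..4095 */
    static bool literal_reachable(int32_t imm)
    {
            return (imm & ~0xfff) == 0;   /* any higher bit => out of range */
    }
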
 
@@ -449,10 +459,21 @@ static inline void emit_udiv(u8 rd, u8 rm, u8 rn, struct jit_ctx *ctx)
                return;
        }
 #endif
-       if (rm != ARM_R0)
-               emit(ARM_MOV_R(ARM_R0, rm), ctx);
+
+       /*
+        * For BPF_ALU | BPF_DIV | BPF_K instructions, rm is ARM_R4
+        * (r_A) and rn is ARM_R0 (r_scratch), so load rn into ARM_R1
+        * first; moving rm into ARM_R0 beforehand would clobber rn
+        * before it could be copied.
+        *
+        * For BPF_ALU | BPF_DIV | BPF_X, rm is ARM_R4 (r_A) and rn is
+        * ARM_R5 (r_X), so there are no register overlap issues.
+        */
        if (rn != ARM_R1)
                emit(ARM_MOV_R(ARM_R1, rn), ctx);
+       if (rm != ARM_R0)
+               emit(ARM_MOV_R(ARM_R0, rm), ctx);
 
        ctx->seen |= SEEN_CALL;
        emit_mov_i(ARM_R3, (u32)jit_udiv, ctx);
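
The reordering above is the classic two-register shuffle hazard: with rn already in R0, copying rm into R0 first destroys rn. A scalar model of the two orders (register indices match the names in the comment):

    #include <stdint.h>

    /* rm lives in reg[4] (r_A), rn in reg[0] (r_scratch), as in the
     * BPF_DIV | BPF_K case described above */
    static void shuffle_fixed(uint32_t reg[16])
    {
            reg[1] = reg[0];   /* rn into R1 while R0 still holds it */
            reg[0] = reg[4];   /* rm into R0 afterwards */
    }

    /* the old order, reg[0] = reg[4] then reg[1] = reg[0], would have
     * left rm's value in both R0 and R1 and lost rn entirely */
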
@@ -855,6 +876,14 @@ b_epilogue:
                default:
                        return -1;
                }
+
+               if (ctx->flags & FLAG_IMM_OVERFLOW)
+                       /*
+                        * This instruction overflowed the offset to the
+                        * literal pool, so delegate this filter to the
+                        * kernel interpreter.
+                        */
+                       return -1;
        }
 
        /* compute offsets only during the first pass */
@@ -917,7 +946,14 @@ void bpf_jit_compile(struct bpf_prog *fp)
        ctx.idx = 0;
 
        build_prologue(&ctx);
-       build_body(&ctx);
+       if (build_body(&ctx) < 0) {
+#if __LINUX_ARM_ARCH__ < 7
+               if (ctx.imm_count)
+                       kfree(ctx.imms);
+#endif
+               bpf_jit_binary_free(header);
+               goto out;
+       }
        build_epilogue(&ctx);
 
        flush_icache_range((u32)ctx.target, (u32)(ctx.target + ctx.idx));
index 224081ccc92fa6516cd67a95e4a5871aee6420a0..7d0f07020c809598c8a5ea292093d3676c083548 100644 (file)
@@ -272,6 +272,7 @@ void xen_arch_pre_suspend(void) { }
 void xen_arch_post_suspend(int suspend_cancelled) { }
 void xen_timer_resume(void) { }
 void xen_arch_resume(void) { }
+void xen_arch_suspend(void) { }
 
 
 /* In the hypervisor.S file. */
index c138b95a8356836929c0f2781c80933da4727953..351c95bda89e5215a9ca525171509bfb4f831bee 100644 (file)
                        clock-output-names = "juno_mb:clk25mhz";
                };
 
+               v2m_refclk1mhz: refclk1mhz {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <1000000>;
+                       clock-output-names = "juno_mb:refclk1mhz";
+               };
+
+               v2m_refclk32khz: refclk32khz {
+                       compatible = "fixed-clock";
+                       #clock-cells = <0>;
+                       clock-frequency = <32768>;
+                       clock-output-names = "juno_mb:refclk32khz";
+               };
+
                motherboard {
                        compatible = "arm,vexpress,v2p-p1", "simple-bus";
                        #address-cells = <2>;  /* SMB chipselect number and offset */
                                #size-cells = <1>;
                                ranges = <0 3 0 0x200000>;
 
+                               v2m_sysctl: sysctl@020000 {
+                                       compatible = "arm,sp810", "arm,primecell";
+                                       reg = <0x020000 0x1000>;
+                                       clocks = <&v2m_refclk32khz>, <&v2m_refclk1mhz>, <&mb_clk24mhz>;
+                                       clock-names = "refclk", "timclk", "apb_pclk";
+                                       #clock-cells = <1>;
+                                       clock-output-names = "timerclken0", "timerclken1", "timerclken2", "timerclken3";
+                               };
+
                                mmci@050000 {
                                        compatible = "arm,pl180", "arm,primecell";
                                        reg = <0x050000 0x1000>;
                                        compatible = "arm,sp804", "arm,primecell";
                                        reg = <0x110000 0x10000>;
                                        interrupts = <9>;
-                                       clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
-                                       clock-names = "timclken1", "apb_pclk";
+                                       clocks = <&v2m_sysctl 0>, <&v2m_sysctl 1>, <&mb_clk24mhz>;
+                                       clock-names = "timclken1", "timclken2", "apb_pclk";
                                };
 
                                v2m_timer23: timer@120000 {
                                        compatible = "arm,sp804", "arm,primecell";
                                        reg = <0x120000 0x10000>;
                                        interrupts = <9>;
-                                       clocks = <&mb_clk24mhz>, <&soc_smc50mhz>;
-                                       clock-names = "timclken1", "apb_pclk";
+                                       clocks = <&v2m_sysctl 2>, <&v2m_sysctl 3>, <&mb_clk24mhz>;
+                                       clock-names = "timclken1", "timclken2", "apb_pclk";
                                };
 
                                rtc@170000 {
index 43d54017b779d4e211462b8bebe2604025bb08ea..d0ab012fa379eb97c6e43ebad83ee18185d2b598 100644 (file)
@@ -16,7 +16,8 @@
 #include "mt8173.dtsi"
 
 / {
-       model = "mediatek,mt8173-evb";
+       model = "MediaTek MT8173 evaluation board";
+       compatible = "mediatek,mt8173-evb", "mediatek,mt8173";
 
        aliases {
                serial0 = &uart0;
index 9499199924aebd4d6b27ced325d69918b641ddf3..6a37c3c6b11d39acc0db9d142652abcb18d21b9c 100644 (file)
@@ -147,13 +147,21 @@ static int chksum_final(struct shash_desc *desc, u8 *out)
 {
        struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
 
+       put_unaligned_le32(ctx->crc, out);
+       return 0;
+}
+
+static int chksumc_final(struct shash_desc *desc, u8 *out)
+{
+       struct chksum_desc_ctx *ctx = shash_desc_ctx(desc);
+
        put_unaligned_le32(~ctx->crc, out);
        return 0;
 }
 
 static int __chksum_finup(u32 crc, const u8 *data, unsigned int len, u8 *out)
 {
-       put_unaligned_le32(~crc32_arm64_le_hw(crc, data, len), out);
+       put_unaligned_le32(crc32_arm64_le_hw(crc, data, len), out);
        return 0;
 }
 
@@ -199,6 +207,14 @@ static int crc32_cra_init(struct crypto_tfm *tfm)
 {
        struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
 
+       mctx->key = 0;
+       return 0;
+}
+
+static int crc32c_cra_init(struct crypto_tfm *tfm)
+{
+       struct chksum_ctx *mctx = crypto_tfm_ctx(tfm);
+
        mctx->key = ~0;
        return 0;
 }
@@ -229,7 +245,7 @@ static struct shash_alg crc32c_alg = {
        .setkey                 =       chksum_setkey,
        .init                   =       chksum_init,
        .update                 =       chksumc_update,
-       .final                  =       chksum_final,
+       .final                  =       chksumc_final,
        .finup                  =       chksumc_finup,
        .digest                 =       chksumc_digest,
        .descsize               =       sizeof(struct chksum_desc_ctx),
@@ -241,7 +257,7 @@ static struct shash_alg crc32c_alg = {
                .cra_alignmask          =       0,
                .cra_ctxsize            =       sizeof(struct chksum_ctx),
                .cra_module             =       THIS_MODULE,
-               .cra_init               =       crc32_cra_init,
+               .cra_init               =       crc32c_cra_init,
        }
 };
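
As these hunks read, the arm64 driver now distinguishes the two algorithms by seed and final complement: crc32 is seeded with 0 and emitted untouched, crc32c is seeded with all-ones and emitted complemented. In miniature (an assumption drawn from the diff alone, not the full driver):

    #include <stdint.h>

    /* crc32: seed 0, emit the running value as-is */
    static uint32_t crc32_seed(void)          { return 0; }
    static uint32_t crc32_emit(uint32_t crc)  { return crc; }

    /* crc32c: seed ~0, emit the complement */
    static uint32_t crc32c_seed(void)         { return ~0u; }
    static uint32_t crc32c_emit(uint32_t crc) { return ~crc; }
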
 
index 114e7cc5de8c09b4eb75f6b11294e9c689d1ebbc..aefda9868627bde843227d1074e7cc9b17004298 100644 (file)
@@ -74,6 +74,9 @@ static int sha1_ce_finup(struct shash_desc *desc, const u8 *data,
 
 static int sha1_ce_final(struct shash_desc *desc, u8 *out)
 {
+       struct sha1_ce_state *sctx = shash_desc_ctx(desc);
+
+       sctx->finalize = 0;
        kernel_neon_begin_partial(16);
        sha1_base_do_finalize(desc, (sha1_block_fn *)sha1_ce_transform);
        kernel_neon_end();
index 1340e44c048beab91279b6dd317f10590a6f06b9..7cd587564a4176e902f12c7c7043b714caab48ea 100644 (file)
@@ -75,6 +75,9 @@ static int sha256_ce_finup(struct shash_desc *desc, const u8 *data,
 
 static int sha256_ce_final(struct shash_desc *desc, u8 *out)
 {
+       struct sha256_ce_state *sctx = shash_desc_ctx(desc);
+
+       sctx->finalize = 0;
        kernel_neon_begin_partial(28);
        sha256_base_do_finalize(desc, (sha256_block_fn *)sha2_ce_transform);
        kernel_neon_end();
index 71f19c4dc0dee8c8a74bbdea252f73432e0b1aa5..0fa47c4275cb561af6292761ee9645c9dbef0e59 100644 (file)
@@ -114,7 +114,7 @@ do {                                                                        \
 #define read_barrier_depends()         do { } while(0)
 #define smp_read_barrier_depends()     do { } while(0)
 
-#define set_mb(var, value)     do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 #define nop()          asm volatile("nop");
 
 #define smp_mb__before_atomic()        smp_mb()
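
set_mb() becomes smp_store_mb() across architectures in this merge, and the store gains WRITE_ONCE() so the compiler can neither tear nor reorder it. The intended use, sketched:

    /* the macro itself, as the hunk defines it */
    #define smp_store_mb(var, value) \
            do { WRITE_ONCE(var, value); smp_mb(); } while (0)

    /* typical pairing (sketch):
     *   writer:  smp_store_mb(flag, 1);
     *   reader:  while (!READ_ONCE(flag)) cpu_relax();
     * the trailing full barrier orders the store against the writer's
     * subsequent loads; WRITE_ONCE() keeps the compiler honest about
     * the store itself. */
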
index 5f750dc96e0fd64123851ac787659f5953bc71e5..74069b3bd919c7ff3c5722f9f2c1dce212abd3fd 100644 (file)
@@ -58,7 +58,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
        if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))
                return -EFAULT;
 
-       pagefault_disable();    /* implies preempt_disable() */
+       pagefault_disable();
 
        switch (op) {
        case FUTEX_OP_SET:
@@ -85,7 +85,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr)
                ret = -ENOSYS;
        }
 
-       pagefault_enable();     /* subsumes preempt_enable() */
+       pagefault_enable();
 
        if (!ret) {
                switch (cmp) {
index 540f7c0aea8250a082c718c8b97f9c1c8c151169..7116d3973058200943148d2b2f7a4fe9ac0b7901 100644 (file)
@@ -170,6 +170,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size);
 #define ioremap(addr, size)            __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_nocache(addr, size)    __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define ioremap_wc(addr, size)         __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC))
+#define ioremap_wt(addr, size)         __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE))
 #define iounmap                                __iounmap
 
 /*
index 7ebcd31ce51cae9be1030f706e5ecb03a7af7e2c..225ec3524fbfc6ed29ae1d8d1d5041569a886089 100644 (file)
@@ -18,7 +18,7 @@ extern struct cpu_topology cpu_topology[NR_CPUS];
 #define topology_physical_package_id(cpu)      (cpu_topology[cpu].cluster_id)
 #define topology_core_id(cpu)          (cpu_topology[cpu].core_id)
 #define topology_core_cpumask(cpu)     (&cpu_topology[cpu].core_sibling)
-#define topology_thread_cpumask(cpu)   (&cpu_topology[cpu].thread_sibling)
+#define topology_sibling_cpumask(cpu)  (&cpu_topology[cpu].thread_sibling)
 
 void init_cpu_topology(void);
 void store_cpu_topology(unsigned int cpuid);
index 21033bba939051b7db11e415983d7b10fcc485ca..28f8365edc4c43edd00043f38d89f8ae8be6476f 100644 (file)
@@ -24,7 +24,6 @@
 #include <asm/cacheflush.h>
 #include <asm/alternative.h>
 #include <asm/cpufeature.h>
-#include <asm/insn.h>
 #include <linux/stop_machine.h>
 
 extern struct alt_instr __alt_instructions[], __alt_instructions_end[];
@@ -34,48 +33,6 @@ struct alt_region {
        struct alt_instr *end;
 };
 
-/*
- * Decode the imm field of a b/bl instruction, and return the byte
- * offset as a signed value (so it can be used when computing a new
- * branch target).
- */
-static s32 get_branch_offset(u32 insn)
-{
-       s32 imm = aarch64_insn_decode_immediate(AARCH64_INSN_IMM_26, insn);
-
-       /* sign-extend the immediate before turning it into a byte offset */
-       return (imm << 6) >> 4;
-}
-
-static u32 get_alt_insn(u8 *insnptr, u8 *altinsnptr)
-{
-       u32 insn;
-
-       aarch64_insn_read(altinsnptr, &insn);
-
-       /* Stop the world on instructions we don't support... */
-       BUG_ON(aarch64_insn_is_cbz(insn));
-       BUG_ON(aarch64_insn_is_cbnz(insn));
-       BUG_ON(aarch64_insn_is_bcond(insn));
-       /* ... and there is probably more. */
-
-       if (aarch64_insn_is_b(insn) || aarch64_insn_is_bl(insn)) {
-               enum aarch64_insn_branch_type type;
-               unsigned long target;
-
-               if (aarch64_insn_is_b(insn))
-                       type = AARCH64_INSN_BRANCH_NOLINK;
-               else
-                       type = AARCH64_INSN_BRANCH_LINK;
-
-               target = (unsigned long)altinsnptr + get_branch_offset(insn);
-               insn = aarch64_insn_gen_branch_imm((unsigned long)insnptr,
-                                                  target, type);
-       }
-
-       return insn;
-}
-
 static int __apply_alternatives(void *alt_region)
 {
        struct alt_instr *alt;
@@ -83,9 +40,6 @@ static int __apply_alternatives(void *alt_region)
        u8 *origptr, *replptr;
 
        for (alt = region->begin; alt < region->end; alt++) {
-               u32 insn;
-               int i;
-
                if (!cpus_have_cap(alt->cpufeature))
                        continue;
 
@@ -95,12 +49,7 @@ static int __apply_alternatives(void *alt_region)
 
                origptr = (u8 *)&alt->orig_offset + alt->orig_offset;
                replptr = (u8 *)&alt->alt_offset + alt->alt_offset;
-
-               for (i = 0; i < alt->alt_len; i += sizeof(insn)) {
-                       insn = get_alt_insn(origptr + i, replptr + i);
-                       aarch64_insn_write(origptr + i, insn);
-               }
-
+               memcpy(origptr, replptr, alt->alt_len);
                flush_icache_range((uintptr_t)origptr,
                                   (uintptr_t)(origptr + alt->alt_len));
        }
index 23f25acf43a9021ee9b4ad0f79e3b3e5d6ef49c4..cce18c85d2e8edc6edcf23d13ef13e99f6cbf835 100644 (file)
@@ -1315,15 +1315,15 @@ static int armpmu_device_probe(struct platform_device *pdev)
        if (!cpu_pmu)
                return -ENODEV;
 
-       irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
-       if (!irqs)
-               return -ENOMEM;
-
        /* Don't bother with PPIs; they're already affine */
        irq = platform_get_irq(pdev, 0);
        if (irq >= 0 && irq_is_percpu(irq))
                return 0;
 
+       irqs = kcalloc(pdev->num_resources, sizeof(*irqs), GFP_KERNEL);
+       if (!irqs)
+               return -ENOMEM;
+
        for (i = 0; i < pdev->num_resources; ++i) {
                struct device_node *dn;
                int cpu;
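
Moving the kcalloc() below the percpu-IRQ early return closes a small leak: returning 0 for a PPI used to abandon the freshly allocated irqs array. The shape of the fix in plain C (probe_like() and its arguments are illustrative; -1 stands in for -ENOMEM):

    #include <stdlib.h>

    static int probe_like(int irq_is_percpu, int nres)
    {
            int *irqs;

            if (irq_is_percpu)        /* take every early exit first... */
                    return 0;

            irqs = calloc(nres, sizeof(*irqs));   /* ...then allocate */
            if (!irqs)
                    return -1;
            /* ... wire up interrupts ... */
            free(irqs);
            return 0;
    }
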
index 74c256744b254e09c989cabcd9072aee93c9fd6d..f3d6221cd5bdd4c7bf59fd99c71415d3922e2572 100644 (file)
@@ -328,10 +328,12 @@ static int ptdump_init(void)
                        for (j = 0; j < pg_level[i].num; j++)
                                pg_level[i].mask |= pg_level[i].bits[j].mask;
 
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
        address_markers[VMEMMAP_START_NR].start_address =
                                (unsigned long)virt_to_page(PAGE_OFFSET);
        address_markers[VMEMMAP_END_NR].start_address =
                                (unsigned long)virt_to_page(high_memory);
+#endif
 
        pe = debugfs_create_file("kernel_page_tables", 0400, NULL, NULL,
                                 &ptdump_fops);
index 96da13167d4a5c77564952a6d3a0fdce35d6580f..0948d327d013651c7b9978023139ea9cd89ecaeb 100644 (file)
@@ -211,7 +211,7 @@ static int __kprobes do_page_fault(unsigned long addr, unsigned int esr,
         * If we're in an interrupt or have no user context, we must not take
         * the fault.
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index edba042b23259c6f4b642a842fc1a2eb740c59b3..dc6a4842683aa500b9a5fc3af41caec349472902 100644 (file)
@@ -487,7 +487,7 @@ emit_cond_jmp:
                        return -EINVAL;
                }
 
-               imm64 = (u64)insn1.imm << 32 | imm;
+               imm64 = (u64)insn1.imm << 32 | (u32)imm;
                emit_a64_mov_i64(dst, imm64, ctx);
 
                return 1;
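
The (u32) cast fixes a sign-extension bug: a negative 32-bit imm, promoted to 64 bits for the OR, would smear ones over the upper word. A compilable demonstration:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            int32_t imm = -1, hi = 0x12345678;
            uint64_t wrong = (uint64_t)hi << 32 | imm;          /* imm sign-extends */
            uint64_t right = (uint64_t)hi << 32 | (uint32_t)imm;
            printf("%016llx\n%016llx\n",
                   (unsigned long long)wrong,    /* ffffffffffffffff */
                   (unsigned long long)right);   /* 12345678ffffffff */
            return 0;
    }
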
index 962a6aeab787827462312ddf92627c2f8e7083f1..366bbeaeb405d67daa35adac5ff22a4ecfc7e125 100644 (file)
@@ -70,8 +70,6 @@ extern unsigned long __cmpxchg_u64_unsupported_on_32bit_kernels(
    if something tries to do an invalid cmpxchg().  */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
                                      unsigned long new, int size)
 {
index 4f5ec2bb71727279a952843051f44e3297c7b222..e998ff5d8e1a540aa8274e07c9bc8f7443ed7633 100644 (file)
@@ -296,6 +296,7 @@ extern void __iounmap(void __iomem *addr);
        __iounmap(addr)
 
 #define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
 
 #define cached(addr) P1SEGADDR(addr)
 #define uncached(addr) P2SEGADDR(addr)
index a46f7cf3e1eab23d4cdfc224d21fe571917ef413..68cf638faf4867aef2d92b91f9ab48770100f132 100644 (file)
@@ -97,7 +97,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -116,7 +117,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -136,7 +138,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -158,7 +161,8 @@ static inline __kernel_size_t __copy_from_user(void *to,
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
index d223a8b57c1eaad282289e75089654153ab598d6..c03533937a9f0aa273a75c76ecb66f2731b2d39c 100644 (file)
 #include <linux/pagemap.h>
 #include <linux/kdebug.h>
 #include <linux/kprobes.h>
+#include <linux/uaccess.h>
 
 #include <asm/mmu_context.h>
 #include <asm/sysreg.h>
 #include <asm/tlb.h>
-#include <asm/uaccess.h>
 
 #ifdef CONFIG_KPROBES
 static inline int notify_page_fault(struct pt_regs *regs, int trap)
@@ -81,7 +81,7 @@ asmlinkage void do_page_fault(unsigned long ecr, struct pt_regs *regs)
         * If we're in an interrupt or have no user context, we must
         * not take the fault...
         */
-       if (in_atomic() || !mm || regs->sr & SYSREG_BIT(GM))
+       if (faulthandler_disabled() || !mm || regs->sr & SYSREG_BIT(GM))
                goto no_context;
 
        local_irq_enable();
index 4e8ad0523118d631ea24f6b9f8fd1c3ffb123194..6abebe82d4e93ed0329f271cd54e2af5c1bc38d2 100644 (file)
@@ -10,6 +10,7 @@
 #include <linux/compiler.h>
 #include <linux/types.h>
 #include <asm/byteorder.h>
+#include <asm/def_LPBlackfin.h>
 
 #define __raw_readb bfin_read8
 #define __raw_readw bfin_read16
index 83f12f2ed9e31b8705ed4f8f5ec1c4cbd8822fb4..3066d40a6db14425c162d399d89e5c6db66786fe 100644 (file)
@@ -8,7 +8,7 @@
 #include <linux/interrupt.h>
 #include <linux/module.h>
 #include <linux/wait.h>
-#include <asm/uaccess.h>
+#include <linux/uaccess.h>
 #include <arch/system.h>
 
 extern int find_fixup_code(struct pt_regs *);
@@ -109,11 +109,11 @@ do_page_fault(unsigned long address, struct pt_regs *regs,
        info.si_code = SEGV_MAPERR;
 
        /*
-        * If we're in an interrupt or "atomic" operation or have no
+        * If we're in an interrupt, have pagefaults disabled or have no
         * user context, we must not take the fault.
         */
 
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index 0b78bc89e8402f867fcdbb31e85ff2cffa693494..a31b63ec4930547d306a94bebcb28b9bce48364f 100644 (file)
@@ -17,6 +17,8 @@
 
 #ifdef __KERNEL__
 
+#define ARCH_HAS_IOREMAP_WT
+
 #include <linux/types.h>
 #include <asm/virtconvert.h>
 #include <asm/string.h>
@@ -265,7 +267,7 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned lon
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
 
-static inline void __iomem *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+static inline void __iomem *ioremap_wt(unsigned long physaddr, unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
index ec4917ddf67872aa46b60c6b067b0a67ec5417a4..61d99767fe1691e70286bf8d52a05aee506bbccc 100644 (file)
@@ -19,9 +19,9 @@
 #include <linux/kernel.h>
 #include <linux/ptrace.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 
 #include <asm/pgtable.h>
-#include <asm/uaccess.h>
 #include <asm/gdb-stub.h>
 
 /*****************************************************************************/
@@ -78,7 +78,7 @@ asmlinkage void do_page_fault(int datammu, unsigned long esr0, unsigned long ear
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(__frame))
index bed9a9bd3c10c84e004c845839f0ad53c0565e45..785344bbdc07c360e81768c8472336bebd0baa3e 100644 (file)
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
        unsigned long paddr;
        int type;
 
+       preempt_disable();
        pagefault_disable();
        type = kmap_atomic_idx_push();
        paddr = page_to_phys(page);
@@ -85,5 +86,6 @@ void __kunmap_atomic(void *kvaddr)
        }
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 9e7802911a57f22b01d9f6852d3c11b2f34c1869..a6e34e2acbbaf6cc07f377db35faa7c744ba23e3 100644 (file)
@@ -64,7 +64,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
  *  looks just like atomic_cmpxchg on our arch currently with a bunch of
  *  variable casting.
  */
-#define __HAVE_ARCH_CMPXCHG 1
 
 #define cmpxchg(ptr, old, new)                                 \
 ({                                                             \
index e4127e4d6a5bbde7f1d0ec42017df98353177c93..f000a382bc7f62f28dfe980ab184904c70681914 100644 (file)
@@ -36,7 +36,8 @@
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
index f6769eb2bbf9b5ff8774017ac0eb332fcc7309d9..843ba435e43bc285f20da4b5c47e262d7714525f 100644 (file)
@@ -77,12 +77,7 @@ do {                                                                 \
        ___p1;                                                          \
 })
 
-/*
- * XXX check on this ---I suspect what Linus really wants here is
- * acquire vs release semantics but we can't discuss this stuff with
- * Linus just yet.  Grrr...
- */
-#define set_mb(var, value)     do { (var) = (value); mb(); } while (0)
+#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); mb(); } while (0)
 
 /*
  * The group barrier in front of the rsm & ssm are necessary to ensure
index e3b3556e2e1bb8d32b67a1b63e4de3e2708af0b8..a8687b1d8906912ab458b9640db72c8655115a3f 100644 (file)
@@ -1,6 +1,4 @@
 #ifndef __IA64_INTR_REMAPPING_H
 #define __IA64_INTR_REMAPPING_H
 #define irq_remapping_enabled 0
-#define dmar_alloc_hwirq       create_irq
-#define dmar_free_hwirq                destroy_irq
 #endif
index 6437ca21f61b49d0b05fe9d124ed0e5aa8391d92..3ad8f698836346793816a79ada16035f40b8197e 100644 (file)
@@ -53,7 +53,7 @@ void build_cpu_to_node_map(void);
 #define topology_physical_package_id(cpu)      (cpu_data(cpu)->socket_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu)->core_id)
 #define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu)          (&per_cpu(cpu_sibling_map, cpu))
 #endif
 
 extern void arch_fix_phys_package_id(int num, u32 slot);
index f35109b1d9076ccd996f1308dbffbdf788cd7475..a0e3620f8f133f7ea798ae9458abf10cc168f3c3 100644 (file)
@@ -61,8 +61,6 @@ extern void ia64_xchg_called_with_bad_pointer(void);
  * indicated by comparing RETURN with OLD.
  */
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /*
  * This function doesn't exist, so you'll get a linker error
  * if something tries to do an invalid cmpxchg().
index 9dd7464f8c1742848011ce1397ef8a79f1e60d4f..d70bf15c690a53227142b6027ce7bb116475ed9d 100644 (file)
@@ -165,7 +165,7 @@ static struct irq_chip dmar_msi_type = {
        .irq_retrigger = ia64_msi_retrigger_irq,
 };
 
-static int
+static void
 msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
 {
        struct irq_cfg *cfg = irq_cfg + irq;
@@ -186,21 +186,29 @@ msi_compose_msg(struct pci_dev *pdev, unsigned int irq, struct msi_msg *msg)
                MSI_DATA_LEVEL_ASSERT |
                MSI_DATA_DELIVERY_FIXED |
                MSI_DATA_VECTOR(cfg->vector);
-       return 0;
 }
 
-int arch_setup_dmar_msi(unsigned int irq)
+int dmar_alloc_hwirq(int id, int node, void *arg)
 {
-       int ret;
+       int irq;
        struct msi_msg msg;
 
-       ret = msi_compose_msg(NULL, irq, &msg);
-       if (ret < 0)
-               return ret;
-       dmar_msi_write(irq, &msg);
-       irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
-                                     "edge");
-       return 0;
+       irq = create_irq();
+       if (irq > 0) {
+               irq_set_handler_data(irq, arg);
+               irq_set_chip_and_handler_name(irq, &dmar_msi_type,
+                                             handle_edge_irq, "edge");
+               msi_compose_msg(NULL, irq, &msg);
+               dmar_msi_write(irq, &msg);
+       }
+
+       return irq;
+}
+
+void dmar_free_hwirq(int irq)
+{
+       irq_set_handler_data(irq, NULL);
+       destroy_irq(irq);
 }
 #endif /* CONFIG_INTEL_IOMMU */
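
The old arch_setup_dmar_msi() hook is folded into an alloc/free pair the generic DMAR code can call directly. A hypothetical caller (only the signatures come from the hunk; the error value is a stand-in):

    int irq = dmar_alloc_hwirq(id, node, iommu);   /* > 0 on success */
    if (irq <= 0)
            return -1;
    /* ... handle DMAR faults via irq ... */
    dmar_free_hwirq(irq);
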
 
index 15051e9c2c6f98f3f2e8743739f10b63f795be3a..b054c5c6e7137cf85ba00f8c60fa33b8719b0c07 100644 (file)
@@ -127,7 +127,7 @@ int smp_num_siblings = 1;
 volatile int ia64_cpu_to_sapicid[NR_CPUS];
 EXPORT_SYMBOL(ia64_cpu_to_sapicid);
 
-static volatile cpumask_t cpu_callin_map;
+static cpumask_t cpu_callin_map;
 
 struct smp_boot_data smp_boot_data __initdata;
 
@@ -477,6 +477,7 @@ do_boot_cpu (int sapicid, int cpu, struct task_struct *idle)
        for (timeout = 0; timeout < 100000; timeout++) {
                if (cpumask_test_cpu(cpu, &cpu_callin_map))
                        break;  /* It has booted */
+               barrier(); /* Make sure we re-read cpu_callin_map */
                udelay(100);
        }
        Dprintk("\n");
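
With cpu_callin_map no longer volatile, the poll loop needs the explicit compiler barrier, or the compiler may hoist the cpumask load out of the loop. barrier() is just the empty asm with a memory clobber; the loop shape, with a hypothetical cpu_has_called_in() standing in for the cpumask test:

    #define barrier() __asm__ __volatile__("" ::: "memory")

    while (!cpu_has_called_in(cpu)) {
            barrier();      /* discard cached values; reload the mask */
            udelay(100);
    }
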
index ba5ba7accd0d6bb4dbab34f7fc307c4306347f4a..70b40d1205a6b9b3ec7efcbc9e60ec64c2eff712 100644 (file)
 #include <linux/kprobes.h>
 #include <linux/kdebug.h>
 #include <linux/prefetch.h>
+#include <linux/uaccess.h>
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/uaccess.h>
 
 extern int die(char *, struct pt_regs *, long);
 
@@ -96,7 +96,7 @@ ia64_do_page_fault (unsigned long address, unsigned long isr, struct pt_regs *re
        /*
         * If we're in an interrupt or have no user context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
 #ifdef CONFIG_VIRTUAL_MEM_MAP
index d4e162d35b3467b9d7814769129237583f8cb100..7cc3be9fa7c65a0dd700dfd30c04cc7be822922d 100644 (file)
@@ -478,9 +478,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-       struct pci_controller *controller = bridge->bus->sysdata;
-
-       ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+       /*
+        * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+        * here, pci_create_root_bus() has been called by someone else and
+        * sysdata is likely to be different from what we expect.  Let it go in
+        * that case.
+        */
+       if (!bridge->dev.parent) {
+               struct pci_controller *controller = bridge->bus->sysdata;
+               ACPI_COMPANION_SET(&bridge->dev, controller->companion);
+       }
        return 0;
 }
 
index de651db20b43938ca7289404dfa1eb4b7940e0c8..14bf9b739dd2c8eb643a805263dc88741fd3b1de 100644 (file)
@@ -107,8 +107,6 @@ __xchg_local(unsigned long x, volatile void *ptr, int size)
        ((__typeof__(*(ptr)))__xchg_local((unsigned long)(x), (ptr),    \
                        sizeof(*(ptr))))
 
-#define __HAVE_ARCH_CMPXCHG    1
-
 static inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned int old, unsigned int new)
 {
index 9cc00dbd59cef4096b186fd1ffe5e4b51766f430..0c3f25ee3381d9fad606ea36bb09682d362a87d8 100644 (file)
@@ -68,6 +68,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 extern void iounmap(volatile void __iomem *addr);
 #define ioremap_nocache(off,size) ioremap(off,size)
 #define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
 
 /*
  * IO bus memory addresses are also 1:1 with the physical address
index 71adff209405e15b052e96d832ba5a26295bb98e..cac7014daef3aa6949d17a72824a757dfdbee5b4 100644 (file)
@@ -91,7 +91,8 @@ static inline void set_fs(mm_segment_t s)
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -155,7 +156,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -175,7 +177,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -194,7 +197,8 @@ extern int fixup_exception(struct pt_regs *regs);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -274,7 +278,8 @@ do {                                                                        \
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -568,7 +573,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -588,7 +594,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -606,7 +613,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -626,7 +634,8 @@ unsigned long __generic_copy_from_user(void *, const void __user *, unsigned lon
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
@@ -677,7 +686,8 @@ unsigned long clear_user(void __user *mem, unsigned long len);
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
index ce7aea34fdf410857799d9e17e092ff1aa1936e8..c18ddc74ef9a60c0ad8c338213d0f95239d972b9 100644 (file)
@@ -45,7 +45,7 @@ static volatile unsigned long flushcache_cpumask = 0;
 /*
  * For flush_tlb_others()
  */
-static volatile cpumask_t flush_cpumask;
+static cpumask_t flush_cpumask;
 static struct mm_struct *flush_mm;
 static struct vm_area_struct *flush_vma;
 static volatile unsigned long flush_va;
@@ -415,7 +415,7 @@ static void flush_tlb_others(cpumask_t cpumask, struct mm_struct *mm,
         */
        send_IPI_mask(&cpumask, INVALIDATE_TLB_IPI, 0);
 
-       while (!cpumask_empty((cpumask_t*)&flush_cpumask)) {
+       while (!cpumask_empty(&flush_cpumask)) {
                /* nothing. lockup detection does not belong here */
                mb();
        }
@@ -468,7 +468,7 @@ void smp_invalidate_interrupt(void)
                        __flush_tlb_page(va);
                }
        }
-       cpumask_clear_cpu(cpu_id, (cpumask_t*)&flush_cpumask);
+       cpumask_clear_cpu(cpu_id, &flush_cpumask);
 }
 
 /*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
index e3d4d4890104cc27e2eb9de2f22cb6f53f939c90..8f9875b7933d5582277a777693a2e44c1483d362 100644 (file)
@@ -24,9 +24,9 @@
 #include <linux/vt_kern.h>             /* For unblank_screen() */
 #include <linux/highmem.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/m32r.h>
-#include <asm/uaccess.h>
 #include <asm/hardirq.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -111,10 +111,10 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long error_code,
        mm = tsk->mm;
 
        /*
-        * If we're in an interrupt or have no user context or are running in an
-        * atomic region then we must not take the fault..
+        * If we're in an interrupt or have no user context or have pagefaults
+        * disabled then we must not take the fault.
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto bad_area_nosemaphore;
 
        if (error_code & ACE_USERMODE)
index bc755bc620ad0985d6b2a08e57561354a67a301c..83b1df80f0ac0e69c9cf0662981a24358f29a166 100644 (file)
@@ -90,7 +90,6 @@ extern unsigned long __invalid_cmpxchg_size(volatile void *,
  * indicated by comparing RETURN with OLD.
  */
 #ifdef CONFIG_RMW_INSNS
-#define __HAVE_ARCH_CMPXCHG    1
 
 static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
                                      unsigned long new, int size)
index 8955b40a5dc4304a37c6ff88b328a333f1df13cd..618c85d3c786713c8787043f176eac5d74e13e5e 100644 (file)
@@ -20,6 +20,8 @@
 
 #ifdef __KERNEL__
 
+#define ARCH_HAS_IOREMAP_WT
+
 #include <linux/compiler.h>
 #include <asm/raw_io.h>
 #include <asm/virtconvert.h>
@@ -465,7 +467,7 @@ static inline void __iomem *ioremap_nocache(unsigned long physaddr, unsigned lon
 {
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-static inline void __iomem *ioremap_writethrough(unsigned long physaddr,
+static inline void __iomem *ioremap_wt(unsigned long physaddr,
                                         unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
index a93c8cde4d382fc171798affe7090cc59a60f46d..ad7bd40e67428fe420a71ed7e35018a2bd1236e4 100644 (file)
@@ -3,6 +3,8 @@
 
 #ifdef __KERNEL__
 
+#define ARCH_HAS_IOREMAP_WT
+
 #include <asm/virtconvert.h>
 #include <asm-generic/iomap.h>
 
@@ -153,7 +155,7 @@ static inline void *ioremap_nocache(unsigned long physaddr, unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_NOCACHE_SER);
 }
-static inline void *ioremap_writethrough(unsigned long physaddr, unsigned long size)
+static inline void *ioremap_wt(unsigned long physaddr, unsigned long size)
 {
        return __ioremap(physaddr, size, IOMAP_WRITETHROUGH);
 }
index a823cd73dc09e35bdfe683ab0637dd3c19ffdcb1..b5941818346f65dfbaad65823d2e61a59673f44a 100644 (file)
@@ -2,9 +2,6 @@
 #define _M68K_IRQFLAGS_H
 
 #include <linux/types.h>
-#ifdef CONFIG_MMU
-#include <linux/preempt_mask.h>
-#endif
 #include <linux/preempt.h>
 #include <asm/thread_info.h>
 #include <asm/entry.h>
index b2f04aee46ecc2f7a5fb1db26d8e4279f6b6ea2e..6a94cdd0c8308cb70151d477872ccbeda2022ea8 100644 (file)
 #include <linux/ptrace.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
 #include <asm/setup.h>
 #include <asm/traps.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
 extern void die_if_kernel(char *, struct pt_regs *, long);
@@ -81,7 +81,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index d703d8e26a656c1560202d3c1af6a39ed95771e5..5a696e50793034bfdecefec781f95a402196c99a 100644 (file)
@@ -84,7 +84,7 @@ static inline void fence(void)
 #define read_barrier_depends()         do { } while (0)
 #define smp_read_barrier_depends()     do { } while (0)
 
-#define set_mb(var, value) do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_store_release(p, v)                                                \
 do {                                                                   \
index b1bc1be8540f581ab570963f099116507ea751db..be29e3e44321a1aa2d3adcf069522bd5152cf4ff 100644 (file)
@@ -51,8 +51,6 @@ static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
        return old;
 }
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define cmpxchg(ptr, o, n)                                             \
        ({                                                              \
                __typeof__(*(ptr)) _o_ = (o);                           \
index d5779b0ec5730a01963a4a86940d1f5eb500084e..9890f21eadbe867ef4c48f5141ce4ceadb54965f 100644 (file)
@@ -160,6 +160,9 @@ extern void __iounmap(void __iomem *addr);
 #define ioremap_wc(offset, size)                \
        __ioremap((offset), (size), _PAGE_WR_COMBINE)
 
+#define ioremap_wt(offset, size)                \
+       __ioremap((offset), (size), 0)
+
 #define iounmap(addr)                           \
        __iounmap(addr)
 
index 2de5dc695a87fa96d41a83e127166a7126d10df0..f57edca63609bf15f1a71bf5a90c8837d6a9fd13 100644 (file)
@@ -105,7 +105,7 @@ int do_page_fault(struct pt_regs *regs, unsigned long address,
 
        mm = tsk->mm;
 
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index d71f621a2c0b92adb5679162d1cd59568440986b..807f1b1c4e6567738f676c935ad21483e4972a32 100644 (file)
@@ -43,7 +43,7 @@ void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        int type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -82,6 +82,7 @@ void __kunmap_atomic(void *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -95,6 +96,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        unsigned long vaddr;
        int type;
 
+       preempt_disable();
        pagefault_disable();
 
        type = kmap_atomic_idx_push();
index 940f5fc1d1da13f3988fe99fd69a7d02e0825c8d..39b6315db82ee74bc3aa652460b50c6cdde605a7 100644 (file)
@@ -39,10 +39,10 @@ extern resource_size_t isa_mem_base;
 extern void iounmap(void __iomem *addr);
 
 extern void __iomem *ioremap(phys_addr_t address, unsigned long size);
-#define ioremap_writethrough(addr, size)       ioremap((addr), (size))
 #define ioremap_nocache(addr, size)            ioremap((addr), (size))
 #define ioremap_fullcache(addr, size)          ioremap((addr), (size))
 #define ioremap_wc(addr, size)                 ioremap((addr), (size))
+#define ioremap_wt(addr, size)                 ioremap((addr), (size))
 
 #endif /* CONFIG_MMU */
 
index 62942fd126728688cb8358685802eead5c8b5fcf..331b0d35f89ce301ad9ba876be7322f417d7c909 100644 (file)
@@ -178,7 +178,8 @@ extern long __user_bad(void);
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -290,7 +291,8 @@ extern long __user_bad(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
index d46a5ebb7570e07869ea03b9b995374aa3bff82e..177dfc0036436284d4e016b0987e4516faa7f445 100644 (file)
@@ -107,14 +107,14 @@ void do_page_fault(struct pt_regs *regs, unsigned long address,
        if ((error_code & 0x13) == 0x13 || (error_code & 0x11) == 0x11)
                is_write = 0;
 
-       if (unlikely(in_atomic() || !mm)) {
+       if (unlikely(faulthandler_disabled() || !mm)) {
                if (kernel_mode(regs))
                        goto bad_area_nosemaphore;
 
-               /* in_atomic() in user mode is really bad,
+               /* faulthandler_disabled() in user mode is really bad,
                   as is current->mm == NULL. */
-               pr_emerg("Page fault in user mode with in_atomic(), mm = %p\n",
-                                                                       mm);
+               pr_emerg("Page fault in user mode with faulthandler_disabled(), mm = %p\n",
+                        mm);
                pr_emerg("r15 = %lx  MSR = %lx\n",
                       regs->r15, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
index 5a92576fad927127eb05fbe24833b57e18ba7155..2fcc5a52d84d1c2cf25d0cc45c356ce14549d74e 100644 (file)
@@ -37,7 +37,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -63,6 +63,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -84,5 +85,6 @@ void __kunmap_atomic(void *kvaddr)
 #endif
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 5200f649dd4e3005423a81b4c608fc4ac49d2321..ae2dd59050f742c54c49b3f80925bbfc4e67fbbd 100644 (file)
@@ -277,7 +277,7 @@ LDFLAGS                     += -m $(ld-emul)
 ifdef CONFIG_MIPS
 CHECKFLAGS += $(shell $(CC) $(KBUILD_CFLAGS) -dM -E -x c /dev/null | \
        egrep -vw '__GNUC_(|MINOR_|PATCHLEVEL_)_' | \
-       sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/")
+       sed -e "s/^\#define /-D'/" -e "s/ /'='/" -e "s/$$/'/" -e 's/\$$/&&/g')
 ifdef CONFIG_64BIT
 CHECKFLAGS             += -m64
 endif
index e1fe6305113612cf2943bff9307a9579a9da3f69..597899ad5438e3b551a0b190555546e3a0e5b218 100644 (file)
@@ -1,6 +1,7 @@
 /*
  *  Atheros AR71XX/AR724X/AR913X specific prom routines
  *
+ *  Copyright (C) 2015 Laurent Fasnacht <l@libres.ch>
  *  Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
  *  Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
  *
@@ -25,12 +26,14 @@ void __init prom_init(void)
 {
        fw_init_cmdline();
 
+#ifdef CONFIG_BLK_DEV_INITRD
        /* Read the initrd address from the firmware environment */
        initrd_start = fw_getenvl("initrd_start");
        if (initrd_start) {
                initrd_start = KSEG0ADDR(initrd_start);
                initrd_end = initrd_start + fw_getenvl("initrd_size");
        }
+#endif
 }
 
 void __init prom_free_prom_memory(void)
index a73c93c3d44a1069149945cf2732aeed26c918bd..7fc8397d16f21d713ad3e073308f69c75dd88692 100644 (file)
@@ -225,7 +225,7 @@ void __init plat_time_init(void)
        ddr_clk_rate = ath79_get_sys_clk_rate("ddr");
        ref_clk_rate = ath79_get_sys_clk_rate("ref");
 
-       pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz",
+       pr_info("Clocks: CPU:%lu.%03luMHz, DDR:%lu.%03luMHz, AHB:%lu.%03luMHz, Ref:%lu.%03luMHz\n",
                cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000,
                ddr_clk_rate / 1000000, (ddr_clk_rate / 1000) % 1000,
                ahb_clk_rate / 1000000, (ahb_clk_rate / 1000) % 1000,
index 558e94977942033dc8247bcc510ebb705aa9698a..68f0c5871adcdf51f40380ffbba09b1e5e52202c 100644 (file)
@@ -2,7 +2,6 @@
 # Makefile for the Cobalt micro systems family specific parts of the kernel
 #
 
-obj-y := buttons.o irq.o lcd.o led.o reset.o rtc.o serial.o setup.o time.o
+obj-y := buttons.o irq.o lcd.o led.o mtd.o reset.o rtc.o serial.o setup.o time.o
 
 obj-$(CONFIG_PCI)              += pci.o
-obj-$(CONFIG_MTD_PHYSMAP)      += mtd.o
index 002680648dcb22307f338f2db93af43c372a01ff..b2a577ebce0b08f79650d8ef2bb096fcb6d0a27e 100644 (file)
@@ -194,7 +194,7 @@ CONFIG_USB_WUSB_CBAF=m
 CONFIG_USB_C67X00_HCD=m
 CONFIG_USB_EHCI_HCD=y
 CONFIG_USB_EHCI_ROOT_HUB_TT=y
-CONFIG_USB_ISP1760_HCD=m
+CONFIG_USB_ISP1760=m
 CONFIG_USB_OHCI_HCD=y
 CONFIG_USB_UHCI_HCD=m
 CONFIG_USB_R8A66597_HCD=m
index 2b8bbbcb9be0e9f8c5c6f7dbc1a2c6f692185443..7ecba84656d4951a4196622344338c842997fb0f 100644 (file)
 #define __WEAK_LLSC_MB         "               \n"
 #endif
 
-#define set_mb(var, value) \
-       do { var = value; smp_mb(); } while (0)
+#define smp_store_mb(var, value) \
+       do { WRITE_ONCE(var, value); smp_mb(); } while (0)
 
 #define smp_llsc_mb()  __asm__ __volatile__(__WEAK_LLSC_MB : : :"memory")
 
index 412f945f1f5ec26e78ab228aa2ee5b36cc2362fe..b71ab4a5fd508ea0ee59d9a27b9d15eee0d7180b 100644 (file)
@@ -138,8 +138,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
                __xchg((unsigned long)(x), (ptr), sizeof(*(ptr))));     \
 })
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 #define __cmpxchg_asm(ld, st, m, old, new)                             \
 ({                                                                     \
        __typeof(*(m)) __ret;                                           \
index a594d8ed96980cd1e96c8bf3eb593368203fe53c..f19e890b99d2744ae4984ea54e2ca9e5128ce7fe 100644 (file)
@@ -304,7 +304,7 @@ do {                                                                        \
                                                                        \
        current->thread.abi = &mips_abi;                                \
                                                                        \
-       current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31;         \
+       current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31;            \
 } while (0)
 
 #endif /* CONFIG_32BIT */
@@ -366,7 +366,7 @@ do {                                                                        \
        else                                                            \
                current->thread.abi = &mips_abi;                        \
                                                                        \
-       current->thread.fpu.fcr31 = current_cpu_data.fpu_csr31;         \
+       current->thread.fpu.fcr31 = boot_cpu_data.fpu_csr31;            \
                                                                        \
        p = personality(current->personality);                          \
        if (p != PER_LINUX32 && p != PER_LINUX)                         \
index 18ae5ddef118c071e1240486e90f08be3e4b0871..c28a8499aec7f4fa18c5bd4c71922812d2ebf143 100644 (file)
 #define _PAGE_PRESENT_SHIFT    0
 #define _PAGE_PRESENT          (1 << _PAGE_PRESENT_SHIFT)
 /* R2 or later cores check for RI/XI support to determine _PAGE_READ */
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 #define _PAGE_WRITE_SHIFT      (_PAGE_PRESENT_SHIFT + 1)
 #define _PAGE_WRITE            (1 << _PAGE_WRITE_SHIFT)
 #else
 #define _PAGE_SPLITTING                (1 << _PAGE_SPLITTING_SHIFT)
 
 /* Only R2 or newer cores have the XI bit */
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 #define _PAGE_NO_EXEC_SHIFT    (_PAGE_SPLITTING_SHIFT + 1)
 #else
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_SPLITTING_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
 
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
 /* XI - page cannot be executed */
 #ifndef _PAGE_NO_EXEC_SHIFT
 #define _PAGE_NO_EXEC_SHIFT    (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_NO_READ_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
 
-#else  /* !CONFIG_CPU_MIPSR2 */
+#else  /* !CONFIG_CPU_MIPSR2 && !CONFIG_CPU_MIPSR6 */
 #define _PAGE_GLOBAL_SHIFT     (_PAGE_MODIFIED_SHIFT + 1)
 #define _PAGE_GLOBAL           (1 << _PAGE_GLOBAL_SHIFT)
-#endif /* CONFIG_CPU_MIPSR2 */
+#endif /* CONFIG_CPU_MIPSR2 || CONFIG_CPU_MIPSR6 */
 
 #define _PAGE_VALID_SHIFT      (_PAGE_GLOBAL_SHIFT + 1)
 #define _PAGE_VALID            (1 << _PAGE_VALID_SHIFT)
  */
 static inline uint64_t pte_to_entrylo(unsigned long pte_val)
 {
-#ifdef CONFIG_CPU_MIPSR2
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
        if (cpu_has_rixi) {
                int sa;
 #ifdef CONFIG_32BIT
index bb02fac9b4fa0188e12b263376f3e1e08f83b022..2b25d1ba1ea037ca82212ec542a165e714ecf29c 100644 (file)
@@ -45,7 +45,7 @@ extern int __cpu_logical_map[NR_CPUS];
 #define SMP_DUMP               0x8
 #define SMP_ASK_C0COUNT                0x10
 
-extern volatile cpumask_t cpu_callin_map;
+extern cpumask_t cpu_callin_map;
 
 /* Mask of CPUs which are currently definitely operating coherently */
 extern cpumask_t cpu_coherent_mask;
index e92d6c4b5ed192305b0b1f1605481f745cfadb10..7163cd7fdd69a622892e4be83acbe0450e8f2af0 100644 (file)
@@ -104,7 +104,6 @@ do {                                                                        \
        if (test_and_clear_tsk_thread_flag(prev, TIF_USEDMSA))          \
                __fpsave = FP_SAVE_VECTOR;                              \
        (last) = resume(prev, next, task_thread_info(next), __fpsave);  \
-       disable_msa();                                                  \
 } while (0)
 
 #define finish_arch_switch(prev)                                       \
@@ -122,6 +121,7 @@ do {                                                                        \
        if (cpu_has_userlocal)                                          \
                write_c0_userlocal(current_thread_info()->tp_value);    \
        __restore_watch();                                              \
+       disable_msa();                                                  \
 } while (0)
 
 #endif /* _ASM_SWITCH_TO_H */
index 3e307ec2afbae2f7308df902f450006bafd006f9..7afda4150a59d928537cddd33e8ed0105d90629b 100644 (file)
@@ -15,7 +15,7 @@
 #define topology_physical_package_id(cpu)      (cpu_data[cpu].package)
 #define topology_core_id(cpu)                  (cpu_data[cpu].core)
 #define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)           (&cpu_sibling_map[cpu])
+#define topology_sibling_cpumask(cpu)          (&cpu_sibling_map[cpu])
 #endif
 
 #endif /* __ASM_TOPOLOGY_H */
index bf8b32450ef6b4b5c91993f52fa205e79d2e9226..9722357d285471bc1d27e944ba334027b5b35469 100644 (file)
@@ -103,7 +103,8 @@ extern u64 __ua_limit;
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -138,7 +139,8 @@ extern u64 __ua_limit;
  * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -157,7 +159,8 @@ extern u64 __ua_limit;
  * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -177,7 +180,8 @@ extern u64 __ua_limit;
  * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -199,7 +203,8 @@ extern u64 __ua_limit;
  * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -498,7 +503,8 @@ extern void __put_user_unknown(void);
  * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -517,7 +523,8 @@ extern void __put_user_unknown(void);
  * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -537,7 +544,8 @@ extern void __put_user_unknown(void);
  * @x:  Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -559,7 +567,8 @@ extern void __put_user_unknown(void);
  * @x:  Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -815,7 +824,8 @@ extern size_t __copy_user(void *__to, const void *__from, size_t __n);
  * @from: Source address, in kernel space.
  * @n:   Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -888,7 +898,8 @@ extern size_t __copy_user_inatomic(void *__to, const void *__from, size_t __n);
  * @from: Source address, in kernel space.
  * @n:   Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -1075,7 +1086,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
  * @from: Source address, in user space.
  * @n:   Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -1107,7 +1119,8 @@ extern size_t __copy_in_user_eva(void *__to, const void *__from, size_t __n);
  * @from: Source address, in user space.
  * @n:   Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
@@ -1329,7 +1342,8 @@ strncpy_from_user(char *__to, const char __user *__from, long __len)
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
@@ -1398,7 +1412,8 @@ static inline long __strnlen_user(const char __user *s, long n)
  * strnlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
index e36515dcd3b29efcdb0014c7dfd4541805eb4e4c..209e5b76c1bce56f02ceeb1fdeffeccc6fe46bd8 100644 (file)
@@ -74,13 +74,12 @@ static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
 {
        unsigned long sr, mask, fcsr, fcsr0, fcsr1;
 
+       fcsr = c->fpu_csr31;
        mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
 
        sr = read_c0_status();
        __enable_fpu(FPU_AS_IS);
 
-       fcsr = read_32bit_cp1_register(CP1_STATUS);
-
        fcsr0 = fcsr & mask;
        write_32bit_cp1_register(CP1_STATUS, fcsr0);
        fcsr0 = read_32bit_cp1_register(CP1_STATUS);
index be4899f3c393275a992083d6f7e9f19c2fde5b87..4a4d9e067c89427fc34e990586f0e8237a9418ba 100644 (file)
@@ -76,14 +76,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
 
        /* Lets see if this is an O32 ELF */
        if (ehdr32->e_ident[EI_CLASS] == ELFCLASS32) {
-               /* FR = 1 for N32 */
-               if (ehdr32->e_flags & EF_MIPS_ABI2)
-                       state->overall_fp_mode = FP_FR1;
-               else
-                       /* Set a good default FPU mode for O32 */
-                       state->overall_fp_mode = cpu_has_mips_r6 ?
-                               FP_FRE : FP_FR0;
-
                if (ehdr32->e_flags & EF_MIPS_FP64) {
                        /*
                         * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
@@ -104,9 +96,6 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
                                  (char *)&abiflags,
                                  sizeof(abiflags));
        } else {
-               /* FR=1 is really the only option for 64-bit */
-               state->overall_fp_mode = FP_FR1;
-
                if (phdr64->p_type != PT_MIPS_ABIFLAGS)
                        return 0;
                if (phdr64->p_filesz < sizeof(abiflags))
@@ -137,6 +126,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
        struct elf32_hdr *ehdr = _ehdr;
        struct mode_req prog_req, interp_req;
        int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
+       bool is_mips64;
 
        if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
                return 0;
@@ -152,10 +142,22 @@ int arch_check_elf(void *_ehdr, bool has_interpreter,
                abi0 = abi1 = fp_abi;
        }
 
-       /* ABI limits. O32 = FP_64A, N32/N64 = FP_SOFT */
-       max_abi = ((ehdr->e_ident[EI_CLASS] == ELFCLASS32) &&
-                  (!(ehdr->e_flags & EF_MIPS_ABI2))) ?
-               MIPS_ABI_FP_64A : MIPS_ABI_FP_SOFT;
+       is_mips64 = (ehdr->e_ident[EI_CLASS] == ELFCLASS64) ||
+                   (ehdr->e_flags & EF_MIPS_ABI2);
+
+       if (is_mips64) {
+               /* MIPS64 code always uses FR=1, thus the default is easy */
+               state->overall_fp_mode = FP_FR1;
+
+               /* Disallow access to the various FPXX & FP64 ABIs */
+               max_abi = MIPS_ABI_FP_SOFT;
+       } else {
+               /* Default to a mode capable of running code expecting FR=0 */
+               state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
+
+               /* Allow all ABIs we know about */
+               max_abi = MIPS_ABI_FP_64A;
+       }
 
        if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
            (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
index d2bfbc2e8995fba3b6da1ad7a190f0d872ca6fbf..3c8a18a00a65fee62e7cc11068d3866b18b04fd1 100644 (file)
@@ -29,7 +29,7 @@
 int kgdb_early_setup;
 #endif
 
-static unsigned long irq_map[NR_IRQS / BITS_PER_LONG];
+static DECLARE_BITMAP(irq_map, NR_IRQS);
 
 int allocate_irqno(void)
 {
@@ -109,7 +109,7 @@ void __init init_IRQ(void)
 #endif
 }
 
-#ifdef DEBUG_STACKOVERFLOW
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
 static inline void check_stack_overflow(void)
 {
        unsigned long sp;
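
Two independent fixes here. DECLARE_BITMAP() sizes the irq_map array with a rounding-up division, where the open-coded NR_IRQS / BITS_PER_LONG rounded down and truncated the map whenever NR_IRQS is not a multiple of the word size. The second hunk corrects a misspelled guard: DEBUG_STACKOVERFLOW is never defined, so the stack-overflow check silently compiled out; the Kconfig symbol is CONFIG_DEBUG_STACKOVERFLOW. A small standalone demo of the rounding difference:

    #include <stdio.h>

    #define BITS_PER_LONG    (8 * sizeof(long))
    #define BITS_TO_LONGS(n) (((n) + BITS_PER_LONG - 1) / BITS_PER_LONG)

    int main(void)
    {
            unsigned long nr_irqs = 100;    /* not a multiple of 64 */

            /* The old array size rounded down and lost the last bits. */
            printf("truncating: %lu words\n",
                   (unsigned long)(nr_irqs / BITS_PER_LONG));      /* 1 */
            /* DECLARE_BITMAP() uses BITS_TO_LONGS(), which rounds up. */
            printf("rounded up: %lu words\n",
                   (unsigned long)BITS_TO_LONGS(nr_irqs));         /* 2 */
            return 0;
    }
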
index d544e774eea6b1b6e0f811099290600b1fc5b534..e933a309f2ea5fa4498153a9b6a3caa0af4a1587 100644 (file)
@@ -176,7 +176,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
 
        __get_user(value, data + 64);
        fcr31 = child->thread.fpu.fcr31;
-       mask = current_cpu_data.fpu_msk31;
+       mask = boot_cpu_data.fpu_msk31;
        child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
 
        /* FIR may not be written.  */
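
Reading the mask from boot_cpu_data instead of current_cpu_data keeps ptrace behaviour identical no matter which CPU the tracer happens to run on. The merge itself takes the writable bits from the tracer's value and the read-only bits from the task's current FCSR. A standalone demo of that read-modify-write (all constants made up):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint32_t fcr31 = 0x00A0B0C0;   /* task's current FCSR      */
            uint32_t value = 0xFFFFFFFF;   /* what the tracer writes   */
            uint32_t mask  = 0x01000000;   /* read-only bits (made up) */

            /* writable bits from 'value', read-only bits kept from fcr31 */
            uint32_t merged = (value & ~mask) | (fcr31 & mask);
            printf("merged = 0x%08x\n", merged);
            return 0;
    }
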
index 06805e09bcd35751857a6f4a1d367d6796163c1b..0b85f827cd1836165fe4d146ec1d543953edeb7b 100644 (file)
@@ -28,12 +28,7 @@ extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
 extern int fpcsr_pending(unsigned int __user *fpcsr);
 
 /* Make sure we will not lose FPU ownership */
-#ifdef CONFIG_PREEMPT
-#define lock_fpu_owner()       preempt_disable()
-#define unlock_fpu_owner()     preempt_enable()
-#else
-#define lock_fpu_owner()       pagefault_disable()
-#define unlock_fpu_owner()     pagefault_enable()
-#endif
+#define lock_fpu_owner()       ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner()     ({ pagefault_enable(); preempt_enable(); })
 
 #endif /* __SIGNAL_COMMON_H */
index fd528d7ea27867ffed69abf25d3c7b3f374b7f64..336708ae5c5b4c74b75416058feabb4bef5e30b1 100644 (file)
@@ -444,7 +444,7 @@ struct plat_smp_ops bmips5000_smp_ops = {
 static void bmips_wr_vec(unsigned long dst, char *start, char *end)
 {
        memcpy((void *)dst, start, end - start);
-       dma_cache_wback((unsigned long)start, end - start);
+       dma_cache_wback(dst, end - start);
        local_flush_icache_range(dst, dst + (end - start));
        instruction_hazard();
 }
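
The cache writeback was aimed at the source image rather than dst, the buffer memcpy() just filled, so the subsequent icache flush could still fetch stale bytes. A compact model of the corrected pattern (sync_dcache() is a stand-in, not a kernel symbol):

    #include <string.h>

    /* Stand-in for the architecture cache op; not a kernel symbol. */
    static void sync_dcache(const void *p, size_t n) { (void)p; (void)n; }

    /* Maintenance must target the destination that was just written. */
    static void wr_vec(void *dst, const char *start, const char *end)
    {
            size_t len = (size_t)(end - start);

            memcpy(dst, start, len);
            sync_dcache(dst, len);  /* was wrongly: sync_dcache(start, len) */
    }
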
index 7e011f95bb8e14785d447bf49bb1b8141cf0bc08..4251d390b5b66e0c20a7271659a280e448ca0ffe 100644 (file)
@@ -92,7 +92,7 @@ static void __init cps_smp_setup(void)
 #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(0, mt_fpu_cpumask);
+               cpumask_set_cpu(0, &mt_fpu_cpumask);
 #endif /* CONFIG_MIPS_MT_FPAFF */
 }
 
index 193ace7955fb5eec377b666db534d76173a014c5..faa46ebd9ddae2fc43f20d6ff65f28688f665c4f 100644 (file)
@@ -43,7 +43,7 @@
 #include <asm/time.h>
 #include <asm/setup.h>
 
-volatile cpumask_t cpu_callin_map;     /* Bitmask of started secondaries */
+cpumask_t cpu_callin_map;              /* Bitmask of started secondaries */
 
 int __cpu_number_map[NR_CPUS];         /* Map physical to logical */
 EXPORT_SYMBOL(__cpu_number_map);
@@ -218,8 +218,10 @@ int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        /*
         * Trust is futile.  We should really have timeouts ...
         */
-       while (!cpumask_test_cpu(cpu, &cpu_callin_map))
+       while (!cpumask_test_cpu(cpu, &cpu_callin_map)) {
                udelay(100);
+               schedule();
+       }
 
        synchronise_count_master(cpu);
        return 0;
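
Two related cleanups: the volatile qualifier comes off cpu_callin_map now that all accesses go through the cpumask accessors, and the boot CPU calls schedule() while it waits so the polling loop no longer monopolizes the CPU during secondary bring-up. A rough userspace analogue of the fixed loop:

    #include <sched.h>
    #include <stdatomic.h>
    #include <unistd.h>

    /* Poll with a short delay and yield so other runnable work can use
     * this CPU in the meantime. */
    static void wait_for(atomic_int *ready)
    {
            while (!atomic_load(ready)) {
                    usleep(100);     /* models udelay(100) */
                    sched_yield();   /* models schedule()  */
            }
    }
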
index ba32e48d4697193f3410edd79153d392ab0d8311..d2d1c1933bc9f598efaa81735d132af1ff12adee 100644 (file)
@@ -269,7 +269,6 @@ static void __show_regs(const struct pt_regs *regs)
         */
        printk("epc   : %0*lx %pS\n", field, regs->cp0_epc,
               (void *) regs->cp0_epc);
-       printk("    %s\n", print_tainted());
        printk("ra    : %0*lx %pS\n", field, regs->regs[31],
               (void *) regs->regs[31]);
 
index 6230f376a44e7ab6f09041c4b805e54e59468489..d5fa3eaf39a106546f52d82ec3e5391302ef8dec 100644 (file)
@@ -2389,7 +2389,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
 {
        unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
        enum emulation_result er = EMULATE_DONE;
-       unsigned long curr_pc;
 
        if (run->mmio.len > sizeof(*gpr)) {
                kvm_err("Bad MMIO length: %d", run->mmio.len);
@@ -2397,11 +2396,6 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                goto done;
        }
 
-       /*
-        * Update PC and hold onto current PC in case there is
-        * an error and we want to rollback the PC
-        */
-       curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, vcpu->arch.pending_load_cause);
        if (er == EMULATE_FAIL)
                return er;
@@ -2415,7 +2409,7 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
                if (vcpu->mmio_needed == 2)
                        *gpr = *(int16_t *) run->mmio.data;
                else
-                       *gpr = *(int16_t *) run->mmio.data;
+                       *gpr = *(uint16_t *)run->mmio.data;
 
                break;
        case 1:
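
Besides dropping the now-unused curr_pc, the real fix is one character: both branches of the 16-bit MMIO load used the signed cast, so "unsigned" loads were sign-extended. The distinction in miniature:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            uint16_t raw = 0xFFFF;            /* 16-bit MMIO payload  */

            long s = (int16_t)raw;            /* signed load:   -1    */
            long u = (uint16_t)raw;           /* unsigned load: 65535 */
            printf("signed: %ld, unsigned: %ld\n", s, u);
            return 0;
    }
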
index 7d12c0dded3ded2009f85ffd7ed7d6e52f645c0c..77e64942f0048c5aac366c0c80a6cf63f0c656d5 100644 (file)
@@ -34,7 +34,12 @@ LEAF(__strnlen_\func\()_asm)
 FEXPORT(__strnlen_\func\()_nocheck_asm)
        move            v0, a0
        PTR_ADDU        a1, a0                  # stop pointer
-1:     beq             v0, a1, 1f              # limit reached?
+1:
+#ifdef CONFIG_CPU_DADDI_WORKAROUNDS
+       .set            noat
+       li              AT, 1
+#endif
+       beq             v0, a1, 1f              # limit reached?
 .ifeqs "\func", "kernel"
        EX(lb, t0, (v0), .Lfault\@)
 .else
@@ -42,7 +47,13 @@ FEXPORT(__strnlen_\func\()_nocheck_asm)
 .endif
        .set            noreorder
        bnez            t0, 1b
-1:      PTR_ADDIU      v0, 1
+1:
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+        PTR_ADDIU      v0, 1
+#else
+        PTR_ADDU       v0, AT
+       .set            at
+#endif
        .set            reorder
        PTR_SUBU        v0, a0
        jr              ra
index e70c33fdb88153ac6bfdf12a4f632d3b3a26ccb9..f2e8153e44f536213e196002f005bb86da9ef72f 100644 (file)
@@ -3,15 +3,13 @@
 #
 
 obj-y += setup.o init.o cmdline.o env.o time.o reset.o irq.o \
-    bonito-irq.o mem.o machtype.o platform.o
+    bonito-irq.o mem.o machtype.o platform.o serial.o
 obj-$(CONFIG_PCI) += pci.o
 
 #
 # Serial port support
 #
 obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
-loongson-serial-$(CONFIG_SERIAL_8250) := serial.o
-obj-y += $(loongson-serial-m) $(loongson-serial-y)
 obj-$(CONFIG_LOONGSON_UART_BASE) += uart_base.o
 obj-$(CONFIG_LOONGSON_MC146818) += rtc.o
 
index e3c68b5da18da4012de0aaed6363d5a5484d5e41..509877c6e9d908d7bac6110982c7208ab69204af 100644 (file)
@@ -272,7 +272,7 @@ void loongson3_ipi_interrupt(struct pt_regs *regs)
        if (action & SMP_ASK_C0COUNT) {
                BUG_ON(cpu != 0);
                c0count = read_c0_count();
-               for (i = 1; i < loongson_sysconf.nr_cpus; i++)
+               for (i = 1; i < num_possible_cpus(); i++)
                        per_cpu(core0_c0count, i) = c0count;
        }
 }
index d31c537ace1d11b7f742fd9ead71d3f273cfa2d3..22b9b2cb9219fa4e4eb7b9cc340125a4c9aa4053 100644 (file)
@@ -889,7 +889,7 @@ static inline void cop1_cfc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                break;
 
        case FPCREG_RID:
-               value = current_cpu_data.fpu_id;
+               value = boot_cpu_data.fpu_id;
                break;
 
        default:
@@ -921,7 +921,7 @@ static inline void cop1_ctc(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
                         (void *)xcp->cp0_epc, MIPSInst_RT(ir), value);
 
                /* Preserve read-only bits.  */
-               mask = current_cpu_data.fpu_msk31;
+               mask = boot_cpu_data.fpu_msk31;
                fcr31 = (value & ~mask) | (fcr31 & mask);
                break;
 
index 0dbb65a51ce5b1c2913cfec00571710e3a0ecb10..2e03ab1735911d202ce82c97b4911b5c1002ed70 100644 (file)
@@ -1372,7 +1372,7 @@ static int probe_scache(void)
        scache_size = addr;
        c->scache.linesz = 16 << ((config & R4K_CONF_SB) >> 22);
        c->scache.ways = 1;
-       c->dcache.waybit = 0;           /* does not matter */
+       c->scache.waybit = 0;           /* does not matter */
 
        return 1;
 }
index 7ff8637e530d7974d002594797d42044505a0467..36c0f26fac6b0780318958a59fc2665a444a10ea 100644 (file)
 #include <linux/module.h>
 #include <linux/kprobes.h>
 #include <linux/perf_event.h>
+#include <linux/uaccess.h>
 
 #include <asm/branch.h>
 #include <asm/mmu_context.h>
-#include <asm/uaccess.h>
 #include <asm/ptrace.h>
 #include <asm/highmem.h>               /* For VMALLOC_END */
 #include <linux/kdebug.h>
@@ -94,7 +94,7 @@ static void __kprobes __do_page_fault(struct pt_regs *regs, unsigned long write,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto bad_area_nosemaphore;
 
        if (user_mode(regs))
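
This in_atomic() → faulthandler_disabled() conversion repeats across the architectures below. The point of the new helper is that pagefault_disable() is being decoupled from the preempt counter in this series, so in_atomic() alone would no longer notice it. Approximately (per linux/uaccess.h in this series):

    /* Bail out of the fault handler whenever faults cannot be serviced,
     * whether due to pagefault_disable() or a genuinely atomic context. */
    #define faulthandler_disabled() (pagefault_disabled() || in_atomic())
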
index da815d295239baaaf6e1d1069e2255baa8d75358..11661cbc11a8193f7a9817fb0101fd9483db9f23 100644 (file)
@@ -47,7 +47,7 @@ void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -72,6 +72,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -92,6 +93,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
@@ -104,6 +106,7 @@ void *kmap_atomic_pfn(unsigned long pfn)
        unsigned long vaddr;
        int idx, type;
 
+       preempt_disable();
        pagefault_disable();
 
        type = kmap_atomic_idx_push();
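
For the same reason, every kmap_atomic()-style path now disables preemption explicitly rather than relying on pagefault_disable() to imply it, and the unmap side re-enables in strict reverse order. Callers are unaffected; a hedged usage sketch (page, src and len are assumed locals):

    void *vaddr = kmap_atomic(page);  /* preemption + pagefaults now off */
    memcpy(vaddr, src, len);          /* no sleeping inside this window  */
    kunmap_atomic(vaddr);             /* re-enabled in reverse order     */
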
index faa5c9822eccf48bc433f886a9939d93a1112bf1..198a3147dd7d08b78746790c628d2012a65c61ec 100644 (file)
@@ -90,6 +90,7 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
 
        BUG_ON(Page_dcache_dirty(page));
 
+       preempt_disable();
        pagefault_disable();
        idx = (addr >> PAGE_SHIFT) & (FIX_N_COLOURS - 1);
        idx += in_interrupt() ? FIX_N_COLOURS : 0;
@@ -152,6 +153,7 @@ void kunmap_coherent(void)
        write_c0_entryhi(old_ctx);
        local_irq_restore(flags);
        pagefault_enable();
+       preempt_enable();
 }
 
 void copy_user_highpage(struct page *to, struct page *from,
index a27a088e6f9f830f7e3445abab6bae5cd8b60478..08318ecb803a08611adfd0025041c16526d097e2 100644 (file)
@@ -495,7 +495,7 @@ static void r4k_tlb_configure(void)
 
        if (cpu_has_rixi) {
                /*
-                * Enable the no read, no exec bits, and enable large virtual
+                * Enable the no read, no exec bits, and enable large physical
                 * address.
                 */
 #ifdef CONFIG_64BIT
index 5d6139390bf830adf503d67d004a5322d8eb7ad4..e23fdf2a9c80d2f0dbbb498343efb859c08f3b4e 100644 (file)
@@ -681,11 +681,7 @@ static unsigned int get_stack_depth(struct jit_ctx *ctx)
                sp_off += config_enabled(CONFIG_64BIT) ?
                        (ARGS_USED_BY_JIT + 1) * RSIZE : RSIZE;
 
-       /*
-        * Subtract the bytes for the last registers since we only care about
-        * the location on the stack pointer.
-        */
-       return sp_off - RSIZE;
+       return sp_off;
 }
 
 static void build_prologue(struct jit_ctx *ctx)
index e20b02e3ae28be201789dd260ab79a382be944f8..e10d10b9e82a98bf5e53382d88cbc98b769ef53c 100644 (file)
@@ -41,7 +41,7 @@ static irqreturn_t ill_acc_irq_handler(int irq, void *_priv)
                addr, (type >> ILL_ACC_OFF_S) & ILL_ACC_OFF_M,
                type & ILL_ACC_LEN_M);
 
-       rt_memc_w32(REG_ILL_ACC_TYPE, REG_ILL_ACC_TYPE);
+       rt_memc_w32(ILL_INT_STATUS, REG_ILL_ACC_TYPE);
 
        return IRQ_HANDLED;
 }
index 0134db2ad0a850ca5f2e07c912fe24e746dda902..5a2a82148d8d4f633c644f951ca87f252bdce09b 100644 (file)
@@ -130,9 +130,9 @@ struct platform_device ip32_rtc_device = {
        .resource               = ip32_rtc_resources,
 };
 
-+static int __init sgio2_rtc_devinit(void)
+static __init int sgio2_rtc_devinit(void)
 {
        return platform_device_register(&ip32_rtc_device);
 }
 
-device_initcall(sgio2_cmos_devinit);
+device_initcall(sgio2_rtc_devinit);
index 2fbbe4d920aa2efb353ed5fd52babaf309a386db..1ddea5afba09344ba8e807e6ce8f5edc5c26241f 100644 (file)
@@ -75,6 +75,7 @@ static inline void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        int idx, type;
 
+       preempt_disable();
        pagefault_disable();
        if (page < highmem_start_page)
                return page_address(page);
@@ -98,6 +99,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 
        if (vaddr < FIXADDR_START) { /* FIXME */
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -122,6 +124,7 @@ static inline void __kunmap_atomic(unsigned long vaddr)
 
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 #endif /* __KERNEL__ */
 
index cc4a2ba9e228998c7ccbd5af24d5e5d9c5c42fb6..07c5b4a3903ba61ede14417e63f0d9b7a630f7db 100644 (file)
@@ -282,6 +282,7 @@ static inline void __iomem *ioremap_nocache(unsigned long offset, unsigned long
 }
 
 #define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
 
 static inline void iounmap(void __iomem *addr)
 {
index 0c2cc5d39c8e37ce1cfe5be191902bc435c41090..4a1d181ed32f7690a82cfcba3eb264f9311b0a0f 100644 (file)
@@ -23,8 +23,8 @@
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/vt_kern.h>             /* For unblank_screen() */
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 #include <asm/hardirq.h>
 #include <asm/cpu-regs.h>
@@ -168,7 +168,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long fault_code,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if ((fault_code & MMUFCR_xFC_ACCESS) == MMUFCR_xFC_ACCESS_USR)
index 6e24d7cceb0c1407db80f7354e662f11b5f7d339..c5a62da22cd2eb40b5e0bc16588f23f0cc7ecdd4 100644 (file)
@@ -46,6 +46,7 @@ static inline void iounmap(void __iomem *addr)
 }
 
 #define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
 
 /* Pages to physical address... */
 #define page_to_phys(page)     virt_to_phys(page_to_virt(page))
index 0c9b6afe69e9094815cc1e73084422368b3a2e52..b51878b0c6b87362074c68832c1b4355f7c127cc 100644 (file)
@@ -77,7 +77,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long cause,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto bad_area_nosemaphore;
 
        if (user_mode(regs))
index de65f66ea64e7538f4f7c431ca3800e86c152e5d..ec2df4bab3022dfc35a83539afcd24207e199f9f 100644 (file)
@@ -142,6 +142,7 @@ static inline void kunmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page)
 {
+       preempt_disable();
        pagefault_disable();
        return page_address(page);
 }
@@ -150,6 +151,7 @@ static inline void __kunmap_atomic(void *addr)
 {
        flush_kernel_dcache_page_addr(addr);
        pagefault_enable();
+       preempt_enable();
 }
 
 #define kmap_atomic_prot(page, prot)   kmap_atomic(page)
index dbd13354ec414b70a9abb814f62fc94035ccdf08..0a90b965cccbefe172be5dde879b5035b4589685 100644 (file)
@@ -46,8 +46,6 @@ __xchg(unsigned long x, __volatile__ void *ptr, int size)
 #define xchg(ptr, x) \
        ((__typeof__(*(ptr)))__xchg((unsigned long)(x), (ptr), sizeof(*(ptr))))
 
-#define __HAVE_ARCH_CMPXCHG    1
-
 /* bug catcher for when unsupported size is used - won't link */
 extern void __cmpxchg_called_with_bad_pointer(void);
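
__HAVE_ARCH_CMPXCHG goes away here and in the powerpc, s390 and score hunks below: every architecture now provides cmpxchg(), so the feature-test macro is dead. For orientation, a userspace C11 analogue of the operation itself:

    #include <stdatomic.h>
    #include <stdio.h>

    int main(void)
    {
            atomic_long v = 1;
            long expected = 1;

            /* cmpxchg(&v, 1, 2): store 2 only if v still holds 1. */
            if (atomic_compare_exchange_strong(&v, &expected, 2))
                    printf("v is now %ld\n", (long)atomic_load(&v));
            return 0;
    }
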
 
index 3391d061eccc02fb64ebff9211cb9146c9f50a1c..78c9fd32c5546b6ec91d591e853d89a761383c92 100644 (file)
@@ -348,6 +348,10 @@ struct pt_regs;    /* forward declaration... */
 
 #define ELF_HWCAP      0
 
+#define STACK_RND_MASK (is_32bit_task() ? \
+                               0x7ff >> (PAGE_SHIFT - 12) : \
+                               0x3ffff >> (PAGE_SHIFT - 12))
+
 struct mm_struct;
 extern unsigned long arch_randomize_brk(struct mm_struct *);
 #define arch_randomize_brk arch_randomize_brk
index 8a488c22a99f7bb3836fc93a23a31561a237fb86..809905a811ed72a543ad257c1394900a4b976af8 100644 (file)
@@ -181,9 +181,12 @@ int dump_task_fpu (struct task_struct *tsk, elf_fpregset_t *r)
        return 1;
 }
 
+/*
+ * Copy architecture-specific thread state
+ */
 int
 copy_thread(unsigned long clone_flags, unsigned long usp,
-           unsigned long arg, struct task_struct *p)
+           unsigned long kthread_arg, struct task_struct *p)
 {
        struct pt_regs *cregs = &(p->thread.regs);
        void *stack = task_stack_page(p);
@@ -195,11 +198,10 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
        extern void * const child_return;
 
        if (unlikely(p->flags & PF_KTHREAD)) {
+               /* kernel thread */
                memset(cregs, 0, sizeof(struct pt_regs));
                if (!usp) /* idle thread */
                        return 0;
-
-               /* kernel thread */
                /* Must exit via ret_from_kernel_thread in order
                 * to call schedule_tail()
                 */
@@ -215,7 +217,7 @@ copy_thread(unsigned long clone_flags, unsigned long usp,
 #else
                cregs->gr[26] = usp;
 #endif
-               cregs->gr[25] = arg;
+               cregs->gr[25] = kthread_arg;
        } else {
                /* user thread */
                /* usp must be word aligned.  This also prevents users from
index e1ffea2f9a0b05ccda844969dcb7c519ab17077a..5aba01ac457ffc5d4823cf09c071f02f00a1d015 100644 (file)
@@ -77,6 +77,9 @@ static unsigned long mmap_upper_limit(void)
        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;
 
+       /* Add space for stack randomization. */
+       stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+
        return PAGE_ALIGN(STACK_TOP - stack_base);
 }
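
Together with the STACK_RND_MASK definition above, this reserves headroom for stack ASLR below the mmap upper limit so randomization cannot eat into the usable stack. Assuming 4 KiB pages, the masks allow up to 8 MiB (32-bit) or 1 GiB (64-bit) of randomization; a quick check:

    #include <stdio.h>

    #define PAGE_SHIFT 12   /* assume 4 KiB pages for the illustration */

    int main(void)
    {
            unsigned long m32 = 0x7ff >> (PAGE_SHIFT - 12);
            unsigned long m64 = 0x3ffff >> (PAGE_SHIFT - 12);

            /* Randomization span in bytes is (mask + 1) pages. */
            printf("32-bit: %lu MiB\n", ((m32 + 1) << PAGE_SHIFT) >> 20);
            printf("64-bit: %lu MiB\n", ((m64 + 1) << PAGE_SHIFT) >> 20);
            return 0;
    }
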
 
index 47ee620d15d27850ab8ebac1f739dfd3215dae9b..6548fd1d2e62defc2dfc6e8dfc125a42aa8e986e 100644 (file)
@@ -26,9 +26,9 @@
 #include <linux/console.h>
 #include <linux/bug.h>
 #include <linux/ratelimit.h>
+#include <linux/uaccess.h>
 
 #include <asm/assembly.h>
-#include <asm/uaccess.h>
 #include <asm/io.h>
 #include <asm/irq.h>
 #include <asm/traps.h>
@@ -800,7 +800,7 @@ void notrace handle_interruption(int code, struct pt_regs *regs)
             * unless pagefault_disable() was called before.
             */
 
-           if (fault_space == 0 && !in_atomic())
+           if (fault_space == 0 && !faulthandler_disabled())
            {
                pdc_chassis_send_status(PDC_CHASSIS_DIRECT_PANIC);
                parisc_terminate("Kernel Fault", regs, code, fault_address);
index e5120e653240c4fa52d4895c7d1d206d3d12e68c..15503adddf4f59695d34f3b5adb428250594bf66 100644 (file)
@@ -15,8 +15,8 @@
 #include <linux/sched.h>
 #include <linux/interrupt.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 
-#include <asm/uaccess.h>
 #include <asm/traps.h>
 
 /* Various important other fields */
@@ -207,7 +207,7 @@ void do_page_fault(struct pt_regs *regs, unsigned long code,
        int fault;
        unsigned int flags;
 
-       if (in_atomic())
+       if (pagefault_disabled())
                goto no_context;
 
        tsk = current;
index a3bf5be111ff1d073eb329476ce773c07ce32ed5..51ccc7232042e9f415c26580cca3a83d02175625 100644 (file)
@@ -34,7 +34,7 @@
 #define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
 #define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
 
-#define set_mb(var, value)     do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value)       do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #ifdef __SUBARCH_HAS_LWSYNC
 #    define SMPWMB      LWSYNC
@@ -89,5 +89,6 @@ do {                                                                  \
 
 #define smp_mb__before_atomic()     smp_mb()
 #define smp_mb__after_atomic()      smp_mb()
+#define smp_mb__before_spinlock()   smp_mb()
 
 #endif /* _ASM_POWERPC_BARRIER_H */
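
set_mb() becomes smp_store_mb() across the tree, and the store now goes through WRITE_ONCE() so the compiler can neither tear nor elide it before the full barrier. A rough C11 userspace analogue (var must be declared _Atomic):

    #include <stdatomic.h>

    #define store_mb(var, value)                                    \
            do {                                                    \
                    atomic_store_explicit(&(var), (value),          \
                                          memory_order_relaxed);    \
                    atomic_thread_fence(memory_order_seq_cst);      \
            } while (0)
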
index d463c68fe7f05fa798a151ea179a5f88a842ddee..ad6263cffb0fd0b3e09b192748f57f9049ce3cae 100644 (file)
@@ -144,7 +144,6 @@ __xchg_local(volatile void *ptr, unsigned long x, unsigned int size)
  * Compare and exchange - if *p == old, set it to new,
  * and return the old value of *p.
  */
-#define __HAVE_ARCH_CMPXCHG    1
 
 static __always_inline unsigned long
 __cmpxchg_u32(volatile unsigned int *p, unsigned long old, unsigned long new)
index 5f1048eaa5b6041d1194457bec16509dcabe7907..8b3b46b7b0f2795b6195eb95ee649d3dece6dc9a 100644 (file)
@@ -87,7 +87,7 @@ static inline int prrn_is_enabled(void)
 #include <asm/smp.h>
 
 #define topology_physical_package_id(cpu)      (cpu_to_chip_id(cpu))
-#define topology_thread_cpumask(cpu)   (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu)  (per_cpu(cpu_sibling_map, cpu))
 #define topology_core_cpumask(cpu)     (per_cpu(cpu_core_map, cpu))
 #define topology_core_id(cpu)          (cpu_to_core_id(cpu))
 #endif
index 15c99b649b04cf2e946e2a612a3660dc50e6712c..b2eb4686bd8f40155bbb4a982549de0ae8de8567 100644 (file)
@@ -73,7 +73,7 @@ void save_mce_event(struct pt_regs *regs, long handled,
                    uint64_t nip, uint64_t addr)
 {
        uint64_t srr1;
-       int index = __this_cpu_inc_return(mce_nest_count);
+       int index = __this_cpu_inc_return(mce_nest_count) - 1;
        struct machine_check_event *mce = this_cpu_ptr(&mce_event[index]);
 
        /*
@@ -184,7 +184,7 @@ void machine_check_queue_event(void)
        if (!get_mce_event(&evt, MCE_EVENT_RELEASE))
                return;
 
-       index = __this_cpu_inc_return(mce_queue_count);
+       index = __this_cpu_inc_return(mce_queue_count) - 1;
        /* If queue is full, just return for now. */
        if (index >= MAX_MC_EVT) {
                __this_cpu_dec(mce_queue_count);
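
__this_cpu_inc_return() yields the already-incremented value, so using it directly as an array index skipped slot 0 and could step one past the end of the event arrays; subtracting 1 restores 0-based indexing. The off-by-one in miniature:

    #include <stdio.h>

    static int count;                              /* models mce_nest_count */
    static int inc_return(int *c) { return ++*c; } /* returns the new value */

    int main(void)
    {
            /* The first caller gets 1 back, so the first slot is 1 - 1 = 0. */
            int index = inc_return(&count) - 1;
            printf("index = %d\n", index);
            return 0;
    }
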
index f096e72262f41d121398f8fa27d5286bd8641bf4..1db685104ffc2b298375c9062590ae1c2f811db9 100644 (file)
@@ -213,6 +213,7 @@ SECTIONS
                *(.opd)
        }
 
+       . = ALIGN(256);
        .got : AT(ADDR(.got) - LOAD_OFFSET) {
                __toc_start = .;
 #ifndef CONFIG_RELOCATABLE
index 48d3c5d2ecc9ee83aab086715b3ffd6bf904d80a..df81caab738339c5b8dfe7c2def731bb9ba640d1 100644 (file)
@@ -1952,7 +1952,7 @@ static void post_guest_process(struct kvmppc_vcore *vc)
  */
 static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
 {
-       struct kvm_vcpu *vcpu;
+       struct kvm_vcpu *vcpu, *vnext;
        int i;
        int srcu_idx;
 
@@ -1982,7 +1982,8 @@ static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
         */
        if ((threads_per_core > 1) &&
            ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
-               list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
+               list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
+                                        arch.run_list) {
                        vcpu->arch.ret = -EBUSY;
                        kvmppc_remove_runnable(vc, vcpu);
                        wake_up(&vcpu->arch.cpu_run);
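
kvmppc_remove_runnable() unlinks each vcpu from the list being walked, so the plain iterator would chase a stale next pointer; list_for_each_entry_safe() caches the successor in vnext before the body runs. The same idiom in a standalone list:

    #include <stdlib.h>

    struct node { int v; struct node *next; };

    int main(void)
    {
            struct node *head = NULL, *n, *next;

            for (int i = 0; i < 3; i++) {
                    n = malloc(sizeof(*n));
                    n->v = i;
                    n->next = head;
                    head = n;
            }
            /* Grab 'next' before freeing 'n', exactly what the _safe
             * iterator's spare cursor does. */
            for (n = head; n; n = next) {
                    next = n->next;
                    free(n);
            }
            return 0;
    }
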
index 3cf529ceec5bd6a85ad28b5dcf8e73dcd5d04c3c..ac93a3bd27300f9d058f45a2df4838379ecde52e 100644 (file)
@@ -27,11 +27,11 @@ int enter_vmx_usercopy(void)
        if (in_interrupt())
                return 0;
 
-       /* This acts as preempt_disable() as well and will make
-        * enable_kernel_altivec(). We need to disable page faults
-        * as they can call schedule and thus make us lose the VMX
-        * context. So on page faults, we just fail which will cause
-        * a fallback to the normal non-vmx copy.
+       preempt_disable();
+       /*
+        * We need to disable page faults as they can call schedule and
+        * thus make us lose the VMX context. So on page faults, we just
+        * fail which will cause a fallback to the normal non-vmx copy.
         */
        pagefault_disable();
 
@@ -47,6 +47,7 @@ int enter_vmx_usercopy(void)
 int exit_vmx_usercopy(void)
 {
        pagefault_enable();
+       preempt_enable();
        return 0;
 }
 
index b396868d2aa7c48438f1577df35759991b165f6a..6d535973b200dde0f64ae19a3d47f1eaca6a4c99 100644 (file)
 #include <linux/ratelimit.h>
 #include <linux/context_tracking.h>
 #include <linux/hugetlb.h>
+#include <linux/uaccess.h>
 
 #include <asm/firmware.h>
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/mmu.h>
 #include <asm/mmu_context.h>
-#include <asm/uaccess.h>
 #include <asm/tlbflush.h>
 #include <asm/siginfo.h>
 #include <asm/debug.h>
@@ -272,15 +272,16 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
        if (!arch_irq_disabled_regs(regs))
                local_irq_enable();
 
-       if (in_atomic() || mm == NULL) {
+       if (faulthandler_disabled() || mm == NULL) {
                if (!user_mode(regs)) {
                        rc = SIGSEGV;
                        goto bail;
                }
-               /* in_atomic() in user mode is really bad,
+               /* faulthandler_disabled() in user mode is really bad,
                   as is current->mm == NULL. */
                printk(KERN_EMERG "Page fault in user mode with "
-                      "in_atomic() = %d mm = %p\n", in_atomic(), mm);
+                      "faulthandler_disabled() = %d mm = %p\n",
+                      faulthandler_disabled(), mm);
                printk(KERN_EMERG "NIP = %lx  MSR = %lx\n",
                       regs->nip, regs->msr);
                die("Weird page fault", regs, SIGSEGV);
index e7450bdbe83a9380264fc149c4831b587226cd36..e292c8a609523bd30e860f5e1a469872389e7421 100644 (file)
@@ -34,7 +34,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -59,6 +59,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < __fix_to_virt(FIX_KMAP_END)) {
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -82,5 +83,6 @@ void __kunmap_atomic(void *kvaddr)
 
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
index 0ce968b00b7c665967ca3addc26d0cc04a72f729..3385e3d0506ec575f3eeebad77d2c65264a3acf3 100644 (file)
@@ -689,27 +689,34 @@ void hugetlb_free_pgd_range(struct mmu_gather *tlb,
 struct page *
 follow_huge_addr(struct mm_struct *mm, unsigned long address, int write)
 {
-       pte_t *ptep;
-       struct page *page;
+       pte_t *ptep, pte;
        unsigned shift;
        unsigned long mask, flags;
+       struct page *page = ERR_PTR(-EINVAL);
+
+       local_irq_save(flags);
+       ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+       if (!ptep)
+               goto no_page;
+       pte = READ_ONCE(*ptep);
        /*
+        * Verify it is a huge page else bail.
         * Transparent hugepages are handled by generic code. We can skip them
         * here.
         */
-       local_irq_save(flags);
-       ptep = find_linux_pte_or_hugepte(mm->pgd, address, &shift);
+       if (!shift || pmd_trans_huge(__pmd(pte_val(pte))))
+               goto no_page;
 
-       /* Verify it is a huge page else bail. */
-       if (!ptep || !shift || pmd_trans_huge(*(pmd_t *)ptep)) {
-               local_irq_restore(flags);
-               return ERR_PTR(-EINVAL);
+       if (!pte_present(pte)) {
+               page = NULL;
+               goto no_page;
        }
        mask = (1UL << shift) - 1;
-       page = pte_page(*ptep);
+       page = pte_page(pte);
        if (page)
                page += (address & mask) / PAGE_SIZE;
 
+no_page:
        local_irq_restore(flags);
        return page;
 }
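
The lookup now reads the pte exactly once via READ_ONCE() and derives every subsequent test from that snapshot, so a concurrent THP collapse cannot be observed half old, half new; the !pte_present() case is also handled instead of returning a page for a non-present entry. The snapshot-once pattern, modeled in userspace (the bit layout is made up):

    #include <stdint.h>

    static uint64_t lookup(const volatile uint64_t *entryp)
    {
            uint64_t entry = *entryp;   /* models READ_ONCE(*ptep) */

            if (!(entry & 1))           /* "present" bit, illustrative */
                    return 0;
            return entry >> 12;         /* frame derived from the snapshot */
    }
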
index 59daa5eeec2526ae481a5dda586c629a985813d7..6bfadf1aa5cbbfadbd7237e5f3da8ac8653a5651 100644 (file)
@@ -839,6 +839,17 @@ pmd_t pmdp_get_and_clear(struct mm_struct *mm,
         * hash fault look at them.
         */
        memset(pgtable, 0, PTE_FRAG_SIZE);
+       /*
+        * Serialize against find_linux_pte_or_hugepte, which does a
+        * lock-less lookup in the page tables with local interrupts
+        * disabled. For huge pages it casts pmd_t to pte_t. Since the
+        * format of pte_t differs from pmd_t, we want to prevent the pmd
+        * from switching between pointing to a page table and pointing
+        * to a huge page (and back) while interrupts are disabled. We
+        * clear the pmd here so it can be replaced with a page table
+        * pointer in other code paths, so make sure we wait for any
+        * parallel find_linux_pte_or_hugepte to finish.
+        */
+       kick_all_cpus_sync();
        return old_pmd;
 }
 
index cbd3d069897f61d11c8c4497700f1d746178a0f7..723a099f6be31ac425873a6363be7c19f916dea6 100644 (file)
@@ -217,7 +217,7 @@ static DEFINE_RAW_SPINLOCK(tlbivax_lock);
 static int mm_is_core_local(struct mm_struct *mm)
 {
        return cpumask_subset(mm_cpumask(mm),
-                             topology_thread_cpumask(smp_processor_id()));
+                             topology_sibling_cpumask(smp_processor_id()));
 }
 
 struct tlb_flush_param {
index 7940dc90e80bc6729371ab565bad743b1087ef72..b258110da952d320443d113cdcff38beb14ae432 100644 (file)
 #define GHASH_DIGEST_SIZE      16
 
 struct ghash_ctx {
-       u8 icv[16];
-       u8 key[16];
+       u8 key[GHASH_BLOCK_SIZE];
 };
 
 struct ghash_desc_ctx {
+       u8 icv[GHASH_BLOCK_SIZE];
+       u8 key[GHASH_BLOCK_SIZE];
        u8 buffer[GHASH_BLOCK_SIZE];
        u32 bytes;
 };
@@ -28,8 +29,10 @@ struct ghash_desc_ctx {
 static int ghash_init(struct shash_desc *desc)
 {
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
+       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 
        memset(dctx, 0, sizeof(*dctx));
+       memcpy(dctx->key, ctx->key, GHASH_BLOCK_SIZE);
 
        return 0;
 }
@@ -45,7 +48,6 @@ static int ghash_setkey(struct crypto_shash *tfm,
        }
 
        memcpy(ctx->key, key, GHASH_BLOCK_SIZE);
-       memset(ctx->icv, 0, GHASH_BLOCK_SIZE);
 
        return 0;
 }
@@ -54,7 +56,6 @@ static int ghash_update(struct shash_desc *desc,
                         const u8 *src, unsigned int srclen)
 {
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
        unsigned int n;
        u8 *buf = dctx->buffer;
        int ret;
@@ -70,7 +71,7 @@ static int ghash_update(struct shash_desc *desc,
                src += n;
 
                if (!dctx->bytes) {
-                       ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf,
+                       ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf,
                                              GHASH_BLOCK_SIZE);
                        if (ret != GHASH_BLOCK_SIZE)
                                return -EIO;
@@ -79,7 +80,7 @@ static int ghash_update(struct shash_desc *desc,
 
        n = srclen & ~(GHASH_BLOCK_SIZE - 1);
        if (n) {
-               ret = crypt_s390_kimd(KIMD_GHASH, ctx, src, n);
+               ret = crypt_s390_kimd(KIMD_GHASH, dctx, src, n);
                if (ret != n)
                        return -EIO;
                src += n;
@@ -94,7 +95,7 @@ static int ghash_update(struct shash_desc *desc,
        return 0;
 }
 
-static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
+static int ghash_flush(struct ghash_desc_ctx *dctx)
 {
        u8 *buf = dctx->buffer;
        int ret;
@@ -104,24 +105,24 @@ static int ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
 
                memset(pos, 0, dctx->bytes);
 
-               ret = crypt_s390_kimd(KIMD_GHASH, ctx, buf, GHASH_BLOCK_SIZE);
+               ret = crypt_s390_kimd(KIMD_GHASH, dctx, buf, GHASH_BLOCK_SIZE);
                if (ret != GHASH_BLOCK_SIZE)
                        return -EIO;
+
+               dctx->bytes = 0;
        }
 
-       dctx->bytes = 0;
        return 0;
 }
 
 static int ghash_final(struct shash_desc *desc, u8 *dst)
 {
        struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);
-       struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
        int ret;
 
-       ret = ghash_flush(ctx, dctx);
+       ret = ghash_flush(dctx);
        if (!ret)
-               memcpy(dst, ctx->icv, GHASH_BLOCK_SIZE);
+               memcpy(dst, dctx->icv, GHASH_BLOCK_SIZE);
        return ret;
 }
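
The root cause: icv (and the working key) lived in the per-tfm context, which is shared by every user of the transform, so concurrent hash operations corrupted each other's running digest. All mutable state moves into the per-request descriptor context, and ghash_init() seeds each request with its own copy of the key. The pattern, sketched with illustrative names:

    #include <stdint.h>
    #include <string.h>

    struct tfm_ctx { uint8_t key[16]; };                  /* shared, set once */
    struct req_ctx { uint8_t icv[16]; uint8_t key[16]; }; /* private per op   */

    static void req_init(struct req_ctx *r, const struct tfm_ctx *t)
    {
            memset(r, 0, sizeof(*r));                /* fresh icv per request */
            memcpy(r->key, t->key, sizeof(r->key));  /* snapshot the key      */
    }
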
 
index 1f374b39a4ec9a933249b3924d393d118084112f..9d5192c9496317d490a338e19a69718ef00741dd 100644 (file)
@@ -125,7 +125,7 @@ static int generate_entropy(u8 *ebuf, size_t nbytes)
                /* fill page with urandom bytes */
                get_random_bytes(pg, PAGE_SIZE);
                /* exor page with stckf values */
-               for (n = 0; n < sizeof(PAGE_SIZE/sizeof(u64)); n++) {
+               for (n = 0; n < PAGE_SIZE / sizeof(u64); n++) {
                        u64 *p = ((u64 *)pg) + n;
                        *p ^= get_tod_clock_fast();
                }
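
A classic sizeof-on-an-expression bug: sizeof(PAGE_SIZE/sizeof(u64)) is the size of the quotient's type (8 on 64-bit), not the quotient (512), so only the first 64 bytes of each page were mixed with clock values. Verifiable in isolation:

    #include <stdio.h>

    #define PAGE_SIZE 4096UL
    typedef unsigned long long u64;

    int main(void)
    {
            printf("buggy bound: %zu\n", sizeof(PAGE_SIZE / sizeof(u64)));
            printf("fixed bound: %zu\n", (size_t)(PAGE_SIZE / sizeof(u64)));
            return 0;
    }
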
index 8d724718ec21c8d0331e1b8fbc14c96cb5892649..e6f8615a11eb5c72ed84576de97924f5481031ae 100644 (file)
@@ -36,7 +36,7 @@
 #define smp_mb__before_atomic()                smp_mb()
 #define smp_mb__after_atomic()         smp_mb()
 
-#define set_mb(var, value)             do { var = value; mb(); } while (0)
+#define smp_store_mb(var, value)               do { WRITE_ONCE(var, value); mb(); } while (0)
 
 #define smp_store_release(p, v)                                                \
 do {                                                                   \
index 4eadec466b8ca3ecc5ba29881856fef9f85d79f3..411464f4c97a57fd49d87debc7ef041767342699 100644 (file)
@@ -32,8 +32,6 @@
        __old;                                                          \
 })
 
-#define __HAVE_ARCH_CMPXCHG
-
 #define __cmpxchg_double_op(p1, p2, o1, o2, n1, n2, insn)              \
 ({                                                                     \
        register __typeof__(*(p1)) __old1 asm("2") = (o1);              \
index 30fd5c84680e84f60fe438f6e9c0bc5b45f58ba4..cb5fdf3a78fc8205929fa995676961afe3d90798 100644 (file)
@@ -29,6 +29,7 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 
 #define ioremap_nocache(addr, size)    ioremap(addr, size)
 #define ioremap_wc                     ioremap_nocache
+#define ioremap_wt                     ioremap_nocache
 
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
index fc642399b489d896f2b4494cc57456ab7bd2ddb9..ef24a212eeb727b8d8df3326115ef17dbeb0a272 100644 (file)
@@ -494,7 +494,7 @@ static inline int pmd_large(pmd_t pmd)
        return (pmd_val(pmd) & _SEGMENT_ENTRY_LARGE) != 0;
 }
 
-static inline int pmd_pfn(pmd_t pmd)
+static inline unsigned long pmd_pfn(pmd_t pmd)
 {
        unsigned long origin_mask;
 
index 98eb2a5792234d9d12c303bdb1301f869f706b60..dcb6312a0b918089118c7e7835610addefbe2309 100644 (file)
@@ -10,6 +10,7 @@
 #define _ASM_S390_TIMEX_H
 
 #include <asm/lowcore.h>
+#include <linux/time64.h>
 
 /* The value of the TOD clock for 1.1.1970. */
 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
@@ -108,10 +109,10 @@ int get_sync_clock(unsigned long long *clock);
 void init_cpu_timer(void);
 unsigned long long monotonic_clock(void);
 
-void tod_to_timeval(__u64, struct timespec *);
+void tod_to_timeval(__u64 todval, struct timespec64 *xt);
 
 static inline
-void stck_to_timespec(unsigned long long stck, struct timespec *ts)
+void stck_to_timespec64(unsigned long long stck, struct timespec64 *ts)
 {
        tod_to_timeval(stck - TOD_UNIX_EPOCH, ts);
 }
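
This and the following s390 hunks migrate the TOD conversion helpers to timespec64 and the read_persistent_clock64()/read_boot_clock64() interfaces, whose tv_sec is 64-bit on every architecture. The motivation is the 2038 rollover of a 32-bit signed seconds counter:

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
            int32_t sec32 = INT32_MAX;      /* 2038-01-19T03:14:07Z */

            printf("last 32-bit second: %d\n", sec32);
            /* One second later, a 32-bit tv_sec wraps negative;
             * a 64-bit tv_sec simply keeps counting. */
            printf("with 64 bits: %lld\n", (long long)sec32 + 1);
            return 0;
    }
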
index b1453a2ae1ca583b2d4a0dc99bc325fd30ddf10d..4990f6c66288582b21ad01295b032cb13da9ee31 100644 (file)
@@ -22,7 +22,8 @@ DECLARE_PER_CPU(struct cpu_topology_s390, cpu_topology);
 
 #define topology_physical_package_id(cpu) (per_cpu(cpu_topology, cpu).socket_id)
 #define topology_thread_id(cpu)                  (per_cpu(cpu_topology, cpu).thread_id)
-#define topology_thread_cpumask(cpu)     (&per_cpu(cpu_topology, cpu).thread_mask)
+#define topology_sibling_cpumask(cpu) \
+               (&per_cpu(cpu_topology, cpu).thread_mask)
 #define topology_core_id(cpu)            (per_cpu(cpu_topology, cpu).core_id)
 #define topology_core_cpumask(cpu)       (&per_cpu(cpu_topology, cpu).core_mask)
 #define topology_book_id(cpu)            (per_cpu(cpu_topology, cpu).book_id)
index d64a7a62164f854e3fd627694189c37e2860c92b..9dd4cc47ddc79298886fe40a5e229628b2ceff3d 100644 (file)
@@ -98,7 +98,8 @@ static inline unsigned long extable_fixup(const struct exception_table_entry *x)
  * @from: Source address, in user space.
  * @n:   Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -118,7 +119,8 @@ unsigned long __must_check __copy_from_user(void *to, const void __user *from,
  * @from: Source address, in kernel space.
  * @n:   Number of bytes to copy.
  *
- * Context: User context only. This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -264,7 +266,8 @@ int __get_user_bad(void) __attribute__((noreturn));
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -290,7 +293,8 @@ __compiletime_warning("copy_from_user() buffer size is not provably correct")
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
@@ -348,7 +352,8 @@ static inline unsigned long strnlen_user(const char __user *src, unsigned long n
  * strlen_user: - Get the size of a string in user space.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
index c1f21aca76e7fe0b1f7200f82c97646c51b08c00..6fca0e46464e01842f613584e4c82e5ca1fe4270 100644 (file)
@@ -1457,23 +1457,24 @@ int
 debug_dflt_header_fn(debug_info_t * id, struct debug_view *view,
                         int area, debug_entry_t * entry, char *out_buf)
 {
-       struct timespec time_spec;
+       struct timespec64 time_spec;
        char *except_str;
        unsigned long caller;
        int rc = 0;
        unsigned int level;
 
        level = entry->id.fields.level;
-       stck_to_timespec(entry->id.stck, &time_spec);
+       stck_to_timespec64(entry->id.stck, &time_spec);
 
        if (entry->id.fields.exception)
                except_str = "*";
        else
                except_str = "-";
        caller = ((unsigned long) entry->caller) & PSW_ADDR_INSN;
-       rc += sprintf(out_buf, "%02i %011lu:%06lu %1u %1s %02i %p  ",
-                     area, time_spec.tv_sec, time_spec.tv_nsec / 1000, level,
-                     except_str, entry->id.fields.cpuid, (void *) caller);
+       rc += sprintf(out_buf, "%02i %011lld:%06lu %1u %1s %02i %p  ",
+                     area, (long long)time_spec.tv_sec,
+                     time_spec.tv_nsec / 1000, level, except_str,
+                     entry->id.fields.cpuid, (void *)caller);
        return rc;
 }
 EXPORT_SYMBOL(debug_dflt_header_fn);
index 170ddd2018b31667df8619b471df42b7fb562705..9e733d965e08886611ef40535f09505c5b7b9878 100644 (file)
@@ -76,7 +76,7 @@ unsigned long long monotonic_clock(void)
 }
 EXPORT_SYMBOL(monotonic_clock);
 
-void tod_to_timeval(__u64 todval, struct timespec *xt)
+void tod_to_timeval(__u64 todval, struct timespec64 *xt)
 {
        unsigned long long sec;
 
@@ -181,12 +181,12 @@ static void timing_alert_interrupt(struct ext_code ext_code,
 static void etr_reset(void);
 static void stp_reset(void);
 
-void read_persistent_clock(struct timespec *ts)
+void read_persistent_clock64(struct timespec64 *ts)
 {
        tod_to_timeval(get_tod_clock() - TOD_UNIX_EPOCH, ts);
 }
 
-void read_boot_clock(struct timespec *ts)
+void read_boot_clock64(struct timespec64 *ts)
 {
        tod_to_timeval(sched_clock_base_cc - TOD_UNIX_EPOCH, ts);
 }
index 76515bcea2f18f5f78e59b86c0bd331fed0380f2..4c8f5d7f9c23d74c960cd1172f266391ba8ec1a3 100644 (file)
@@ -399,7 +399,7 @@ static inline int do_exception(struct pt_regs *regs, int access)
         * user context.
         */
        fault = VM_FAULT_BADCONTEXT;
-       if (unlikely(!user_space_fault(regs) || in_atomic() || !mm))
+       if (unlikely(!user_space_fault(regs) || faulthandler_disabled() || !mm))
                goto out;
 
        address = trans_exc_code & __FAIL_ADDR_MASK;
index ba8593a515baaa274d968aa64e6f54125238c032..de156ba3bd71c0d4db274a619c7c9fd6038c119c 100644 (file)
@@ -48,7 +48,9 @@ extern u8 sk_load_word[], sk_load_half[], sk_load_byte[];
  * We get 160 bytes stack space from calling function, but only use
  * 11 * 8 byte (old backchain + r15 - r6) for storing registers.
  */
-#define STK_OFF (MAX_BPF_STACK + 8 + 4 + 4 + (160 - 11 * 8))
+#define STK_SPACE      (MAX_BPF_STACK + 8 + 4 + 4 + 160)
+#define STK_160_UNUSED (160 - 11 * 8)
+#define STK_OFF                (STK_SPACE - STK_160_UNUSED)
 #define STK_OFF_TMP    160     /* Offset of tmp buffer on stack */
 #define STK_OFF_HLEN   168     /* Offset of SKB header length on stack */
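
Splitting the constant exposes STK_160_UNUSED, the 72 bytes of the caller-provided 160-byte register save area that the JIT doesn't need and can hand to the BPF program (see the prologue change below, which points the BPF frame pointer at it). Assuming the usual MAX_BPF_STACK of 512 bytes, the arithmetic works out as:

    #include <stdio.h>

    #define MAX_BPF_STACK   512     /* assumed; see linux/bpf.h */
    #define STK_SPACE       (MAX_BPF_STACK + 8 + 4 + 4 + 160)
    #define STK_160_UNUSED  (160 - 11 * 8)
    #define STK_OFF         (STK_SPACE - STK_160_UNUSED)

    int main(void)
    {
            printf("STK_SPACE=%d STK_160_UNUSED=%d STK_OFF=%d\n",
                   STK_SPACE, STK_160_UNUSED, STK_OFF);   /* 688 72 616 */
            return 0;
    }
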
 
index 7690dc8e1ab5bb619bb19ec1be1b12853e1b0589..55423d8be580113d045d30edbf86d26fb74340ff 100644 (file)
@@ -384,13 +384,16 @@ static void bpf_jit_prologue(struct bpf_jit *jit)
        }
        /* Setup stack and backchain */
        if (jit->seen & SEEN_STACK) {
-               /* lgr %bfp,%r15 (BPF frame pointer) */
-               EMIT4(0xb9040000, BPF_REG_FP, REG_15);
+               if (jit->seen & SEEN_FUNC)
+                       /* lgr %w1,%r15 (backchain) */
+                       EMIT4(0xb9040000, REG_W1, REG_15);
+               /* la %bfp,STK_160_UNUSED(%r15) (BPF frame pointer) */
+               EMIT4_DISP(0x41000000, BPF_REG_FP, REG_15, STK_160_UNUSED);
                /* aghi %r15,-STK_OFF */
                EMIT4_IMM(0xa70b0000, REG_15, -STK_OFF);
                if (jit->seen & SEEN_FUNC)
-                       /* stg %bfp,152(%r15) (backchain) */
-                       EMIT6_DISP_LH(0xe3000000, 0x0024, BPF_REG_FP, REG_0,
+                       /* stg %w1,152(%r15) (backchain) */
+                       EMIT6_DISP_LH(0xe3000000, 0x0024, REG_W1, REG_0,
                                      REG_15, 152);
        }
        /*
@@ -443,8 +446,11 @@ static void bpf_jit_epilogue(struct bpf_jit *jit)
 
 /*
  * Compile one eBPF instruction into s390x code
+ *
+ * NOTE: Use noinline because for gcov (-fprofile-arcs) gcc allocates a lot of
+ * stack space for the large switch statement.
  */
-static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
+static noinline int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
 {
        struct bpf_insn *insn = &fp->insnsi[i];
        int jmp_off, last, insn_count = 1;
@@ -588,8 +594,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4(0xb9160000, dst_reg, rc_reg);
                break;
        }
-       case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / (u32) src */
-       case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % (u32) src */
+       case BPF_ALU64 | BPF_DIV | BPF_X: /* dst = dst / src */
+       case BPF_ALU64 | BPF_MOD | BPF_X: /* dst = dst % src */
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
@@ -602,10 +608,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4_IMM(0xa7090000, REG_W0, 0);
                /* lgr %w1,%dst */
                EMIT4(0xb9040000, REG_W1, dst_reg);
-               /* llgfr %dst,%src (u32 cast) */
-               EMIT4(0xb9160000, dst_reg, src_reg);
                /* dlgr %w0,%dst */
-               EMIT4(0xb9870000, REG_W0, dst_reg);
+               EMIT4(0xb9870000, REG_W0, src_reg);
                /* lgr %dst,%rc */
                EMIT4(0xb9040000, dst_reg, rc_reg);
                break;
@@ -632,8 +636,8 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4(0xb9160000, dst_reg, rc_reg);
                break;
        }
-       case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / (u32) imm */
-       case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % (u32) imm */
+       case BPF_ALU64 | BPF_DIV | BPF_K: /* dst = dst / imm */
+       case BPF_ALU64 | BPF_MOD | BPF_K: /* dst = dst % imm */
        {
                int rc_reg = BPF_OP(insn->code) == BPF_DIV ? REG_W1 : REG_W0;
 
@@ -649,7 +653,7 @@ static int bpf_jit_insn(struct bpf_jit *jit, struct bpf_prog *fp, int i)
                EMIT4(0xb9040000, REG_W1, dst_reg);
                /* dlg %w0,<d(imm)>(%l) */
                EMIT6_DISP_LH(0xe3000000, 0x0087, REG_W0, REG_0, REG_L,
-                             EMIT_CONST_U64((u32) imm));
+                             EMIT_CONST_U64(imm));
                /* lgr %dst,%rc */
                EMIT4(0xb9040000, dst_reg, rc_reg);
                break;
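The DIV/MOD hunks above are a correctness fix, not a cleanup: the old JIT cast the 64-bit divisor down to u32 before dividing, which is wrong for BPF_ALU64 and can even manufacture a divide-by-zero. A hedged user-space illustration of the difference:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t dst = 1ULL << 40;
            uint64_t src = 1ULL << 36;            /* low 32 bits are zero */

            uint32_t old = (uint32_t)src;         /* old JIT divisor: 0 */
            printf("truncated divisor = %u\n", old);
            printf("correct dst / src = %llu\n",  /* fixed JIT: 16 */
                   (unsigned long long)(dst / src));
            return 0;
    }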
index f384839c3ee53e2608209217372423025e21e911..cc3f6420b71c1d6438067761c3a905f6033dc36a 100644 (file)
@@ -42,8 +42,6 @@ static inline unsigned long __cmpxchg(volatile unsigned long *m,
                                        (unsigned long)(o),     \
                                        (unsigned long)(n)))
 
-#define __HAVE_ARCH_CMPXCHG    1
-
 #include <asm-generic/cmpxchg-local.h>
 
 #endif /* _ASM_SCORE_CMPXCHG_H */
index ab66ddde777b5a3413cb2e334fa4ca6be6614c97..20a3591225ccea9aa3d93acc11a97a34bf254501 100644 (file)
@@ -36,7 +36,8 @@
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -61,7 +62,8 @@
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -79,7 +81,8 @@
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
index 00b7d3a2fc60681253eb2e1c1b874e48bbd02a4a..16efa3ad037f7cffbdbb4a5ffcf57a5d25325648 100644 (file)
@@ -175,10 +175,10 @@ ENTRY(__clear_user)
        br      r3
 
        .section .fixup, "ax"
+99:
        br      r3
        .previous
        .section __ex_table, "a"
        .align  2
-99:
        .word   0b, 99b
        .previous
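The fix above matters because each `.word 0b, 99b` entry in __ex_table is a (faulting insn, fixup) address pair; the 99: label must therefore mark code in .fixup rather than sit inside the table itself. A simplified C sketch of how such a pair is consumed on a fault (the real kernel lookup is sorted and arch-specific):

    struct exception_table_entry {
            unsigned long insn;     /* address of the faulting instruction */
            unsigned long fixup;    /* where to resume, e.g. label 99: */
    };

    static unsigned long search_fixup(const struct exception_table_entry *tbl,
                                      int n, unsigned long fault_pc)
    {
            for (int i = 0; i < n; i++)
                    if (tbl[i].insn == fault_pc)
                            return tbl[i].fixup;
            return 0;               /* no fixup registered: a real oops */
    }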
index 6860beb2a280d0a4a65a67c89ad2201b33513068..37a6c2e0e96926f26902484969befaf860fc52fc 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/ptrace.h>
+#include <linux/uaccess.h>
 
 /*
  * This routine handles page faults.  It determines the address,
@@ -73,7 +74,7 @@ asmlinkage void do_page_fault(struct pt_regs *regs, unsigned long write,
        * If we're in an interrupt or have no user
        * context, we must not take the fault..
        */
-       if (in_atomic() || !mm)
+       if (pagefault_disabled() || !mm)
                goto bad_area_nosemaphore;
 
        if (user_mode(regs))
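This hunk is one instance of a tree-wide conversion: instead of inferring "may not fault" from in_atomic(), the handler asks an explicit question. A hedged sketch of the relationship between the two helpers used across this series (the real definitions live in linux/uaccess.h):

    /* Sketch: faulthandler_disabled() is true inside an explicit
     * pagefault_disable() region or in any other atomic context; in
     * either case the fault handler must not sleep and bails out to
     * the no-context path, as above. */
    static inline bool faulthandler_disabled_sketch(void)
    {
            return pagefault_disabled() || in_atomic();
    }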
index 43715308b06874c115d1a5477b00beea79d68a7c..bf91037db4e01f66dcf6a2e84536279dcbf028c1 100644 (file)
@@ -32,7 +32,7 @@
 #define ctrl_barrier() __asm__ __volatile__ ("nop;nop;nop;nop;nop;nop;nop;nop")
 #endif
 
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 
 #include <asm-generic/barrier.h>
 
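set_mb() becomes smp_store_mb() here as part of a tree-wide rename; the contract is unchanged: store the value, then erect a full memory barrier. A hedged C11 user-space analogue of that contract (not the kernel implementation):

    #include <stdatomic.h>

    #define smp_store_mb_sketch(var, value)                    \
            do {                                               \
                    (var) = (value);                           \
                    atomic_thread_fence(memory_order_seq_cst); \
            } while (0)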
index f6bd1406b897fc927510724d8abd99e807b85ad8..85c97b188d71647683df8bebc6fd7fbbb3d55442 100644 (file)
@@ -46,8 +46,6 @@ extern void __xchg_called_with_bad_pointer(void);
  * if something tries to do an invalid cmpxchg(). */
 extern void __cmpxchg_called_with_bad_pointer(void);
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long __cmpxchg(volatile void * ptr, unsigned long old,
                unsigned long new, int size)
 {
index a58fec9b55e016df85cdfb7c214cc385e300479c..79d8276377d1e2f62e6c9231f735ef0f1c5ca22e 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/kprobes.h>
 #include <linux/perf_event.h>
 #include <linux/kdebug.h>
+#include <linux/uaccess.h>
 #include <asm/io_trapped.h>
 #include <asm/mmu_context.h>
 #include <asm/tlbflush.h>
@@ -438,9 +439,9 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
        /*
         * If we're in an interrupt, have no user context or are running
-        * in an atomic region then we must not take the fault:
+        * with pagefaults disabled then we must not take the fault:
         */
-       if (unlikely(in_atomic() || !mm)) {
+       if (unlikely(faulthandler_disabled() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }
index 76648941fea71b4327e058d6e995a3331587541c..809941e33e1217489b1c88caa4c000cf55a08b62 100644 (file)
@@ -40,8 +40,8 @@ do {  __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
 #define dma_rmb()      rmb()
 #define dma_wmb()      wmb()
 
-#define set_mb(__var, __value) \
-       do { __var = __value; membar_safe("#StoreLoad"); } while(0)
+#define smp_store_mb(__var, __value) \
+       do { WRITE_ONCE(__var, __value); membar_safe("#StoreLoad"); } while(0)
 
 #ifdef CONFIG_SMP
 #define smp_mb()       mb()
index d38b52dca216273a147328cc2d0288c20c2eeba6..83ffb83c5397f3e4bede59570856a11c41d9fc0e 100644 (file)
@@ -34,7 +34,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr, int
  *
  * Cribbed from <asm-parisc/atomic.h>
  */
-#define __HAVE_ARCH_CMPXCHG    1
 
 /* bug catcher for when unsupported size is used - won't link */
 void __cmpxchg_called_with_bad_pointer(void);
index 0e1ed6cfbf68faa50792369faa69b4f917db1fd1..faa2f61058c271abf9ce799a02c9a55678464533 100644 (file)
@@ -65,8 +65,6 @@ static inline unsigned long __xchg(unsigned long x, __volatile__ void * ptr,
 
 #include <asm-generic/cmpxchg-local.h>
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 static inline unsigned long
 __cmpxchg_u32(volatile int *m, int old, int new)
 {
index a6e424d185d063bdddd43719e6a994fad6902c17..a6cfdabb6054aef28846342f49fdb2718e1263a5 100644 (file)
@@ -24,7 +24,8 @@ typedef struct {
        unsigned int    icache_line_size;
        unsigned int    ecache_size;
        unsigned int    ecache_line_size;
-       int             core_id;
+       unsigned short  sock_id;
+       unsigned short  core_id;
        int             proc_id;
 } cpuinfo_sparc;
 
index 407ac14295f43ec01e93141e1673794d5e3f024c..57f26c398dc9d21961e3d811ac1e11c8ea4ccf71 100644 (file)
@@ -129,6 +129,7 @@ static inline void sbus_memcpy_toio(volatile void __iomem *dst,
 void __iomem *ioremap(unsigned long offset, unsigned long size);
 #define ioremap_nocache(X,Y)   ioremap((X),(Y))
 #define ioremap_wc(X,Y)                ioremap((X),(Y))
+#define ioremap_wt(X,Y)                ioremap((X),(Y))
 void iounmap(volatile void __iomem *addr);
 
 /* Create a virtual mapping cookie for an IO port range */
index 50d4840d9aebbfa036c7a110e9f866fa77007647..c32fa3f752c8ea4954d2d2faeb7bc3f1c2015453 100644 (file)
@@ -402,6 +402,7 @@ static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 
 #define ioremap_nocache(X,Y)           ioremap((X),(Y))
 #define ioremap_wc(X,Y)                        ioremap((X),(Y))
+#define ioremap_wt(X,Y)                        ioremap((X),(Y))
 
 static inline void iounmap(volatile void __iomem *addr)
 {
index dc165ebdf05aef6086bf5d5c5b1dd3a85f686648..2a52c91d2c8acbf5f904e082400ba782d7279947 100644 (file)
@@ -308,12 +308,26 @@ static inline pte_t pte_modify(pte_t pte, pgprot_t prot)
        "       sllx            %1, 32, %1\n"
        "       or              %0, %1, %0\n"
        "       .previous\n"
+       "       .section        .sun_m7_2insn_patch, \"ax\"\n"
+       "       .word           661b\n"
+       "       sethi           %%uhi(%4), %1\n"
+       "       sethi           %%hi(%4), %0\n"
+       "       .word           662b\n"
+       "       or              %1, %%ulo(%4), %1\n"
+       "       or              %0, %%lo(%4), %0\n"
+       "       .word           663b\n"
+       "       sllx            %1, 32, %1\n"
+       "       or              %0, %1, %0\n"
+       "       .previous\n"
        : "=r" (mask), "=r" (tmp)
        : "i" (_PAGE_PADDR_4U | _PAGE_MODIFIED_4U | _PAGE_ACCESSED_4U |
               _PAGE_CP_4U | _PAGE_CV_4U | _PAGE_E_4U |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4U),
          "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
               _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_E_4V |
+              _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V),
+         "i" (_PAGE_PADDR_4V | _PAGE_MODIFIED_4V | _PAGE_ACCESSED_4V |
+              _PAGE_CP_4V | _PAGE_E_4V |
               _PAGE_SPECIAL | _PAGE_PMD_HUGE | _PAGE_SZALL_4V));
 
        return __pte((pte_val(pte) & mask) | (pgprot_val(prot) & ~mask));
@@ -342,9 +356,15 @@ static inline pgprot_t pgprot_noncached(pgprot_t prot)
        "       andn            %0, %4, %0\n"
        "       or              %0, %5, %0\n"
        "       .previous\n"
+       "       .section        .sun_m7_2insn_patch, \"ax\"\n"
+       "       .word           661b\n"
+       "       andn            %0, %6, %0\n"
+       "       or              %0, %5, %0\n"
+       "       .previous\n"
        : "=r" (val)
        : "0" (val), "i" (_PAGE_CP_4U | _PAGE_CV_4U), "i" (_PAGE_E_4U),
-                    "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V));
+                    "i" (_PAGE_CP_4V | _PAGE_CV_4V), "i" (_PAGE_E_4V),
+                    "i" (_PAGE_CP_4V));
 
        return __pgprot(val);
 }
index ed8f071132e4d0e045bd9ffe22ce90f6604e0e3a..01d17046225a8aa084b2a8f9d70a235bcab681f8 100644 (file)
@@ -40,11 +40,12 @@ static inline int pcibus_to_node(struct pci_bus *pbus)
 #ifdef CONFIG_SMP
 #define topology_physical_package_id(cpu)      (cpu_data(cpu).proc_id)
 #define topology_core_id(cpu)                  (cpu_data(cpu).core_id)
-#define topology_core_cpumask(cpu)             (&cpu_core_map[cpu])
-#define topology_thread_cpumask(cpu)           (&per_cpu(cpu_sibling_map, cpu))
+#define topology_core_cpumask(cpu)             (&cpu_core_sib_map[cpu])
+#define topology_sibling_cpumask(cpu)          (&per_cpu(cpu_sibling_map, cpu))
 #endif /* CONFIG_SMP */
 
 extern cpumask_t cpu_core_map[NR_CPUS];
+extern cpumask_t cpu_core_sib_map[NR_CPUS];
 static inline const struct cpumask *cpu_coregroup_mask(int cpu)
 {
         return &cpu_core_map[cpu];
index 6fd4436d32f06a59ed3113db3e6e52ddf3d3fa93..ec9c04de3664910d81b7a55bbb09084d5e235d39 100644 (file)
@@ -79,6 +79,8 @@ struct sun4v_2insn_patch_entry {
 };
 extern struct sun4v_2insn_patch_entry __sun4v_2insn_patch,
        __sun4v_2insn_patch_end;
+extern struct sun4v_2insn_patch_entry __sun_m7_2insn_patch,
+       __sun_m7_2insn_patch_end;
 
 
 #endif /* !(__ASSEMBLY__) */
index 07cc49e541f40ea2cacc1f952aa7e07dd4a4e69b..0f679421b468343c747ac48abd2046b7e6ce051e 100644 (file)
@@ -69,6 +69,8 @@ void sun4v_patch_1insn_range(struct sun4v_1insn_patch_entry *,
                             struct sun4v_1insn_patch_entry *);
 void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *,
                             struct sun4v_2insn_patch_entry *);
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *,
+                            struct sun4v_2insn_patch_entry *);
 extern unsigned int dcache_parity_tl1_occurred;
 extern unsigned int icache_parity_tl1_occurred;
 
index 94e392bdee7dce5c984cc9f6f70307313a8f5a01..814fb1729b120bdeccbe2aacea958e7ae8add28d 100644 (file)
@@ -723,7 +723,6 @@ static int grpci2_of_probe(struct platform_device *ofdev)
                err = -ENOMEM;
                goto err1;
        }
-       memset(grpci2priv, 0, sizeof(*grpci2priv));
        priv->regs = regs;
        priv->irq = ofdev->archdata.irqs[0]; /* BASE IRQ */
        priv->irq_mode = (capability & STS_IRQMODE) >> STS_IRQMODE_BIT;
index 26c80e18d7b1b47bd74e9fce01bdb48eeb88fcbd..6f80936e0eea4d0dab82966b8f69cd7e6127b1dd 100644 (file)
@@ -614,45 +614,68 @@ static void fill_in_one_cache(cpuinfo_sparc *c, struct mdesc_handle *hp, u64 mp)
        }
 }
 
-static void mark_core_ids(struct mdesc_handle *hp, u64 mp, int core_id)
+static void find_back_node_value(struct mdesc_handle *hp, u64 node,
+                                char *srch_val,
+                                void (*func)(struct mdesc_handle *, u64, int),
+                                u64 val, int depth)
 {
-       u64 a;
-
-       mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_BACK) {
-               u64 t = mdesc_arc_target(hp, a);
-               const char *name;
-               const u64 *id;
+       u64 arc;
 
-               name = mdesc_node_name(hp, t);
-               if (!strcmp(name, "cpu")) {
-                       id = mdesc_get_property(hp, t, "id", NULL);
-                       if (*id < NR_CPUS)
-                               cpu_data(*id).core_id = core_id;
-               } else {
-                       u64 j;
+       /* Since we have an estimate of recursion depth, do a sanity check. */
+       if (depth == 0)
+               return;
 
-                       mdesc_for_each_arc(j, hp, t, MDESC_ARC_TYPE_BACK) {
-                               u64 n = mdesc_arc_target(hp, j);
-                               const char *n_name;
+       mdesc_for_each_arc(arc, hp, node, MDESC_ARC_TYPE_BACK) {
+               u64 n = mdesc_arc_target(hp, arc);
+               const char *name = mdesc_node_name(hp, n);
 
-                               n_name = mdesc_node_name(hp, n);
-                               if (strcmp(n_name, "cpu"))
-                                       continue;
+               if (!strcmp(srch_val, name))
+                       (*func)(hp, n, val);
 
-                               id = mdesc_get_property(hp, n, "id", NULL);
-                               if (*id < NR_CPUS)
-                                       cpu_data(*id).core_id = core_id;
-                       }
-               }
+               find_back_node_value(hp, n, srch_val, func, val, depth-1);
        }
 }
 
+static void __mark_core_id(struct mdesc_handle *hp, u64 node,
+                          int core_id)
+{
+       const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+       if (*id < num_possible_cpus())
+               cpu_data(*id).core_id = core_id;
+}
+
+static void __mark_sock_id(struct mdesc_handle *hp, u64 node,
+                          int sock_id)
+{
+       const u64 *id = mdesc_get_property(hp, node, "id", NULL);
+
+       if (*id < num_possible_cpus())
+               cpu_data(*id).sock_id = sock_id;
+}
+
+static void mark_core_ids(struct mdesc_handle *hp, u64 mp,
+                         int core_id)
+{
+       find_back_node_value(hp, mp, "cpu", __mark_core_id, core_id, 10);
+}
+
+static void mark_sock_ids(struct mdesc_handle *hp, u64 mp,
+                         int sock_id)
+{
+       find_back_node_value(hp, mp, "cpu", __mark_sock_id, sock_id, 10);
+}
+
 static void set_core_ids(struct mdesc_handle *hp)
 {
        int idx;
        u64 mp;
 
        idx = 1;
+
+       /* Identify unique cores by looking for cpus backpointed to by
+        * level 1 instruction caches.
+        */
        mdesc_for_each_node_by_name(hp, mp, "cache") {
                const u64 *level;
                const char *type;
@@ -667,11 +690,72 @@ static void set_core_ids(struct mdesc_handle *hp)
                        continue;
 
                mark_core_ids(hp, mp, idx);
+               idx++;
+       }
+}
+
+static int set_sock_ids_by_cache(struct mdesc_handle *hp, int level)
+{
+       u64 mp;
+       int idx = 1;
+       int fnd = 0;
+
+       /* Identify unique sockets by looking for cpus backpointed to by
+        * shared level n caches.
+        */
+       mdesc_for_each_node_by_name(hp, mp, "cache") {
+               const u64 *cur_lvl;
+
+               cur_lvl = mdesc_get_property(hp, mp, "level", NULL);
+               if (*cur_lvl != level)
+                       continue;
+
+               mark_sock_ids(hp, mp, idx);
+               idx++;
+               fnd = 1;
+       }
+       return fnd;
+}
+
+static void set_sock_ids_by_socket(struct mdesc_handle *hp, u64 mp)
+{
+       int idx = 1;
 
+       mdesc_for_each_node_by_name(hp, mp, "socket") {
+               u64 a;
+
+               mdesc_for_each_arc(a, hp, mp, MDESC_ARC_TYPE_FWD) {
+                       u64 t = mdesc_arc_target(hp, a);
+                       const char *name;
+                       const u64 *id;
+
+                       name = mdesc_node_name(hp, t);
+                       if (strcmp(name, "cpu"))
+                               continue;
+
+                       id = mdesc_get_property(hp, t, "id", NULL);
+                       if (*id < num_possible_cpus())
+                               cpu_data(*id).sock_id = idx;
+               }
                idx++;
        }
 }
 
+static void set_sock_ids(struct mdesc_handle *hp)
+{
+       u64 mp;
+
+       /* If the machine description exposes socket data, use it.
+        * Otherwise fall back to shared L3 or L2 caches.
+        */
+       mp = mdesc_node_by_name(hp, MDESC_NODE_NULL, "sockets");
+       if (mp != MDESC_NODE_NULL)
+               return set_sock_ids_by_socket(hp, mp);
+
+       if (!set_sock_ids_by_cache(hp, 3))
+               set_sock_ids_by_cache(hp, 2);
+}
+
 static void mark_proc_ids(struct mdesc_handle *hp, u64 mp, int proc_id)
 {
        u64 a;
@@ -707,7 +791,6 @@ static void __set_proc_ids(struct mdesc_handle *hp, const char *exec_unit_name)
                        continue;
 
                mark_proc_ids(hp, mp, idx);
-
                idx++;
        }
 }
@@ -900,6 +983,7 @@ void mdesc_fill_in_cpu_data(cpumask_t *mask)
 
        set_core_ids(hp);
        set_proc_ids(hp);
+       set_sock_ids(hp);
 
        mdesc_release(hp);
 
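The rewrite above replaces two hand-unrolled levels of back-arc walking with a single depth-limited recursion plus a callback. The shape of that traversal, as a toy-graph sketch (types are illustrative, not the mdesc API):

    #include <string.h>

    struct node {
            const char *name;
            struct node **back;     /* back-arc targets */
            int nback;
    };

    static void walk_back(struct node *n, const char *srch,
                          void (*func)(struct node *, int), int val, int depth)
    {
            if (depth == 0)         /* same sanity guard as the patch */
                    return;
            for (int i = 0; i < n->nback; i++) {
                    struct node *t = n->back[i];
                    if (!strcmp(srch, t->name))
                            func(t, val);           /* e.g. mark an id */
                    walk_back(t, srch, func, val, depth - 1);
            }
    }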
index 6f7251fd2eabc6b5b5790c2aa4486c84c042f814..c928bc64b4bac1b1c5eb88c71c348469796883c5 100644 (file)
@@ -1002,6 +1002,38 @@ static int __init pcibios_init(void)
 subsys_initcall(pcibios_init);
 
 #ifdef CONFIG_SYSFS
+
+#define SLOT_NAME_SIZE  11  /* Max decimal digits + null in u32 */
+
+static void pcie_bus_slot_names(struct pci_bus *pbus)
+{
+       struct pci_dev *pdev;
+       struct pci_bus *bus;
+
+       list_for_each_entry(pdev, &pbus->devices, bus_list) {
+               char name[SLOT_NAME_SIZE];
+               struct pci_slot *pci_slot;
+               const u32 *slot_num;
+               int len;
+
+               slot_num = of_get_property(pdev->dev.of_node,
+                                          "physical-slot#", &len);
+
+               if (slot_num == NULL || len != 4)
+                       continue;
+
+               snprintf(name, sizeof(name), "%u", slot_num[0]);
+               pci_slot = pci_create_slot(pbus, slot_num[0], name, NULL);
+
+               if (IS_ERR(pci_slot))
+                       pr_err("PCI: pci_create_slot returned %ld.\n",
+                              PTR_ERR(pci_slot));
+       }
+
+       list_for_each_entry(bus, &pbus->children, node)
+               pcie_bus_slot_names(bus);
+}
+
 static void pci_bus_slot_names(struct device_node *node, struct pci_bus *bus)
 {
        const struct pci_slot_names {
@@ -1053,18 +1085,29 @@ static int __init of_pci_slot_init(void)
 
        while ((pbus = pci_find_next_bus(pbus)) != NULL) {
                struct device_node *node;
+               struct pci_dev *pdev;
+
+               pdev = list_first_entry(&pbus->devices, struct pci_dev,
+                                       bus_list);
 
-               if (pbus->self) {
-                       /* PCI->PCI bridge */
-                       node = pbus->self->dev.of_node;
+               if (pdev && pci_is_pcie(pdev)) {
+                       pcie_bus_slot_names(pbus);
                } else {
-                       struct pci_pbm_info *pbm = pbus->sysdata;
 
-                       /* Host PCI controller */
-                       node = pbm->op->dev.of_node;
-               }
+                       if (pbus->self) {
+
+                               /* PCI->PCI bridge */
+                               node = pbus->self->dev.of_node;
+
+                       } else {
+                               struct pci_pbm_info *pbm = pbus->sysdata;
 
-               pci_bus_slot_names(node, pbus);
+                               /* Host PCI controller */
+                               node = pbm->op->dev.of_node;
+                       }
+
+                       pci_bus_slot_names(node, pbus);
+               }
        }
 
        return 0;
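SLOT_NAME_SIZE is exact rather than generous: a u32 prints as at most 10 decimal digits, plus the terminating NUL. A quick user-space check of that bound:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            char name[11];          /* SLOT_NAME_SIZE */
            int len = snprintf(name, sizeof(name), "%u", UINT32_MAX);
            printf("%d chars for %s\n", len, name); /* 10 for 4294967295 */
            return 0;
    }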
index c38d19fc27baac8821acc57cf2e42120b7d66a3e..f7b261749383b4992300ba4418b1d16ef7251360 100644 (file)
@@ -255,6 +255,24 @@ void sun4v_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
        }
 }
 
+void sun_m7_patch_2insn_range(struct sun4v_2insn_patch_entry *start,
+                            struct sun4v_2insn_patch_entry *end)
+{
+       while (start < end) {
+               unsigned long addr = start->addr;
+
+               *(unsigned int *) (addr +  0) = start->insns[0];
+               wmb();
+               __asm__ __volatile__("flush     %0" : : "r" (addr +  0));
+
+               *(unsigned int *) (addr +  4) = start->insns[1];
+               wmb();
+               __asm__ __volatile__("flush     %0" : : "r" (addr +  4));
+
+               start++;
+       }
+}
+
 static void __init sun4v_patch(void)
 {
        extern void sun4v_hvapi_init(void);
@@ -267,6 +285,9 @@ static void __init sun4v_patch(void)
 
        sun4v_patch_2insn_range(&__sun4v_2insn_patch,
                                &__sun4v_2insn_patch_end);
+       if (sun4v_chip_type == SUN4V_CHIP_SPARC_M7)
+               sun_m7_patch_2insn_range(&__sun_m7_2insn_patch,
+                                        &__sun_m7_2insn_patch_end);
 
        sun4v_hvapi_init();
 }
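Note the discipline in sun_m7_patch_2insn_range(): each instruction word is published with a write barrier and an I-cache flush before the next one is touched. A hedged refactor of the loop body into a helper makes that explicit:

    /* Sketch: store the new encoding, order the store, then flush the
     * I-cache line so the patched word is what gets fetched next. */
    static inline void patch_one_insn(unsigned long addr, unsigned int insn)
    {
            *(unsigned int *) addr = insn;
            wmb();
            __asm__ __volatile__("flush     %0" : : "r" (addr));
    }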
index 61139d9924cae4a8fdf5d4d5366a31052ea29616..19cd08d1867285f059f768402e4df14c64d7871d 100644 (file)
@@ -60,8 +60,12 @@ DEFINE_PER_CPU(cpumask_t, cpu_sibling_map) = CPU_MASK_NONE;
 cpumask_t cpu_core_map[NR_CPUS] __read_mostly =
        { [0 ... NR_CPUS-1] = CPU_MASK_NONE };
 
+cpumask_t cpu_core_sib_map[NR_CPUS] __read_mostly = {
+       [0 ... NR_CPUS-1] = CPU_MASK_NONE };
+
 EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);
 EXPORT_SYMBOL(cpu_core_map);
+EXPORT_SYMBOL(cpu_core_sib_map);
 
 static cpumask_t smp_commenced_mask;
 
@@ -1243,6 +1247,15 @@ void smp_fill_in_sib_core_maps(void)
                }
        }
 
+       for_each_present_cpu(i)  {
+               unsigned int j;
+
+               for_each_present_cpu(j)  {
+                       if (cpu_data(i).sock_id == cpu_data(j).sock_id)
+                               cpumask_set_cpu(j, &cpu_core_sib_map[i]);
+               }
+       }
+
        for_each_present_cpu(i) {
                unsigned int j;
 
index 09243057cb0b48f7fd1679129db63eb4094a8be6..f1a2f688b28a31fc47d2232f3ed10e9d95930223 100644 (file)
@@ -138,6 +138,11 @@ SECTIONS
                *(.pause_3insn_patch)
                __pause_3insn_patch_end = .;
        }
+       .sun_m7_2insn_patch : {
+               __sun_m7_2insn_patch = .;
+               *(.sun_m7_2insn_patch)
+               __sun_m7_2insn_patch_end = .;
+       }
        PERCPU_SECTION(SMP_CACHE_BYTES)
 
        . = ALIGN(PAGE_SIZE);
index 70d817154fe8bfd04aeaa71f45f15667f4962c23..c399e7b3b035250d66ed4522d2da190dc6169aa3 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/perf_event.h>
 #include <linux/interrupt.h>
 #include <linux/kdebug.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
@@ -29,7 +30,6 @@
 #include <asm/setup.h>
 #include <asm/smp.h>
 #include <asm/traps.h>
-#include <asm/uaccess.h>
 
 #include "mm_32.h"
 
@@ -196,7 +196,7 @@ asmlinkage void do_sparc_fault(struct pt_regs *regs, int text_fault, int write,
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (pagefault_disabled() || !mm)
                goto no_context;
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
index 4798232494294a7ece0bef232216dd4a26408d88..e9268ea1a68de5364d468973e61b3cbd232c19c9 100644 (file)
 #include <linux/kdebug.h>
 #include <linux/percpu.h>
 #include <linux/context_tracking.h>
+#include <linux/uaccess.h>
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
 #include <asm/openprom.h>
 #include <asm/oplib.h>
-#include <asm/uaccess.h>
 #include <asm/asi.h>
 #include <asm/lsu.h>
 #include <asm/sections.h>
@@ -330,7 +330,7 @@ asmlinkage void __kprobes do_sparc64_fault(struct pt_regs *regs)
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto intr_or_no_mm;
 
        perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
index 449f864f0cefdb8918bf0002d40dc87ba2cb8db3..a454ec5ff07af7f33d8ec548051101c3895e9b82 100644 (file)
@@ -53,7 +53,7 @@ void *kmap_atomic(struct page *page)
        unsigned long vaddr;
        long idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -91,6 +91,7 @@ void __kunmap_atomic(void *kvaddr)
 
        if (vaddr < FIXADDR_START) { // FIXME
                pagefault_enable();
+               preempt_enable();
                return;
        }
 
@@ -126,5 +127,6 @@ void __kunmap_atomic(void *kvaddr)
 
        kmap_atomic_idx_pop();
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
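After this change kmap_atomic() no longer relies on pagefault_disable() implying non-preemptibility; preemption is disabled explicitly, and __kunmap_atomic() unwinds both. A hedged kernel-context usage sketch of the invariant callers must preserve:

    static void copy_into_page(struct page *page, const void *src, size_t len)
    {
            void *p = kmap_atomic(page);   /* preempt + pagefaults disabled */
            memcpy(p, src, len);           /* must not sleep in between */
            kunmap_atomic(p);              /* re-enables both, in order */
    }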
index 4ca0d6ba5ec8331c67f43f8515eb3737526208bb..c5d08b89a96c811ce3d77d81e0ec7b14570d0ac8 100644 (file)
@@ -54,6 +54,7 @@
 #include "init_64.h"
 
 unsigned long kern_linear_pte_xor[4] __read_mostly;
+static unsigned long page_cache4v_flag;
 
 /* A bitmap, two bits for every 256MB of physical memory.  These two
  * bits determine what page size we use for kernel linear
@@ -1909,11 +1910,24 @@ static void __init sun4u_linear_pte_xor_finalize(void)
 
 static void __init sun4v_linear_pte_xor_finalize(void)
 {
+       unsigned long pagecv_flag;
+
+       /* Bit 9 of the TTE is no longer the CV bit on the M7 processor; it
+        * instead enables MCD errors. Do not set bit 9 on M7.
+        */
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+               pagecv_flag = 0x00;
+               break;
+       default:
+               pagecv_flag = _PAGE_CV_4V;
+               break;
+       }
 #ifndef CONFIG_DEBUG_PAGEALLOC
        if (cpu_pgsz_mask & HV_PGSZ_MASK_256MB) {
                kern_linear_pte_xor[1] = (_PAGE_VALID | _PAGE_SZ256MB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[1] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[1] = kern_linear_pte_xor[0];
@@ -1922,7 +1936,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
        if (cpu_pgsz_mask & HV_PGSZ_MASK_2GB) {
                kern_linear_pte_xor[2] = (_PAGE_VALID | _PAGE_SZ2GB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[2] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[2] = kern_linear_pte_xor[1];
@@ -1931,7 +1945,7 @@ static void __init sun4v_linear_pte_xor_finalize(void)
        if (cpu_pgsz_mask & HV_PGSZ_MASK_16GB) {
                kern_linear_pte_xor[3] = (_PAGE_VALID | _PAGE_SZ16GB_4V) ^
                        PAGE_OFFSET;
-               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | _PAGE_CV_4V |
+               kern_linear_pte_xor[3] |= (_PAGE_CP_4V | pagecv_flag |
                                           _PAGE_P_4V | _PAGE_W_4V);
        } else {
                kern_linear_pte_xor[3] = kern_linear_pte_xor[2];
@@ -1958,6 +1972,13 @@ static phys_addr_t __init available_memory(void)
        return available;
 }
 
+#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
+#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
+#define __DIRTY_BITS_4U         (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
+#define __DIRTY_BITS_4V         (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
+#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
+#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
+
 /* We need to exclude reserved regions. This exclusion will include
  * vmlinux and initrd. To be more precise the initrd size could be used to
  * compute a new lower limit because it is freed later during initialization.
@@ -2034,6 +2055,25 @@ void __init paging_init(void)
        memset(swapper_4m_tsb, 0x40, sizeof(swapper_4m_tsb));
 #endif
 
+       /* The TTE.cv bit on sparc v9 occupies the same position as the
+        * TTE.mcde bit on the M7 processor, a conflicting usage of the
+        * same bit. Enabling TTE.cv on M7 would turn on Memory Corruption
+        * Detection errors on all pages, which would lead to problems
+        * later: the kernel does not run with MCD enabled, so the rest
+        * of the steps required to fully configure memory corruption
+        * detection are never taken. We must ensure TTE.mcde is not set
+        * on M7. Compute the cacheability flag here, taking this into
+        * consideration, for use later.
+        */
+       switch (sun4v_chip_type) {
+       case SUN4V_CHIP_SPARC_M7:
+               page_cache4v_flag = _PAGE_CP_4V;
+               break;
+       default:
+               page_cache4v_flag = _PAGE_CACHE_4V;
+               break;
+       }
+
        if (tlb_type == hypervisor)
                sun4v_pgprot_init();
        else
@@ -2274,13 +2314,6 @@ void free_initrd_mem(unsigned long start, unsigned long end)
 }
 #endif
 
-#define _PAGE_CACHE_4U (_PAGE_CP_4U | _PAGE_CV_4U)
-#define _PAGE_CACHE_4V (_PAGE_CP_4V | _PAGE_CV_4V)
-#define __DIRTY_BITS_4U         (_PAGE_MODIFIED_4U | _PAGE_WRITE_4U | _PAGE_W_4U)
-#define __DIRTY_BITS_4V         (_PAGE_MODIFIED_4V | _PAGE_WRITE_4V | _PAGE_W_4V)
-#define __ACCESS_BITS_4U (_PAGE_ACCESSED_4U | _PAGE_READ_4U | _PAGE_R)
-#define __ACCESS_BITS_4V (_PAGE_ACCESSED_4V | _PAGE_READ_4V | _PAGE_R)
-
 pgprot_t PAGE_KERNEL __read_mostly;
 EXPORT_SYMBOL(PAGE_KERNEL);
 
@@ -2312,8 +2345,7 @@ int __meminit vmemmap_populate(unsigned long vstart, unsigned long vend,
                    _PAGE_P_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                pte_base = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                           _PAGE_CP_4V | _PAGE_CV_4V |
-                           _PAGE_P_4V | _PAGE_W_4V);
+                           page_cache4v_flag | _PAGE_P_4V | _PAGE_W_4V);
 
        pte_base |= _PAGE_PMD_HUGE;
 
@@ -2450,14 +2482,14 @@ static void __init sun4v_pgprot_init(void)
        int i;
 
        PAGE_KERNEL = __pgprot (_PAGE_PRESENT_4V | _PAGE_VALID |
-                               _PAGE_CACHE_4V | _PAGE_P_4V |
+                               page_cache4v_flag | _PAGE_P_4V |
                                __ACCESS_BITS_4V | __DIRTY_BITS_4V |
                                _PAGE_EXEC_4V);
        PAGE_KERNEL_LOCKED = PAGE_KERNEL;
 
        _PAGE_IE = _PAGE_IE_4V;
        _PAGE_E = _PAGE_E_4V;
-       _PAGE_CACHE = _PAGE_CACHE_4V;
+       _PAGE_CACHE = page_cache4v_flag;
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
        kern_linear_pte_xor[0] = _PAGE_VALID ^ PAGE_OFFSET;
@@ -2465,8 +2497,8 @@ static void __init sun4v_pgprot_init(void)
        kern_linear_pte_xor[0] = (_PAGE_VALID | _PAGE_SZ4MB_4V) ^
                PAGE_OFFSET;
 #endif
-       kern_linear_pte_xor[0] |= (_PAGE_CP_4V | _PAGE_CV_4V |
-                                  _PAGE_P_4V | _PAGE_W_4V);
+       kern_linear_pte_xor[0] |= (page_cache4v_flag | _PAGE_P_4V |
+                                  _PAGE_W_4V);
 
        for (i = 1; i < 4; i++)
                kern_linear_pte_xor[i] = kern_linear_pte_xor[0];
@@ -2479,12 +2511,12 @@ static void __init sun4v_pgprot_init(void)
                             _PAGE_SZ4MB_4V | _PAGE_SZ512K_4V |
                             _PAGE_SZ64K_4V | _PAGE_SZ8K_4V);
 
-       page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | _PAGE_CACHE_4V;
-       page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_none = _PAGE_PRESENT_4V | _PAGE_ACCESSED_4V | page_cache4v_flag;
+       page_shared = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                       __ACCESS_BITS_4V | _PAGE_WRITE_4V | _PAGE_EXEC_4V);
-       page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_copy   = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                       __ACCESS_BITS_4V | _PAGE_EXEC_4V);
-       page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | _PAGE_CACHE_4V |
+       page_readonly = (_PAGE_VALID | _PAGE_PRESENT_4V | page_cache4v_flag |
                         __ACCESS_BITS_4V | _PAGE_EXEC_4V);
 
        page_exec_bit = _PAGE_EXEC_4V;
@@ -2542,7 +2574,7 @@ static unsigned long kern_large_tte(unsigned long paddr)
               _PAGE_EXEC_4U | _PAGE_L_4U | _PAGE_W_4U);
        if (tlb_type == hypervisor)
                val = (_PAGE_VALID | _PAGE_SZ4MB_4V |
-                      _PAGE_CP_4V | _PAGE_CV_4V | _PAGE_P_4V |
+                      page_cache4v_flag | _PAGE_P_4V |
                       _PAGE_EXEC_4V | _PAGE_W_4V);
 
        return val | paddr;
@@ -2706,7 +2738,7 @@ void hugetlb_setup(struct pt_regs *regs)
        struct mm_struct *mm = current->mm;
        struct tsb_config *tp;
 
-       if (in_atomic() || !mm) {
+       if (faulthandler_disabled() || !mm) {
                const struct exception_table_entry *entry;
 
                entry = search_exception_tables(regs->tpc);
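All the _PAGE_CP_4V | _PAGE_CV_4V pairs above collapse into page_cache4v_flag, computed once during paging_init(). The selection itself reduces to a one-liner (constants as used in this file; sketch only):

    /* Sketch: on M7 the old CV bit position means "enable MCD", so the
     * cacheable-page mask must omit it. */
    static unsigned long pick_cache4v_flag(unsigned long chip_type)
    {
            return chip_type == SUN4V_CHIP_SPARC_M7
                    ? _PAGE_CP_4V                  /* physically cacheable only */
                    : _PAGE_CP_4V | _PAGE_CV_4V;   /* plus virtually cacheable */
    }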
index 7b11c5fadd4220f5e8694b6e2a2f906ccf4a50d3..0496970cef8296237edcf1f4f688dffa8af5ecfc 100644 (file)
@@ -105,9 +105,6 @@ static inline long atomic64_add_unless(atomic64_t *v, long a, long u)
 
 #define atomic64_inc_not_zero(v)       atomic64_add_unless((v), 1, 0)
 
-/* Define this to indicate that cmpxchg is an efficient operation. */
-#define __HAVE_ARCH_CMPXCHG
-
 #endif /* !__ASSEMBLY__ */
 
 #endif /* _ASM_TILE_ATOMIC_64_H */
index 6ef4ecab1df29bb86b5dbca9c0f900f0ae7133f7..dc61de15c1f936d4990d5b8d06b1e0d4ab0eb913 100644 (file)
@@ -54,7 +54,7 @@ extern void iounmap(volatile void __iomem *addr);
 
 #define ioremap_nocache(physaddr, size)                ioremap(physaddr, size)
 #define ioremap_wc(physaddr, size)             ioremap(physaddr, size)
-#define ioremap_writethrough(physaddr, size)   ioremap(physaddr, size)
+#define ioremap_wt(physaddr, size)             ioremap(physaddr, size)
 #define ioremap_fullcache(physaddr, size)      ioremap(physaddr, size)
 
 #define mmiowb()
index 938311844233b8c7e2753b5982bbdd0a302d5304..76b0d0ebb24433e1b064cda5dbb23eef5e9127dc 100644 (file)
@@ -55,7 +55,7 @@ static inline const struct cpumask *cpumask_of_node(int node)
 #define topology_physical_package_id(cpu)       ((void)(cpu), 0)
 #define topology_core_id(cpu)                   (cpu)
 #define topology_core_cpumask(cpu)              ((void)(cpu), cpu_online_mask)
-#define topology_thread_cpumask(cpu)            cpumask_of(cpu)
+#define topology_sibling_cpumask(cpu)           cpumask_of(cpu)
 #endif
 
 #endif /* _ASM_TILE_TOPOLOGY_H */
index f41cb53cf645dcb59287550aff82833e526c2e2c..a33276bf5ca1dfd284bc50545e107800c93cf651 100644 (file)
@@ -78,7 +78,8 @@ int __range_ok(unsigned long addr, unsigned long size);
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -192,7 +193,8 @@ extern int __get_user_bad(void)
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -274,7 +276,8 @@ extern int __put_user_bad(void)
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -330,7 +333,8 @@ extern int __put_user_bad(void)
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -366,7 +370,8 @@ copy_to_user(void __user *to, const void *from, unsigned long n)
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -437,7 +442,8 @@ static inline unsigned long __must_check copy_from_user(void *to,
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to user space.  Caller must check
  * the specified blocks with access_ok() before calling this function.
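The reworded kernel-doc above encodes a usable rule: these accessors sleep only while page faults are enabled, so a caller can trade sleeping for failure. A hedged sketch of that pattern (helpers as in linux/uaccess.h):

    pagefault_disable();
    ret = __copy_from_user_inatomic(dst, uptr, len);  /* never sleeps */
    pagefault_enable();
    if (ret)
            ret = copy_from_user(dst, uptr, len);     /* may sleep, may fault */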
index e83cc999da029b469fde1f2e7f2b4d0f5a96dbb6..3f4f58d34a92b6029615756892c1fe207d6f6701 100644 (file)
@@ -354,9 +354,9 @@ static int handle_page_fault(struct pt_regs *regs,
 
        /*
         * If we're in an interrupt, have no user context or are running in an
-        * atomic region then we must not take the fault.
+        * region with pagefaults disabled then we must not take the fault.
         */
-       if (in_atomic() || !mm) {
+       if (pagefault_disabled() || !mm) {
                vma = NULL;  /* happy compiler */
                goto bad_area_nosemaphore;
        }
index 6aa2f26254471e730e29b0c1f1382c3f7a4d0f64..fcd545014e79dcc83a662f4db5a37e7511e695a0 100644 (file)
@@ -201,7 +201,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        int idx, type;
        pte_t *pte;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
 
        /* Avoid icache flushes by disallowing atomic executable mappings. */
@@ -259,6 +259,7 @@ void __kunmap_atomic(void *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
 
index 8e4daf44e9805ed2380b72826a699ed139819ee3..47ff9b7f3e5d39d8a05e088b1308568836d4e332 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/sched.h>
 #include <linux/hardirq.h>
 #include <linux/module.h>
+#include <linux/uaccess.h>
 #include <asm/current.h>
 #include <asm/pgtable.h>
 #include <asm/tlbflush.h>
@@ -35,10 +36,10 @@ int handle_page_fault(unsigned long address, unsigned long ip,
        *code_out = SEGV_MAPERR;
 
        /*
-        * If the fault was during atomic operation, don't take the fault, just
+        * If the fault was with pagefaults disabled, don't take the fault, just
         * fail.
         */
-       if (in_atomic())
+       if (faulthandler_disabled())
                goto out_nosemaphore;
 
        if (is_user)
index 0dc922dba9154d7cfcfe5352ec8ec77169e8082f..afccef5529ccb8d44409519aa075476af747722e 100644 (file)
@@ -218,7 +218,7 @@ static int do_pf(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
         * If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm)
+       if (faulthandler_disabled() || !mm)
                goto no_context;
 
        if (user_mode(regs))
index 3942f74c92d7d338ee0c8a858ad2e5a81774686d..1538562cc720e78132d0eb31c38116b029d56ce0 100644 (file)
@@ -1,3 +1,6 @@
+
+obj-y += entry/
+
 obj-$(CONFIG_KVM) += kvm/
 
 # Xen paravirtualization support
@@ -11,7 +14,7 @@ obj-y += kernel/
 obj-y += mm/
 
 obj-y += crypto/
-obj-y += vdso/
+
 obj-$(CONFIG_IA32_EMULATION) += ia32/
 
 obj-y += platform/
index 226d5696e1d1dd5fe715124710b0f2cd04f8c495..7e39f9b227056d80a828faa1b2683d9cceb14d98 100644 (file)
@@ -9,140 +9,141 @@ config 64BIT
 config X86_32
        def_bool y
        depends on !64BIT
-       select CLKSRC_I8253
-       select HAVE_UID16
 
 config X86_64
        def_bool y
        depends on 64BIT
-       select X86_DEV_DMA_OPS
-       select ARCH_USE_CMPXCHG_LOCKREF
-       select HAVE_LIVEPATCH
 
 ### Arch settings
 config X86
        def_bool y
-       select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
-       select ARCH_MIGHT_HAVE_ACPI_PDC if ACPI
+       select ACPI_LEGACY_TABLES_LOOKUP        if ACPI
+       select ACPI_SYSTEM_POWER_STATES_SUPPORT if ACPI
+       select ANON_INODES
+       select ARCH_CLOCKSOURCE_DATA
+       select ARCH_DISCARD_MEMBLOCK
+       select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
        select ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS
+       select ARCH_HAS_ELF_RANDOMIZE
        select ARCH_HAS_FAST_MULTIPLIER
        select ARCH_HAS_GCOV_PROFILE_ALL
+       select ARCH_HAS_SG_CHAIN
+       select ARCH_HAVE_NMI_SAFE_CMPXCHG
+       select ARCH_MIGHT_HAVE_ACPI_PDC         if ACPI
        select ARCH_MIGHT_HAVE_PC_PARPORT
        select ARCH_MIGHT_HAVE_PC_SERIO
-       select HAVE_AOUT if X86_32
-       select HAVE_UNSTABLE_SCHED_CLOCK
-       select ARCH_SUPPORTS_NUMA_BALANCING if X86_64
-       select ARCH_SUPPORTS_INT128 if X86_64
-       select HAVE_IDE
-       select HAVE_OPROFILE
-       select HAVE_PCSPKR_PLATFORM
-       select HAVE_PERF_EVENTS
-       select HAVE_IOREMAP_PROT
-       select HAVE_KPROBES
-       select HAVE_MEMBLOCK
-       select HAVE_MEMBLOCK_NODE_MAP
-       select ARCH_DISCARD_MEMBLOCK
-       select ARCH_WANT_OPTIONAL_GPIOLIB
+       select ARCH_SUPPORTS_ATOMIC_RMW
+       select ARCH_SUPPORTS_INT128             if X86_64
+       select ARCH_SUPPORTS_NUMA_BALANCING     if X86_64
+       select ARCH_USE_BUILTIN_BSWAP
+       select ARCH_USE_CMPXCHG_LOCKREF         if X86_64
+       select ARCH_USE_QUEUED_RWLOCKS
+       select ARCH_USE_QUEUED_SPINLOCKS
        select ARCH_WANT_FRAME_POINTERS
-       select HAVE_DMA_ATTRS
-       select HAVE_DMA_CONTIGUOUS
-       select HAVE_KRETPROBES
+       select ARCH_WANT_IPC_PARSE_VERSION      if X86_32
+       select ARCH_WANT_OPTIONAL_GPIOLIB
+       select BUILDTIME_EXTABLE_SORT
+       select CLKEVT_I8253
+       select CLKSRC_I8253                     if X86_32
+       select CLOCKSOURCE_VALIDATE_LAST_CYCLE
+       select CLOCKSOURCE_WATCHDOG
+       select CLONE_BACKWARDS                  if X86_32
+       select COMPAT_OLD_SIGACTION             if IA32_EMULATION
+       select DCACHE_WORD_ACCESS
+       select GENERIC_CLOCKEVENTS
+       select GENERIC_CLOCKEVENTS_BROADCAST    if X86_64 || (X86_32 && X86_LOCAL_APIC)
+       select GENERIC_CLOCKEVENTS_MIN_ADJUST
+       select GENERIC_CMOS_UPDATE
+       select GENERIC_CPU_AUTOPROBE
        select GENERIC_EARLY_IOREMAP
-       select HAVE_OPTPROBES
-       select HAVE_KPROBES_ON_FTRACE
-       select HAVE_FTRACE_MCOUNT_RECORD
-       select HAVE_FENTRY if X86_64
+       select GENERIC_FIND_FIRST_BIT
+       select GENERIC_IOMAP
+       select GENERIC_IRQ_PROBE
+       select GENERIC_IRQ_SHOW
+       select GENERIC_PENDING_IRQ              if SMP
+       select GENERIC_SMP_IDLE_THREAD
+       select GENERIC_STRNCPY_FROM_USER
+       select GENERIC_STRNLEN_USER
+       select GENERIC_TIME_VSYSCALL
+       select HAVE_ACPI_APEI                   if ACPI
+       select HAVE_ACPI_APEI_NMI               if ACPI
+       select HAVE_ALIGNED_STRUCT_PAGE         if SLUB
+       select HAVE_AOUT                        if X86_32
+       select HAVE_ARCH_AUDITSYSCALL
+       select HAVE_ARCH_HUGE_VMAP              if X86_64 || X86_PAE
+       select HAVE_ARCH_JUMP_LABEL
+       select HAVE_ARCH_KASAN                  if X86_64 && SPARSEMEM_VMEMMAP
+       select HAVE_ARCH_KGDB
+       select HAVE_ARCH_KMEMCHECK
+       select HAVE_ARCH_SECCOMP_FILTER
+       select HAVE_ARCH_SOFT_DIRTY             if X86_64
+       select HAVE_ARCH_TRACEHOOK
+       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
+       select HAVE_BPF_JIT                     if X86_64
+       select HAVE_CC_STACKPROTECTOR
+       select HAVE_CMPXCHG_DOUBLE
+       select HAVE_CMPXCHG_LOCAL
+       select HAVE_CONTEXT_TRACKING            if X86_64
        select HAVE_C_RECORDMCOUNT
+       select HAVE_DEBUG_KMEMLEAK
+       select HAVE_DEBUG_STACKOVERFLOW
+       select HAVE_DMA_API_DEBUG
+       select HAVE_DMA_ATTRS
+       select HAVE_DMA_CONTIGUOUS
        select HAVE_DYNAMIC_FTRACE
        select HAVE_DYNAMIC_FTRACE_WITH_REGS
-       select HAVE_FUNCTION_TRACER
-       select HAVE_FUNCTION_GRAPH_TRACER
-       select HAVE_FUNCTION_GRAPH_FP_TEST
-       select HAVE_SYSCALL_TRACEPOINTS
-       select SYSCTL_EXCEPTION_TRACE
-       select HAVE_KVM
-       select HAVE_ARCH_KGDB
-       select HAVE_ARCH_TRACEHOOK
-       select HAVE_GENERIC_DMA_COHERENT if X86_32
        select HAVE_EFFICIENT_UNALIGNED_ACCESS
-       select USER_STACKTRACE_SUPPORT
-       select HAVE_REGS_AND_STACK_ACCESS_API
-       select HAVE_DMA_API_DEBUG
-       select HAVE_KERNEL_GZIP
+       select HAVE_FENTRY                      if X86_64
+       select HAVE_FTRACE_MCOUNT_RECORD
+       select HAVE_FUNCTION_GRAPH_FP_TEST
+       select HAVE_FUNCTION_GRAPH_TRACER
+       select HAVE_FUNCTION_TRACER
+       select HAVE_GENERIC_DMA_COHERENT        if X86_32
+       select HAVE_HW_BREAKPOINT
+       select HAVE_IDE
+       select HAVE_IOREMAP_PROT
+       select HAVE_IRQ_EXIT_ON_IRQ_STACK       if X86_64
+       select HAVE_IRQ_TIME_ACCOUNTING
        select HAVE_KERNEL_BZIP2
+       select HAVE_KERNEL_GZIP
+       select HAVE_KERNEL_LZ4
        select HAVE_KERNEL_LZMA
-       select HAVE_KERNEL_XZ
        select HAVE_KERNEL_LZO
-       select HAVE_KERNEL_LZ4
-       select HAVE_HW_BREAKPOINT
+       select HAVE_KERNEL_XZ
+       select HAVE_KPROBES
+       select HAVE_KPROBES_ON_FTRACE
+       select HAVE_KRETPROBES
+       select HAVE_KVM
+       select HAVE_LIVEPATCH                   if X86_64
+       select HAVE_MEMBLOCK
+       select HAVE_MEMBLOCK_NODE_MAP
        select HAVE_MIXED_BREAKPOINTS_REGS
-       select PERF_EVENTS
+       select HAVE_OPROFILE
+       select HAVE_OPTPROBES
+       select HAVE_PCSPKR_PLATFORM
+       select HAVE_PERF_EVENTS
        select HAVE_PERF_EVENTS_NMI
        select HAVE_PERF_REGS
        select HAVE_PERF_USER_STACK_DUMP
-       select HAVE_DEBUG_KMEMLEAK
-       select ANON_INODES
-       select HAVE_ALIGNED_STRUCT_PAGE if SLUB
-       select HAVE_CMPXCHG_LOCAL
-       select HAVE_CMPXCHG_DOUBLE
-       select HAVE_ARCH_KMEMCHECK
-       select HAVE_ARCH_KASAN if X86_64 && SPARSEMEM_VMEMMAP
+       select HAVE_REGS_AND_STACK_ACCESS_API
+       select HAVE_SYSCALL_TRACEPOINTS
+       select HAVE_UID16                       if X86_32
+       select HAVE_UNSTABLE_SCHED_CLOCK
        select HAVE_USER_RETURN_NOTIFIER
-       select ARCH_HAS_ELF_RANDOMIZE
-       select HAVE_ARCH_JUMP_LABEL
-       select ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE
-       select SPARSE_IRQ
-       select GENERIC_FIND_FIRST_BIT
-       select GENERIC_IRQ_PROBE
-       select GENERIC_PENDING_IRQ if SMP
-       select GENERIC_IRQ_SHOW
-       select GENERIC_CLOCKEVENTS_MIN_ADJUST
        select IRQ_FORCED_THREADING
-       select HAVE_BPF_JIT if X86_64
-       select HAVE_ARCH_TRANSPARENT_HUGEPAGE
-       select HAVE_ARCH_HUGE_VMAP if X86_64 || (X86_32 && X86_PAE)
-       select ARCH_HAS_SG_CHAIN
-       select CLKEVT_I8253
-       select ARCH_HAVE_NMI_SAFE_CMPXCHG
-       select GENERIC_IOMAP
-       select DCACHE_WORD_ACCESS
-       select GENERIC_SMP_IDLE_THREAD
-       select ARCH_WANT_IPC_PARSE_VERSION if X86_32
-       select HAVE_ARCH_SECCOMP_FILTER
-       select BUILDTIME_EXTABLE_SORT
-       select GENERIC_CMOS_UPDATE
-       select HAVE_ARCH_SOFT_DIRTY if X86_64
-       select CLOCKSOURCE_WATCHDOG
-       select GENERIC_CLOCKEVENTS
-       select ARCH_CLOCKSOURCE_DATA
-       select CLOCKSOURCE_VALIDATE_LAST_CYCLE
-       select GENERIC_CLOCKEVENTS_BROADCAST if X86_64 || (X86_32 && X86_LOCAL_APIC)
-       select GENERIC_TIME_VSYSCALL
-       select GENERIC_STRNCPY_FROM_USER
-       select GENERIC_STRNLEN_USER
-       select HAVE_CONTEXT_TRACKING if X86_64
-       select HAVE_IRQ_TIME_ACCOUNTING
-       select VIRT_TO_BUS
-       select MODULES_USE_ELF_REL if X86_32
-       select MODULES_USE_ELF_RELA if X86_64
-       select CLONE_BACKWARDS if X86_32
-       select ARCH_USE_BUILTIN_BSWAP
-       select ARCH_USE_QUEUE_RWLOCK
-       select OLD_SIGSUSPEND3 if X86_32 || IA32_EMULATION
-       select OLD_SIGACTION if X86_32
-       select COMPAT_OLD_SIGACTION if IA32_EMULATION
+       select MODULES_USE_ELF_RELA             if X86_64
+       select MODULES_USE_ELF_REL              if X86_32
+       select OLD_SIGACTION                    if X86_32
+       select OLD_SIGSUSPEND3                  if X86_32 || IA32_EMULATION
+       select PERF_EVENTS
        select RTC_LIB
-       select HAVE_DEBUG_STACKOVERFLOW
-       select HAVE_IRQ_EXIT_ON_IRQ_STACK if X86_64
-       select HAVE_CC_STACKPROTECTOR
-       select GENERIC_CPU_AUTOPROBE
-       select HAVE_ARCH_AUDITSYSCALL
-       select ARCH_SUPPORTS_ATOMIC_RMW
-       select HAVE_ACPI_APEI if ACPI
-       select HAVE_ACPI_APEI_NMI if ACPI
-       select ACPI_LEGACY_TABLES_LOOKUP if ACPI
-       select X86_FEATURE_NAMES if PROC_FS
+       select SPARSE_IRQ
        select SRCU
+       select SYSCTL_EXCEPTION_TRACE
+       select USER_STACKTRACE_SUPPORT
+       select VIRT_TO_BUS
+       select X86_DEV_DMA_OPS                  if X86_64
+       select X86_FEATURE_NAMES                if PROC_FS
 
 config INSTRUCTION_DECODER
        def_bool y
@@ -260,10 +261,6 @@ config X86_64_SMP
        def_bool y
        depends on X86_64 && SMP
 
-config X86_HT
-       def_bool y
-       depends on SMP
-
 config X86_32_LAZY_GS
        def_bool y
        depends on X86_32 && !CC_STACKPROTECTOR
@@ -341,7 +338,7 @@ config X86_FEATURE_NAMES
 
 config X86_X2APIC
        bool "Support x2apic"
-       depends on X86_LOCAL_APIC && X86_64 && IRQ_REMAP
+       depends on X86_LOCAL_APIC && X86_64 && (IRQ_REMAP || HYPERVISOR_GUEST)
        ---help---
          This enables x2apic support on CPUs that have this feature.
 
@@ -441,6 +438,7 @@ config X86_UV
        depends on X86_EXTENDED_PLATFORM
        depends on NUMA
        depends on X86_X2APIC
+       depends on PCI
        ---help---
          This option is needed in order to support SGI Ultraviolet systems.
          If you don't have one of these, you should say N here.
@@ -466,7 +464,6 @@ config X86_INTEL_CE
        select X86_REBOOTFIXUPS
        select OF
        select OF_EARLY_FLATTREE
-       select IRQ_DOMAIN
        ---help---
          Select for the Intel CE media processor (CE4100) SOC.
          This option compiles in support for the CE4100 SOC for settop
@@ -666,7 +663,7 @@ config PARAVIRT_DEBUG
 config PARAVIRT_SPINLOCKS
        bool "Paravirtualization layer for spinlocks"
        depends on PARAVIRT && SMP
-       select UNINLINE_SPIN_UNLOCK
+       select UNINLINE_SPIN_UNLOCK if !QUEUED_SPINLOCKS
        ---help---
          Paravirtualized spinlocks allow a pvops backend to replace the
          spinlock implementation with something virtualization-friendly
@@ -851,11 +848,12 @@ config NR_CPUS
        default "1" if !SMP
        default "8192" if MAXSMP
        default "32" if SMP && X86_BIGSMP
-       default "8" if SMP
+       default "8" if SMP && X86_32
+       default "64" if SMP
        ---help---
          This allows you to specify the maximum number of CPUs which this
          kernel will support.  If CPUMASK_OFFSTACK is enabled, the maximum
-         supported value is 4096, otherwise the maximum value is 512.  The
+         supported value is 8192, otherwise the maximum value is 512.  The
          minimum value which makes sense is 2.
 
          This is purely to save memory - each supported CPU adds
@@ -863,7 +861,7 @@ config NR_CPUS
 
 config SCHED_SMT
        bool "SMT (Hyperthreading) scheduler support"
-       depends on X86_HT
+       depends on SMP
        ---help---
          SMT scheduler support improves the CPU scheduler's decision making
          when dealing with Intel Pentium 4 chips with HyperThreading at a
@@ -873,7 +871,7 @@ config SCHED_SMT
 config SCHED_MC
        def_bool y
        prompt "Multi-core scheduler support"
-       depends on X86_HT
+       depends on SMP
        ---help---
          Multi-core scheduler support improves the CPU scheduler's decision
          making when dealing with multi-core CPU chips at a cost of slightly
@@ -914,12 +912,12 @@ config X86_UP_IOAPIC
 config X86_LOCAL_APIC
        def_bool y
        depends on X86_64 || SMP || X86_32_NON_STANDARD || X86_UP_APIC || PCI_MSI
-       select GENERIC_IRQ_LEGACY_ALLOC_HWIRQ
+       select IRQ_DOMAIN_HIERARCHY
+       select PCI_MSI_IRQ_DOMAIN if PCI_MSI
 
 config X86_IO_APIC
        def_bool y
        depends on X86_LOCAL_APIC || X86_UP_IOAPIC
-       select IRQ_DOMAIN
 
 config X86_REROUTE_FOR_BROKEN_BOOT_IRQS
        bool "Reroute for broken boot IRQs"
index 72484a645f056d1d7a8aa4182b07fc908c3b4162..a15893d17c55988b542ad362f8c0074958130587 100644 (file)
@@ -332,4 +332,27 @@ config X86_DEBUG_STATIC_CPU_HAS
 
          If unsure, say N.
 
+config X86_DEBUG_FPU
+       bool "Debug the x86 FPU code"
+       depends on DEBUG_KERNEL
+       default y
+       ---help---
+         If this option is enabled, extra sanity checks and (boot time)
+         debug printouts are added to the kernel. This debugging adds a
+         small amount of runtime overhead.
+
+         If unsure, say N.
+
+config PUNIT_ATOM_DEBUG
+       tristate "ATOM Punit debug driver"
+       select DEBUG_FS
+       select IOSF_MBI
+       ---help---
+         This is a debug driver that reads the power states of all
+         Punit North Complex devices. The power state of each device
+         is exposed as part of the debugfs interface; the current
+         state can be read from
+         /sys/kernel/debug/punit_atom/dev_power_state
+
 endmenu
index 2fda005bb33443c1684546dff0620f8db48e7c5c..118e6debc483ed6f6d6357209619216843ee670c 100644 (file)
@@ -77,6 +77,12 @@ else
         KBUILD_AFLAGS += -m64
         KBUILD_CFLAGS += -m64
 
+        # Align jump targets to 1 byte, not the default 16 bytes:
+        KBUILD_CFLAGS += -falign-jumps=1
+
+        # Pack loops tightly as well:
+        KBUILD_CFLAGS += -falign-loops=1
+
         # Don't autogenerate traditional x87 instructions
         KBUILD_CFLAGS += $(call cc-option,-mno-80387)
         KBUILD_CFLAGS += $(call cc-option,-mno-fp-ret-in-387)
@@ -84,6 +90,9 @@ else
        # Use -mpreferred-stack-boundary=3 if supported.
        KBUILD_CFLAGS += $(call cc-option,-mpreferred-stack-boundary=3)
 
+       # Use -mskip-rax-setup if supported.
+       KBUILD_CFLAGS += $(call cc-option,-mskip-rax-setup)
+
         # FIXME - should be integrated in Makefile.cpu (Makefile_32.cpu)
         cflags-$(CONFIG_MK8) += $(call cc-option,-march=k8)
         cflags-$(CONFIG_MPSC) += $(call cc-option,-march=nocona)
@@ -140,12 +149,6 @@ endif
 sp-$(CONFIG_X86_32) := esp
 sp-$(CONFIG_X86_64) := rsp
 
-# do binutils support CFI?
-cfi := $(call as-instr,.cfi_startproc\n.cfi_rel_offset $(sp-y)$(comma)0\n.cfi_endproc,-DCONFIG_AS_CFI=1)
-# is .cfi_signal_frame supported too?
-cfi-sigframe := $(call as-instr,.cfi_startproc\n.cfi_signal_frame\n.cfi_endproc,-DCONFIG_AS_CFI_SIGNAL_FRAME=1)
-cfi-sections := $(call as-instr,.cfi_sections .debug_frame,-DCONFIG_AS_CFI_SECTIONS=1)
-
 # does binutils support specific instructions?
 asinstr := $(call as-instr,fxsaveq (%rax),-DCONFIG_AS_FXSAVEQ=1)
 asinstr += $(call as-instr,pshufb %xmm0$(comma)%xmm0,-DCONFIG_AS_SSSE3=1)
@@ -153,8 +156,8 @@ asinstr += $(call as-instr,crc32l %eax$(comma)%eax,-DCONFIG_AS_CRC32=1)
 avx_instr := $(call as-instr,vxorps %ymm0$(comma)%ymm1$(comma)%ymm2,-DCONFIG_AS_AVX=1)
 avx2_instr :=$(call as-instr,vpbroadcastb %xmm0$(comma)%ymm1,-DCONFIG_AS_AVX2=1)
 
-KBUILD_AFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
-KBUILD_CFLAGS += $(cfi) $(cfi-sigframe) $(cfi-sections) $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_AFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
+KBUILD_CFLAGS += $(asinstr) $(avx_instr) $(avx2_instr)
 
 LDFLAGS := -m elf_$(UTS_MACHINE)
 
@@ -178,7 +181,7 @@ archscripts: scripts_basic
 # Syscall table generation
 
 archheaders:
-       $(Q)$(MAKE) $(build)=arch/x86/syscalls all
+       $(Q)$(MAKE) $(build)=arch/x86/entry/syscalls all
 
 archprepare:
 ifeq ($(CONFIG_KEXEC_FILE),y)
@@ -241,7 +244,7 @@ install:
 
 PHONY += vdso_install
 vdso_install:
-       $(Q)$(MAKE) $(build)=arch/x86/vdso $@
+       $(Q)$(MAKE) $(build)=arch/x86/entry/vdso $@
 
 archclean:
        $(Q)rm -rf $(objtree)/arch/i386
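Of the new compiler flags, -mskip-rax-setup is the least self-explanatory: the x86-64 SysV ABI requires callers of variadic functions to load %al with the number of vector registers used for arguments, and since the kernel never passes floating-point varargs that setup is dead weight. A userspace sketch of the call pattern the flag elides, shown only to illustrate the ABI rule:

    #include <stdarg.h>

    /* With only integer arguments, the caller's %al setup is just
     * "xorl %eax, %eax" before the call; -mskip-rax-setup omits it. */
    static int sum(int n, ...)
    {
            va_list ap;
            int s = 0;

            va_start(ap, n);
            while (n--)
                    s += va_arg(ap, int);
            va_end(ap);
            return s;
    }

    int main(void)
    {
            return sum(3, 1, 2, 3) == 6 ? 0 : 1;
    }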
index 89dd0d78013aaff6c889340e0e3caceb4c8f8c88..805d25ca5f1db1602498c7047025b973ac788b3c 100644 (file)
@@ -2,15 +2,14 @@
 #define BOOT_COMPRESSED_MISC_H
 
 /*
- * we have to be careful, because no indirections are allowed here, and
- * paravirt_ops is a kind of one. As it will only run in baremetal anyway,
- * we just keep it from happening
+ * Special hack: we have to be careful, because no indirections are allowed here,
+ * and paravirt_ops is a kind of one. As this code only runs on bare metal anyway,
+ * we just keep it from happening. (This list needs to be extended when new
+ * paravirt and debugging variants are added.)
  */
 #undef CONFIG_PARAVIRT
+#undef CONFIG_PARAVIRT_SPINLOCKS
 #undef CONFIG_KASAN
-#ifdef CONFIG_X86_32
-#define _ASM_X86_DESC_H 1
-#endif
 
 #include <linux/linkage.h>
 #include <linux/screen_info.h>
index 112cefacf2af07b216cb598232bcc586a76fa919..b419f43ce0c589ce934195b77296760dc300830c 100644 (file)
@@ -32,7 +32,7 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/aes.h>
 #include <crypto/ablk_helper.h>
 #include <crypto/scatterwalk.h>
index baf0ac21ace5664835ed9435207ddabc3dc0cc7a..4c65c70e628bb2776c17c106973b2f2b668890fb 100644 (file)
@@ -19,8 +19,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/camellia.h>
 #include <asm/crypto/glue_helper.h>
 
@@ -561,16 +560,15 @@ static struct crypto_alg cmll_algs[10] = { {
 
 static int __init camellia_aesni_init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
        if (!cpu_has_avx2 || !cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
                pr_info("AVX2 or AES-NI instructions are not detected.\n");
                return -ENODEV;
        }
 
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX2 detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
 
index 78818a1e73e3f62b0e7be68123fcef148e978f77..80a0e4389c9ad3f5e6e1f6d8bc5292e391801ff2 100644 (file)
@@ -19,8 +19,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/camellia.h>
 #include <asm/crypto/glue_helper.h>
 
@@ -553,16 +552,10 @@ static struct crypto_alg cmll_algs[10] = { {
 
 static int __init camellia_aesni_init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
-       if (!cpu_has_avx || !cpu_has_aes || !cpu_has_osxsave) {
-               pr_info("AVX or AES-NI instructions are not detected.\n");
-               return -ENODEV;
-       }
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
 
index 236c80974457b97575a585911aabbdf72b23c4f8..be00aa48b2b5e3044ea8a397b0df37428e409447 100644 (file)
@@ -31,8 +31,7 @@
 #include <crypto/cast5.h>
 #include <crypto/cryptd.h>
 #include <crypto/ctr.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/glue_helper.h>
 
 #define CAST5_PARALLEL_BLOCKS 16
@@ -468,16 +467,10 @@ static struct crypto_alg cast5_algs[6] = { {
 
 static int __init cast5_init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
-       if (!cpu_has_avx || !cpu_has_osxsave) {
-               pr_info("AVX instructions are not detected.\n");
-               return -ENODEV;
-       }
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
 
index f448810ca4ac1dbac428d9eadee47ecaf546c6a3..5dbba72242217541a92056068378abd86e67c378 100644 (file)
@@ -36,8 +36,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/glue_helper.h>
 
 #define CAST6_PARALLEL_BLOCKS 8
@@ -590,16 +589,10 @@ static struct crypto_alg cast6_algs[10] = { {
 
 static int __init cast6_init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
-       if (!cpu_has_avx || !cpu_has_osxsave) {
-               pr_info("AVX instructions are not detected.\n");
-               return -ENODEV;
-       }
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
 
index 1937fc1d876338aa0aa9bb5fddea9e0aa3541707..07d2c6c86a5483216684970489fcba3b4478007d 100644 (file)
@@ -35,7 +35,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 #define CHKSUM_BLOCK_SIZE      1
 #define CHKSUM_DIGEST_SIZE     4
index 28640c3d6af7f6172a8fe39d4553c98019614e24..81a595d75cf5959bbcae8c2096ebdf0f538bf7f9 100644 (file)
@@ -32,8 +32,7 @@
 
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 
 #define CHKSUM_BLOCK_SIZE      1
 #define CHKSUM_DIGEST_SIZE     4
index b6c67bf30fdf6704f6d83b093ee73ae7d9b77fcf..a3fcfc97a311d5b660e0dcda3be489038f231dce 100644 (file)
@@ -29,7 +29,7 @@
 #include <linux/init.h>
 #include <linux/string.h>
 #include <linux/kernel.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/cpufeature.h>
 #include <asm/cpu_device_id.h>
 
index f368ba261739fa09be28bc02fe34cf3112099fa8..5a2f30f9f52dca78820b67cef7c025c53aa0b735 100644 (file)
@@ -18,7 +18,7 @@
 #include <linux/module.h>
 #include <linux/slab.h>
 #include <linux/crypto.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 struct crypto_fpu_ctx {
        struct crypto_blkcipher *child;
index 2079baf06bdd3b64e7f61ec3fecf088c4bd8bc24..64d7cf1b50e112ab370ac63e59e9ff89d4d677e0 100644 (file)
@@ -19,7 +19,7 @@
 #include <crypto/cryptd.h>
 #include <crypto/gf128mul.h>
 #include <crypto/internal/hash.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/cpu_device_id.h>
 
 #define GHASH_BLOCK_SIZE       16
index 2f63dc89e7a9ed0fa55b1ede20e7bd0ba2c060ca..7d838dc4d888f30010dfba7c21561a16bfe4e3d0 100644 (file)
@@ -20,8 +20,7 @@
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
 #include <crypto/serpent.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/serpent-avx.h>
 #include <asm/crypto/glue_helper.h>
 
@@ -537,16 +536,14 @@ static struct crypto_alg srp_algs[10] = { {
 
 static int __init init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
        if (!cpu_has_avx2 || !cpu_has_osxsave) {
                pr_info("AVX2 instructions are not detected.\n");
                return -ENODEV;
        }
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
 
index c8d478af84563e72a4ca21b9b5fd725b4c8aa248..da7dafc9b16d5cca124e4f6d01d4462f0cb92b15 100644 (file)
@@ -36,8 +36,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/serpent-avx.h>
 #include <asm/crypto/glue_helper.h>
 
@@ -596,16 +595,10 @@ static struct crypto_alg serpent_algs[10] = { {
 
 static int __init serpent_init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
-       if (!cpu_has_avx || !cpu_has_osxsave) {
-               printk(KERN_INFO "AVX instructions are not detected.\n");
-               return -ENODEV;
-       }
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               printk(KERN_INFO "AVX detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
 
index e510b1c5d690a5115231c608a51b6d4a37932d76..f53ed1dc88eafdbfe95489e49cae16efe1eb8086 100644 (file)
 #include <crypto/mcryptd.h>
 #include <crypto/crypto_wq.h>
 #include <asm/byteorder.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
 #include <linux/hardirq.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/api.h>
 #include "sha_mb_ctx.h"
 
 #define FLUSH_INTERVAL 1000 /* in usec */
index 33d1b9dc14cc751203ba72af1585f6685558b7fc..7c48e8b20848e5c890825f25db91c63616dce65d 100644 (file)
@@ -29,9 +29,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha1_base.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 
 
 asmlinkage void sha1_transform_ssse3(u32 *digest, const char *data,
@@ -123,15 +121,9 @@ static struct shash_alg alg = {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-       u64 xcr0;
-
-       if (!cpu_has_avx || !cpu_has_osxsave)
-               return false;
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
-
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+               if (cpu_has_avx)
+                       pr_info("AVX detected but unusable.\n");
                return false;
        }
 
index ccc338881ee81886b6de43963df23d179d510b22..f8097fc0d1d1b56eb6d48389ac2778145897cfb2 100644 (file)
@@ -37,9 +37,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha256_base.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <linux/string.h>
 
 asmlinkage void sha256_transform_ssse3(u32 *digest, const char *data,
@@ -132,15 +130,9 @@ static struct shash_alg algs[] = { {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-       u64 xcr0;
-
-       if (!cpu_has_avx || !cpu_has_osxsave)
-               return false;
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
-
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+               if (cpu_has_avx)
+                       pr_info("AVX detected but unusable.\n");
                return false;
        }
 
index d9fa4c1e063ff631cc09e168c6c6747527edf334..2edad7b81870154055e86694142a66de7fbc48ff 100644 (file)
@@ -35,9 +35,7 @@
 #include <linux/types.h>
 #include <crypto/sha.h>
 #include <crypto/sha512_base.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 
 #include <linux/string.h>
 
@@ -131,15 +129,9 @@ static struct shash_alg algs[] = { {
 #ifdef CONFIG_AS_AVX
 static bool __init avx_usable(void)
 {
-       u64 xcr0;
-
-       if (!cpu_has_avx || !cpu_has_osxsave)
-               return false;
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               pr_info("AVX detected but unusable.\n");
-
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, NULL)) {
+               if (cpu_has_avx)
+                       pr_info("AVX detected but unusable.\n");
                return false;
        }
 
index b5e2d56518517010e6869d592aa505450965e36f..c2bd0ce718eee505272249386dffef3cc417dad8 100644 (file)
@@ -36,9 +36,7 @@
 #include <crypto/ctr.h>
 #include <crypto/lrw.h>
 #include <crypto/xts.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
-#include <asm/xsave.h>
+#include <asm/fpu/api.h>
 #include <asm/crypto/twofish.h>
 #include <asm/crypto/glue_helper.h>
 #include <crypto/scatterwalk.h>
@@ -558,16 +556,10 @@ static struct crypto_alg twofish_algs[10] = { {
 
 static int __init twofish_init(void)
 {
-       u64 xcr0;
+       const char *feature_name;
 
-       if (!cpu_has_avx || !cpu_has_osxsave) {
-               printk(KERN_INFO "AVX instructions are not detected.\n");
-               return -ENODEV;
-       }
-
-       xcr0 = xgetbv(XCR_XFEATURE_ENABLED_MASK);
-       if ((xcr0 & (XSTATE_SSE | XSTATE_YMM)) != (XSTATE_SSE | XSTATE_YMM)) {
-               printk(KERN_INFO "AVX detected but unusable.\n");
+       if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
+               pr_info("CPU feature '%s' is not supported.\n", feature_name);
                return -ENODEV;
        }
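This whole run of crypto changes removes the same open-coded probe: reading XCR0 with xgetbv() and testing the SSE and YMM state bits by hand, now centralized behind cpu_has_xfeatures(), which also reports which feature was missing. What the removed code was doing, as a self-contained sketch (XCR0 bit layout per the Intel SDM; constants named after the kernel's XSTATE_* masks):

    #include <stdbool.h>
    #include <stdint.h>

    #define XSTATE_SSE  0x2ULL          /* XCR0 bit 1: SSE state */
    #define XSTATE_YMM  0x4ULL          /* XCR0 bit 2: AVX state */

    /* Read XCR0; only legal once the OS has set OSXSAVE in CR4. */
    static uint64_t xgetbv0(void)
    {
            uint32_t lo, hi;

            __asm__ ("xgetbv" : "=a" (lo), "=d" (hi) : "c" (0));
            return ((uint64_t)hi << 32) | lo;
    }

    /* The test each driver used to open-code: */
    static bool os_enabled_sse_ymm(void)
    {
            uint64_t xcr0 = xgetbv0();

            return (xcr0 & (XSTATE_SSE | XSTATE_YMM)) ==
                   (XSTATE_SSE | XSTATE_YMM);
    }

    int main(void)
    {
            return os_enabled_sse_ymm() ? 0 : 1;
    }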
 
diff --git a/arch/x86/entry/Makefile b/arch/x86/entry/Makefile
new file mode 100644 (file)
index 0000000..7a14497
--- /dev/null
@@ -0,0 +1,10 @@
+#
+# Makefile for the x86 low level entry code
+#
+obj-y                          := entry_$(BITS).o thunk_$(BITS).o syscall_$(BITS).o
+
+obj-y                          += vdso/
+obj-y                          += vsyscall/
+
+obj-$(CONFIG_IA32_EMULATION)   += entry_64_compat.o syscall_32.o
+
diff --git a/arch/x86/entry/calling.h b/arch/x86/entry/calling.h
new file mode 100644 (file)
index 0000000..f4e6308
--- /dev/null
@@ -0,0 +1,243 @@
+/*
+
+ x86 function call convention, 64-bit:
+ -------------------------------------
+  arguments           |  callee-saved      | extra caller-saved | return
+ [callee-clobbered]   |                    | [callee-clobbered] |
+ ---------------------------------------------------------------------------
+ rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
+
+ ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
+   functions when it sees tail-call optimization possibilities) rflags is
+   clobbered. Leftover arguments are passed over the stack frame.)
+
+ [*]  In the frame-pointers case rbp is fixed to the stack frame.
+
+ [**] for struct return values wider than 64 bits the return convention is a
+      bit more complex: up to 128 bits width we return small structures
+      straight in rax, rdx. For structures larger than that (3 words or
+      larger) the caller puts a pointer to an on-stack return struct
+      [allocated in the caller's stack frame] into the first argument - i.e.
+      into rdi. All other arguments shift up by one in this case.
+      Fortunately this case is rare in the kernel.
+
+For 32-bit we have the following conventions - kernel is built with
+-mregparm=3 and -freg-struct-return:
+
+ x86 function calling convention, 32-bit:
+ ----------------------------------------
+  arguments         | callee-saved        | extra caller-saved | return
+ [callee-clobbered] |                     | [callee-clobbered] |
+ -------------------------------------------------------------------------
+ eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
+
+ ( here too esp is obviously invariant across normal function calls. eflags
+   is clobbered. Leftover arguments are passed over the stack frame. )
+
+ [*]  In the frame-pointers case ebp is fixed to the stack frame.
+
+ [**] We build with -freg-struct-return, which on 32-bit means similar
+      semantics as on 64-bit: edx can be used for a second return value
+      (i.e. covering integer and structure sizes up to 64 bits) - after that
+      it gets more complex and more expensive: 3-word or larger struct returns
+      get done in the caller's frame and the pointer to the return struct goes
+      into regparm0, i.e. eax - the other arguments shift up and the
+      function's register parameters degenerate to regparm=2 in essence.
+
+*/
+
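The [**] struct-return rule is easy to see in plain C (a sketch; compile at -O2 and read the generated assembly to confirm the hidden-pointer convention):

    /* Up to 128 bits of aggregate return travels in rax:rdx; anything wider
     * is written through a hidden pointer the caller passes in rdi, shifting
     * the visible arguments up by one register. */
    struct two   { long a, b;    };     /* 128 bits: returned in rax, rdx */
    struct three { long a, b, c; };     /* 192 bits: returned via rdi     */

    struct two ret_two(void)
    {
            return (struct two){ 1, 2 };
    }

    struct three ret_three(void)
    {
            return (struct three){ 1, 2, 3 };
    }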
+#ifdef CONFIG_X86_64
+
+/*
+ * 64-bit system call stack frame layout defines and helpers,
+ * for assembly code:
+ */
+
+/* The layout forms the "struct pt_regs" on the stack: */
+/*
+ * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
+ * unless a syscall needs a complete, fully filled "struct pt_regs".
+ */
+#define R15            0*8
+#define R14            1*8
+#define R13            2*8
+#define R12            3*8
+#define RBP            4*8
+#define RBX            5*8
+/* These regs are callee-clobbered. Always saved on kernel entry. */
+#define R11            6*8
+#define R10            7*8
+#define R9             8*8
+#define R8             9*8
+#define RAX            10*8
+#define RCX            11*8
+#define RDX            12*8
+#define RSI            13*8
+#define RDI            14*8
+/*
+ * On syscall entry, this is syscall#. On CPU exception, this is error code.
+ * On hw interrupt, it's IRQ number:
+ */
+#define ORIG_RAX       15*8
+/* Return frame for iretq */
+#define RIP            16*8
+#define CS             17*8
+#define EFLAGS         18*8
+#define RSP            19*8
+#define SS             20*8
+
+#define SIZEOF_PTREGS  21*8
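These offsets are exactly the stack image of struct pt_regs; reading them back as a C declaration makes the layout easier to audit. A reference sketch in field order (not the authoritative kernel definition):

    /* The stack grows down, so field 0 sits at the lowest address (R15 = 0*8): */
    struct pt_regs_sketch {
            unsigned long r15, r14, r13, r12, bp, bx;   /* callee-preserved    */
            unsigned long r11, r10, r9, r8;             /* callee-clobbered    */
            unsigned long ax, cx, dx, si, di;           /* callee-clobbered    */
            unsigned long orig_ax;                      /* syscall#/error/IRQ# */
            unsigned long ip, cs, flags, sp, ss;        /* hardware iret frame */
    };
    /* sizeof(struct pt_regs_sketch) == 21*8 == SIZEOF_PTREGS */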
+
+       .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
+       addq    $-(15*8+\addskip), %rsp
+       .endm
+
+       .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
+       .if \r11
+       movq %r11, 6*8+\offset(%rsp)
+       .endif
+       .if \r8910
+       movq %r10, 7*8+\offset(%rsp)
+       movq %r9,  8*8+\offset(%rsp)
+       movq %r8,  9*8+\offset(%rsp)
+       .endif
+       .if \rax
+       movq %rax, 10*8+\offset(%rsp)
+       .endif
+       .if \rcx
+       movq %rcx, 11*8+\offset(%rsp)
+       .endif
+       movq %rdx, 12*8+\offset(%rsp)
+       movq %rsi, 13*8+\offset(%rsp)
+       movq %rdi, 14*8+\offset(%rsp)
+       .endm
+       .macro SAVE_C_REGS offset=0
+       SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
+       .endm
+       .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
+       SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
+       .endm
+       .macro SAVE_C_REGS_EXCEPT_R891011
+       SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
+       .endm
+       .macro SAVE_C_REGS_EXCEPT_RCX_R891011
+       SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
+       .endm
+       .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
+       SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
+       .endm
+
+       .macro SAVE_EXTRA_REGS offset=0
+       movq %r15, 0*8+\offset(%rsp)
+       movq %r14, 1*8+\offset(%rsp)
+       movq %r13, 2*8+\offset(%rsp)
+       movq %r12, 3*8+\offset(%rsp)
+       movq %rbp, 4*8+\offset(%rsp)
+       movq %rbx, 5*8+\offset(%rsp)
+       .endm
+       .macro SAVE_EXTRA_REGS_RBP offset=0
+       movq %rbp, 4*8+\offset(%rsp)
+       .endm
+
+       .macro RESTORE_EXTRA_REGS offset=0
+       movq 0*8+\offset(%rsp), %r15
+       movq 1*8+\offset(%rsp), %r14
+       movq 2*8+\offset(%rsp), %r13
+       movq 3*8+\offset(%rsp), %r12
+       movq 4*8+\offset(%rsp), %rbp
+       movq 5*8+\offset(%rsp), %rbx
+       .endm
+
+       .macro ZERO_EXTRA_REGS
+       xorl    %r15d, %r15d
+       xorl    %r14d, %r14d
+       xorl    %r13d, %r13d
+       xorl    %r12d, %r12d
+       xorl    %ebp, %ebp
+       xorl    %ebx, %ebx
+       .endm
+
+       .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
+       .if \rstor_r11
+       movq 6*8(%rsp), %r11
+       .endif
+       .if \rstor_r8910
+       movq 7*8(%rsp), %r10
+       movq 8*8(%rsp), %r9
+       movq 9*8(%rsp), %r8
+       .endif
+       .if \rstor_rax
+       movq 10*8(%rsp), %rax
+       .endif
+       .if \rstor_rcx
+       movq 11*8(%rsp), %rcx
+       .endif
+       .if \rstor_rdx
+       movq 12*8(%rsp), %rdx
+       .endif
+       movq 13*8(%rsp), %rsi
+       movq 14*8(%rsp), %rdi
+       .endm
+       .macro RESTORE_C_REGS
+       RESTORE_C_REGS_HELPER 1,1,1,1,1
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_RAX
+       RESTORE_C_REGS_HELPER 0,1,1,1,1
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_RCX
+       RESTORE_C_REGS_HELPER 1,0,1,1,1
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_R11
+       RESTORE_C_REGS_HELPER 1,1,0,1,1
+       .endm
+       .macro RESTORE_C_REGS_EXCEPT_RCX_R11
+       RESTORE_C_REGS_HELPER 1,0,0,1,1
+       .endm
+       .macro RESTORE_RSI_RDI
+       RESTORE_C_REGS_HELPER 0,0,0,0,0
+       .endm
+       .macro RESTORE_RSI_RDI_RDX
+       RESTORE_C_REGS_HELPER 0,0,0,0,1
+       .endm
+
+       .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
+       subq $-(15*8+\addskip), %rsp
+       .endm
+
+       .macro icebp
+       .byte 0xf1
+       .endm
+
+#else /* CONFIG_X86_64 */
+
+/*
+ * For 32-bit, these are simplified versions of SAVE_ALL/RESTORE_ALL. They
+ * differ from the entry_32.S versions in that they do not change the segment
+ * registers, so they are only suitable for in-kernel use, not when transitioning
+ * from or to user space. The resulting stack frame is not a standard
+ * pt_regs frame. The main use case is calling C code from assembler
+ * when all the registers need to be preserved.
+ */
+
+       .macro SAVE_ALL
+       pushl %eax
+       pushl %ebp
+       pushl %edi
+       pushl %esi
+       pushl %edx
+       pushl %ecx
+       pushl %ebx
+       .endm
+
+       .macro RESTORE_ALL
+       popl %ebx
+       popl %ecx
+       popl %edx
+       popl %esi
+       popl %edi
+       popl %ebp
+       popl %eax
+       .endm
+
+#endif /* CONFIG_X86_64 */
+
diff --git a/arch/x86/entry/entry_32.S b/arch/x86/entry/entry_32.S
new file mode 100644 (file)
index 0000000..21dc60a
--- /dev/null
@@ -0,0 +1,1248 @@
+/*
+ *  Copyright (C) 1991,1992  Linus Torvalds
+ *
+ * entry_32.S contains the system-call and low-level fault and trap handling routines.
+ *
+ * Stack layout in 'syscall_exit':
+ *     ptrace needs to have all registers on the stack.
+ *     If the order here is changed, it needs to be
+ *     updated in fork.c:copy_process(), signal.c:do_signal(),
+ *     ptrace.c and ptrace.h
+ *
+ *      0(%esp) - %ebx
+ *      4(%esp) - %ecx
+ *      8(%esp) - %edx
+ *      C(%esp) - %esi
+ *     10(%esp) - %edi
+ *     14(%esp) - %ebp
+ *     18(%esp) - %eax
+ *     1C(%esp) - %ds
+ *     20(%esp) - %es
+ *     24(%esp) - %fs
+ *     28(%esp) - %gs          saved iff !CONFIG_X86_32_LAZY_GS
+ *     2C(%esp) - orig_eax
+ *     30(%esp) - %eip
+ *     34(%esp) - %cs
+ *     38(%esp) - %eflags
+ *     3C(%esp) - %oldesp
+ *     40(%esp) - %oldss
+ */
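The same layout read back as a struct (a sketch of the 32-bit register image; the PT_* offsets used throughout this file index exactly these slots):

    /* Segment registers occupy a full 32-bit slot each, even though only
     * 16 bits are architecturally meaningful: */
    struct pt_regs32_sketch {
            unsigned long bx, cx, dx, si, di, bp, ax;   /* 0x00 .. 0x18 */
            unsigned long ds, es, fs, gs;               /* 0x1c .. 0x28 */
            unsigned long orig_ax;                      /* 0x2c         */
            unsigned long ip, cs, flags, sp, ss;        /* 0x30 .. 0x40 */
    };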
+
+#include <linux/linkage.h>
+#include <linux/err.h>
+#include <asm/thread_info.h>
+#include <asm/irqflags.h>
+#include <asm/errno.h>
+#include <asm/segment.h>
+#include <asm/smp.h>
+#include <asm/page_types.h>
+#include <asm/percpu.h>
+#include <asm/processor-flags.h>
+#include <asm/ftrace.h>
+#include <asm/irq_vectors.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative-asm.h>
+#include <asm/asm.h>
+#include <asm/smap.h>
+
+/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
+#include <linux/elf-em.h>
+#define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
+#define __AUDIT_ARCH_LE                0x40000000
+
+#ifndef CONFIG_AUDITSYSCALL
+# define sysenter_audit                syscall_trace_entry
+# define sysexit_audit         syscall_exit_work
+#endif
+
+       .section .entry.text, "ax"
+
+/*
+ * We use macros for low-level operations which need to be overridden
+ * for paravirtualization.  The following will never clobber any registers:
+ *   INTERRUPT_RETURN (aka. "iret")
+ *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
+ *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
+ *
+ * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
+ * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
+ * Allowing a register to be clobbered can shrink the paravirt replacement
+ * enough to patch inline, increasing performance.
+ */
+
+#ifdef CONFIG_PREEMPT
+# define preempt_stop(clobbers)        DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
+#else
+# define preempt_stop(clobbers)
+# define resume_kernel         restore_all
+#endif
+
+.macro TRACE_IRQS_IRET
+#ifdef CONFIG_TRACE_IRQFLAGS
+       testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp)     # interrupts off?
+       jz      1f
+       TRACE_IRQS_ON
+1:
+#endif
+.endm
+
+/*
+ * User gs save/restore
+ *
+ * %gs is used for userland TLS and kernel only uses it for stack
+ * canary which is required to be at %gs:20 by gcc.  Read the comment
+ * at the top of stackprotector.h for more info.
+ *
+ * Local labels 98 and 99 are used.
+ */
+#ifdef CONFIG_X86_32_LAZY_GS
+
+ /* unfortunately push/pop can't be no-op */
+.macro PUSH_GS
+       pushl   $0
+.endm
+.macro POP_GS pop=0
+       addl    $(4 + \pop), %esp
+.endm
+.macro POP_GS_EX
+.endm
+
+ /* all the rest are no-op */
+.macro PTGS_TO_GS
+.endm
+.macro PTGS_TO_GS_EX
+.endm
+.macro GS_TO_REG reg
+.endm
+.macro REG_TO_PTGS reg
+.endm
+.macro SET_KERNEL_GS reg
+.endm
+
+#else  /* CONFIG_X86_32_LAZY_GS */
+
+.macro PUSH_GS
+       pushl   %gs
+.endm
+
+.macro POP_GS pop=0
+98:    popl    %gs
+  .if \pop <> 0
+       add     $\pop, %esp
+  .endif
+.endm
+.macro POP_GS_EX
+.pushsection .fixup, "ax"
+99:    movl    $0, (%esp)
+       jmp     98b
+.popsection
+       _ASM_EXTABLE(98b, 99b)
+.endm
+
+.macro PTGS_TO_GS
+98:    mov     PT_GS(%esp), %gs
+.endm
+.macro PTGS_TO_GS_EX
+.pushsection .fixup, "ax"
+99:    movl    $0, PT_GS(%esp)
+       jmp     98b
+.popsection
+       _ASM_EXTABLE(98b, 99b)
+.endm
+
+.macro GS_TO_REG reg
+       movl    %gs, \reg
+.endm
+.macro REG_TO_PTGS reg
+       movl    \reg, PT_GS(%esp)
+.endm
+.macro SET_KERNEL_GS reg
+       movl    $(__KERNEL_STACK_CANARY), \reg
+       movl    \reg, %gs
+.endm
+
+#endif /* CONFIG_X86_32_LAZY_GS */
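The reason %gs can never simply be dropped here is the stack-protector contract noted above: on 32-bit, gcc hard-codes the canary load as %gs:20. A userspace-style sketch of the pattern the compiler emits around protected functions (illustrative only):

    /* -fstack-protector loads the canary from the fixed %gs offset on entry,
     * re-checks it on exit, and calls __stack_chk_fail() on a mismatch: */
    static unsigned int read_gs_canary(void)
    {
            unsigned int canary;

            __asm__ ("movl %%gs:20, %0" : "=r" (canary));
            return canary;
    }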
+
+.macro SAVE_ALL
+       cld
+       PUSH_GS
+       pushl   %fs
+       pushl   %es
+       pushl   %ds
+       pushl   %eax
+       pushl   %ebp
+       pushl   %edi
+       pushl   %esi
+       pushl   %edx
+       pushl   %ecx
+       pushl   %ebx
+       movl    $(__USER_DS), %edx
+       movl    %edx, %ds
+       movl    %edx, %es
+       movl    $(__KERNEL_PERCPU), %edx
+       movl    %edx, %fs
+       SET_KERNEL_GS %edx
+.endm
+
+.macro RESTORE_INT_REGS
+       popl    %ebx
+       popl    %ecx
+       popl    %edx
+       popl    %esi
+       popl    %edi
+       popl    %ebp
+       popl    %eax
+.endm
+
+.macro RESTORE_REGS pop=0
+       RESTORE_INT_REGS
+1:     popl    %ds
+2:     popl    %es
+3:     popl    %fs
+       POP_GS \pop
+.pushsection .fixup, "ax"
+4:     movl    $0, (%esp)
+       jmp     1b
+5:     movl    $0, (%esp)
+       jmp     2b
+6:     movl    $0, (%esp)
+       jmp     3b
+.popsection
+       _ASM_EXTABLE(1b, 4b)
+       _ASM_EXTABLE(2b, 5b)
+       _ASM_EXTABLE(3b, 6b)
+       POP_GS_EX
+.endm
+
+ENTRY(ret_from_fork)
+       pushl   %eax
+       call    schedule_tail
+       GET_THREAD_INFO(%ebp)
+       popl    %eax
+       pushl   $0x0202                         # Reset kernel eflags
+       popfl
+       jmp     syscall_exit
+END(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+       pushl   %eax
+       call    schedule_tail
+       GET_THREAD_INFO(%ebp)
+       popl    %eax
+       pushl   $0x0202                         # Reset kernel eflags
+       popfl
+       movl    PT_EBP(%esp), %eax
+       call    *PT_EBX(%esp)
+       movl    $0, PT_EAX(%esp)
+       jmp     syscall_exit
+ENDPROC(ret_from_kernel_thread)
+
+/*
+ * Return to user mode is not as complex as all this looks,
+ * but we want the default path for a system call return to
+ * go as quickly as possible which is why some of this is
+ * less clear than it otherwise should be.
+ */
+
+       # userspace resumption stub bypassing syscall exit tracing
+       ALIGN
+ret_from_exception:
+       preempt_stop(CLBR_ANY)
+ret_from_intr:
+       GET_THREAD_INFO(%ebp)
+#ifdef CONFIG_VM86
+       movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS and CS
+       movb    PT_CS(%esp), %al
+       andl    $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
+#else
+       /*
+        * We can be coming here from child spawned by kernel_thread().
+        */
+       movl    PT_CS(%esp), %eax
+       andl    $SEGMENT_RPL_MASK, %eax
+#endif
+       cmpl    $USER_RPL, %eax
+       jb      resume_kernel                   # not returning to v8086 or userspace
+
+ENTRY(resume_userspace)
+       LOCKDEP_SYS_EXIT
+       DISABLE_INTERRUPTS(CLBR_ANY)            # make sure we don't miss an interrupt
+                                               # setting need_resched or sigpending
+                                               # between sampling and the iret
+       TRACE_IRQS_OFF
+       movl    TI_flags(%ebp), %ecx
+       andl    $_TIF_WORK_MASK, %ecx           # is there any work to be done on
+                                               # int/exception return?
+       jne     work_pending
+       jmp     restore_all
+END(ret_from_exception)
+
+#ifdef CONFIG_PREEMPT
+ENTRY(resume_kernel)
+       DISABLE_INTERRUPTS(CLBR_ANY)
+need_resched:
+       cmpl    $0, PER_CPU_VAR(__preempt_count)
+       jnz     restore_all
+       testl   $X86_EFLAGS_IF, PT_EFLAGS(%esp) # interrupts off (exception path) ?
+       jz      restore_all
+       call    preempt_schedule_irq
+       jmp     need_resched
+END(resume_kernel)
+#endif
+
+/*
+ * SYSENTER_RETURN points to after the SYSENTER instruction
+ * in the vsyscall page.  See vsyscall-sysenter.S, which defines
+ * the symbol.
+ */
+
+       # SYSENTER  call handler stub
+ENTRY(entry_SYSENTER_32)
+       movl    TSS_sysenter_sp0(%esp), %esp
+sysenter_past_esp:
+       /*
+        * Interrupts are disabled here, but we can't trace it until
+        * enough kernel state to call TRACE_IRQS_OFF can be called - but
+        * we immediately enable interrupts at that point anyway.
+        */
+       pushl   $__USER_DS
+       pushl   %ebp
+       pushfl
+       orl     $X86_EFLAGS_IF, (%esp)
+       pushl   $__USER_CS
+       /*
+        * Push current_thread_info()->sysenter_return to the stack.
+        * A tiny bit of offset fixup is necessary: TI_sysenter_return
+        * is relative to thread_info, which is at the bottom of the
+        * kernel stack page.  4*4 means the 4 words pushed above;
+        * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
+        * and THREAD_SIZE takes us to the bottom.
+        */
+       pushl   ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
+
+       pushl   %eax
+       SAVE_ALL
+       ENABLE_INTERRUPTS(CLBR_NONE)
+
+/*
+ * Load the potential sixth argument from user stack.
+ * Careful about security.
+ */
+       cmpl    $__PAGE_OFFSET-3, %ebp
+       jae     syscall_fault
+       ASM_STAC
+1:     movl    (%ebp), %ebp
+       ASM_CLAC
+       movl    %ebp, PT_EBP(%esp)
+       _ASM_EXTABLE(1b, syscall_fault)
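The guarded load above is the SYSENTER path fetching the sixth syscall argument from the user stack: a bounds check, then a read that may fault and be fixed up through the exception table. In C terms, roughly (names here are illustrative; the real path has no helper function):

    #define PAGE_OFFSET_SKETCH 0xC0000000UL  /* default 32-bit user/kernel split */

    /* Stand-in for a faultable user read; the real code relies on the
     * _ASM_EXTABLE entry to route a fault to syscall_fault instead. */
    static int user_read_sketch(unsigned long addr, unsigned long *val)
    {
            *val = *(unsigned long *)addr;
            return 0;
    }

    static long fetch_sixth_arg(unsigned long user_bp, unsigned long *arg6)
    {
            if (user_bp >= PAGE_OFFSET_SKETCH - 3)  /* 4-byte read must stay in user space */
                    return -14;                     /* -EFAULT -> syscall_fault */
            if (user_read_sketch(user_bp, arg6))
                    return -14;
            return 0;
    }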
+
+       GET_THREAD_INFO(%ebp)
+
+       testl   $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
+       jnz     sysenter_audit
+sysenter_do_call:
+       cmpl    $(NR_syscalls), %eax
+       jae     sysenter_badsys
+       call    *sys_call_table(, %eax, 4)
+sysenter_after_call:
+       movl    %eax, PT_EAX(%esp)
+       LOCKDEP_SYS_EXIT
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       TRACE_IRQS_OFF
+       movl    TI_flags(%ebp), %ecx
+       testl   $_TIF_ALLWORK_MASK, %ecx
+       jnz     sysexit_audit
+sysenter_exit:
+/* if something modifies registers it must also disable sysexit */
+       movl    PT_EIP(%esp), %edx
+       movl    PT_OLDESP(%esp), %ecx
+       xorl    %ebp, %ebp
+       TRACE_IRQS_ON
+1:     mov     PT_FS(%esp), %fs
+       PTGS_TO_GS
+       ENABLE_INTERRUPTS_SYSEXIT
+
+#ifdef CONFIG_AUDITSYSCALL
+sysenter_audit:
+       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), TI_flags(%ebp)
+       jnz     syscall_trace_entry
+       /* movl PT_EAX(%esp), %eax already set, syscall number: 1st arg to audit */
+       movl    PT_EBX(%esp), %edx              /* ebx/a0: 2nd arg to audit */
+       /* movl PT_ECX(%esp), %ecx already set, a1: 3rd arg to audit */
+       pushl   PT_ESI(%esp)                    /* a3: 5th arg */
+       pushl   PT_EDX+4(%esp)                  /* a2: 4th arg */
+       call    __audit_syscall_entry
+       popl    %ecx                            /* get that remapped edx off the stack */
+       popl    %ecx                            /* get that remapped esi off the stack */
+       movl    PT_EAX(%esp), %eax              /* reload syscall number */
+       jmp     sysenter_do_call
+
+sysexit_audit:
+       testl   $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
+       jnz     syscall_exit_work
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_ANY)
+       movl    %eax, %edx                      /* second arg, syscall return value */
+       cmpl    $-MAX_ERRNO, %eax               /* is it an error ? */
+       setbe %al                               /* 1 if so, 0 if not */
+       movzbl %al, %eax                        /* zero-extend that */
+       call    __audit_syscall_exit
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       TRACE_IRQS_OFF
+       movl    TI_flags(%ebp), %ecx
+       testl   $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
+       jnz     syscall_exit_work
+       movl    PT_EAX(%esp), %eax              /* reload syscall return value */
+       jmp     sysenter_exit
+#endif
+
+.pushsection .fixup, "ax"
+2:     movl    $0, PT_FS(%esp)
+       jmp     1b
+.popsection
+       _ASM_EXTABLE(1b, 2b)
+       PTGS_TO_GS_EX
+ENDPROC(entry_SYSENTER_32)
+
+       # system call handler stub
+ENTRY(entry_INT80_32)
+       ASM_CLAC
+       pushl   %eax                            # save orig_eax
+       SAVE_ALL
+       GET_THREAD_INFO(%ebp)
+                                               # system call tracing in operation / emulation
+       testl   $_TIF_WORK_SYSCALL_ENTRY, TI_flags(%ebp)
+       jnz     syscall_trace_entry
+       cmpl    $(NR_syscalls), %eax
+       jae     syscall_badsys
+syscall_call:
+       call    *sys_call_table(, %eax, 4)
+syscall_after_call:
+       movl    %eax, PT_EAX(%esp)              # store the return value
+syscall_exit:
+       LOCKDEP_SYS_EXIT
+       DISABLE_INTERRUPTS(CLBR_ANY)            # make sure we don't miss an interrupt
+                                               # setting need_resched or sigpending
+                                               # between sampling and the iret
+       TRACE_IRQS_OFF
+       movl    TI_flags(%ebp), %ecx
+       testl   $_TIF_ALLWORK_MASK, %ecx        # current->work
+       jnz     syscall_exit_work
+
+restore_all:
+       TRACE_IRQS_IRET
+restore_all_notrace:
+#ifdef CONFIG_X86_ESPFIX32
+       movl    PT_EFLAGS(%esp), %eax           # mix EFLAGS, SS and CS
+       /*
+        * Warning: PT_OLDSS(%esp) contains the wrong/random values if we
+        * are returning to the kernel.
+        * See comments in process.c:copy_thread() for details.
+        */
+       movb    PT_OLDSS(%esp), %ah
+       movb    PT_CS(%esp), %al
+       andl    $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
+       cmpl    $((SEGMENT_LDT << 8) | USER_RPL), %eax
+       je ldt_ss                               # returning to user-space with LDT SS
+#endif
+restore_nocheck:
+       RESTORE_REGS 4                          # skip orig_eax/error_code
+irq_return:
+       INTERRUPT_RETURN
+.section .fixup, "ax"
+ENTRY(iret_exc )
+       pushl   $0                              # no error code
+       pushl   $do_iret_error
+       jmp     error_code
+.previous
+       _ASM_EXTABLE(irq_return, iret_exc)
+
+#ifdef CONFIG_X86_ESPFIX32
+ldt_ss:
+#ifdef CONFIG_PARAVIRT
+       /*
+        * The kernel can't run on a non-flat stack if paravirt mode
+        * is active.  Rather than try to fixup the high bits of
+        * ESP, bypass this code entirely.  This may break DOSemu
+        * and/or Wine support in a paravirt VM, although the option
+        * is still available to implement the setting of the high
+        * 16-bits in the INTERRUPT_RETURN paravirt-op.
+        */
+       cmpl    $0, pv_info+PARAVIRT_enabled
+       jne     restore_nocheck
+#endif
+
+/*
+ * Setup and switch to ESPFIX stack
+ *
+ * We're returning to userspace with a 16 bit stack. The CPU will not
+ * restore the high word of ESP for us on executing iret... This is an
+ * "official" bug of all the x86-compatible CPUs, which we can work
+ * around to make dosemu and wine happy. We do this by preloading the
+ * high word of ESP with the high word of the userspace ESP while
+ * compensating for the offset by changing to the ESPFIX segment with
+ * a base address that matches for the difference.
+ */
+#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
+       mov     %esp, %edx                      /* load kernel esp */
+       mov     PT_OLDESP(%esp), %eax           /* load userspace esp */
+       mov     %dx, %ax                        /* eax: new kernel esp */
+       sub     %eax, %edx                      /* offset (low word is 0) */
+       shr     $16, %edx
+       mov     %dl, GDT_ESPFIX_SS + 4          /* bits 16..23 */
+       mov     %dh, GDT_ESPFIX_SS + 7          /* bits 24..31 */
+       pushl   $__ESPFIX_SS
+       pushl   %eax                            /* new kernel esp */
+       /*
+        * Disable interrupts, but do not irqtrace this section: we
+        * will soon execute iret and the tracer was already set to
+        * the irqstate after the IRET:
+        */
+       DISABLE_INTERRUPTS(CLBR_EAX)
+       lss     (%esp), %esp                    /* switch to espfix segment */
+       jmp     restore_nocheck
+#endif
+ENDPROC(entry_INT80_32)
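The ESPFIX arithmetic above compresses to a few lines of C: pick a new ESP that keeps the kernel's low 16 bits but the user's high 16 bits, then give %ss a segment base equal to the difference, so base + new_esp still addresses the real kernel stack. A sketch of the computation only:

    /* Because new_esp and kernel_esp share their low 16 bits, the difference
     * has a zero low word - which is why the asm patches only base bits
     * 16..31 of the GDT_ENTRY_ESPFIX_SS descriptor. */
    static unsigned long espfix_segment_base(unsigned long kernel_esp,
                                             unsigned long user_esp)
    {
            unsigned long new_esp =
                    (user_esp & 0xffff0000UL) | (kernel_esp & 0xffffUL);

            return kernel_esp - new_esp;    /* base + new_esp == kernel_esp */
    }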
+
+       # perform work that needs to be done immediately before resumption
+       ALIGN
+work_pending:
+       testb   $_TIF_NEED_RESCHED, %cl
+       jz      work_notifysig
+work_resched:
+       call    schedule
+       LOCKDEP_SYS_EXIT
+       DISABLE_INTERRUPTS(CLBR_ANY)            # make sure we don't miss an interrupt
+                                               # setting need_resched or sigpending
+                                               # between sampling and the iret
+       TRACE_IRQS_OFF
+       movl    TI_flags(%ebp), %ecx
+       andl    $_TIF_WORK_MASK, %ecx           # is there any work to be done other
+                                               # than syscall tracing?
+       jz      restore_all
+       testb   $_TIF_NEED_RESCHED, %cl
+       jnz     work_resched
+
+work_notifysig:                                        # deal with pending signals and
+                                               # notify-resume requests
+#ifdef CONFIG_VM86
+       testl   $X86_EFLAGS_VM, PT_EFLAGS(%esp)
+       movl    %esp, %eax
+       jnz     work_notifysig_v86              # returning to kernel-space or
+                                               # vm86-space
+1:
+#else
+       movl    %esp, %eax
+#endif
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       movb    PT_CS(%esp), %bl
+       andb    $SEGMENT_RPL_MASK, %bl
+       cmpb    $USER_RPL, %bl
+       jb      resume_kernel
+       xorl    %edx, %edx
+       call    do_notify_resume
+       jmp     resume_userspace
+
+#ifdef CONFIG_VM86
+       ALIGN
+work_notifysig_v86:
+       pushl   %ecx                            # save ti_flags for do_notify_resume
+       call    save_v86_state                  # %eax contains pt_regs pointer
+       popl    %ecx
+       movl    %eax, %esp
+       jmp     1b
+#endif
+END(work_pending)
+
+       # perform syscall entry tracing
+       ALIGN
+syscall_trace_entry:
+       movl    $-ENOSYS, PT_EAX(%esp)
+       movl    %esp, %eax
+       call    syscall_trace_enter
+       /* What it returned is what we'll actually use.  */
+       cmpl    $(NR_syscalls), %eax
+       jnae    syscall_call
+       jmp     syscall_exit
+END(syscall_trace_entry)
+
+       # perform syscall exit tracing
+       ALIGN
+syscall_exit_work:
+       testl   $_TIF_WORK_SYSCALL_EXIT, %ecx
+       jz      work_pending
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_ANY)             # could let syscall_trace_leave() call
+                                               # schedule() instead
+       movl    %esp, %eax
+       call    syscall_trace_leave
+       jmp     resume_userspace
+END(syscall_exit_work)
+
+syscall_fault:
+       ASM_CLAC
+       GET_THREAD_INFO(%ebp)
+       movl    $-EFAULT, PT_EAX(%esp)
+       jmp     resume_userspace
+END(syscall_fault)
+
+syscall_badsys:
+       movl    $-ENOSYS, %eax
+       jmp     syscall_after_call
+END(syscall_badsys)
+
+sysenter_badsys:
+       movl    $-ENOSYS, %eax
+       jmp     sysenter_after_call
+END(sysenter_badsys)
+
+.macro FIXUP_ESPFIX_STACK
+/*
+ * Switch back from the ESPFIX stack to the normal zero-based stack
+ *
+ * We can't call C functions using the ESPFIX stack. This code reads
+ * the high word of the segment base from the GDT and switches to the
+ * normal stack, adjusting ESP by the matching offset.
+ */
+#ifdef CONFIG_X86_ESPFIX32
+       /* fixup the stack */
+       mov     GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
+       mov     GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
+       shl     $16, %eax
+       addl    %esp, %eax                      /* the adjusted stack pointer */
+       pushl   $__KERNEL_DS
+       pushl   %eax
+       lss     (%esp), %esp                    /* switch to the normal stack segment */
+#endif
+.endm
+.macro UNWIND_ESPFIX_STACK
+#ifdef CONFIG_X86_ESPFIX32
+       movl    %ss, %eax
+       /* see if on espfix stack */
+       cmpw    $__ESPFIX_SS, %ax
+       jne     27f
+       movl    $__KERNEL_DS, %eax
+       movl    %eax, %ds
+       movl    %eax, %es
+       /* switch to normal stack */
+       FIXUP_ESPFIX_STACK
+27:
+#endif
+.endm
+
+/*
+ * Build the entry stubs with some assembler magic.
+ * We pack 1 stub into every 8-byte block.
+ */
+       .align 8
+ENTRY(irq_entries_start)
+    vector=FIRST_EXTERNAL_VECTOR
+    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+       pushl   $(~vector+0x80)                 /* Note: always in signed byte range */
+    vector=vector+1
+       jmp     common_interrupt
+       .align  8
+    .endr
+END(irq_entries_start)
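Each stub pushes ~vector + 0x80 so the immediate always fits in a signed byte, keeping every stub inside its 8-byte slot; common_interrupt below then subtracts the 0x80 bias again, leaving the one's complement of the vector in the orig_ax slot. Recovering it in C (a sketch of what the IRQ dispatch code does):

    /* orig_ax holds ~vector, always in [-256, -1], after the bias is removed: */
    static unsigned int vector_from_orig_ax(long orig_ax)
    {
            return (unsigned int)~orig_ax & 0xff;
    }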
+
+/*
+ * the CPU automatically disables interrupts when executing an IRQ vector,
+ * so IRQ-flags tracing has to follow that:
+ */
+       .p2align CONFIG_X86_L1_CACHE_SHIFT
+common_interrupt:
+       ASM_CLAC
+       addl    $-0x80, (%esp)                  /* Adjust vector into the [-256, -1] range */
+       SAVE_ALL
+       TRACE_IRQS_OFF
+       movl    %esp, %eax
+       call    do_IRQ
+       jmp     ret_from_intr
+ENDPROC(common_interrupt)
+
+#define BUILD_INTERRUPT3(name, nr, fn) \
+ENTRY(name)                            \
+       ASM_CLAC;                       \
+       pushl   $~(nr);                 \
+       SAVE_ALL;                       \
+       TRACE_IRQS_OFF                  \
+       movl    %esp, %eax;             \
+       call    fn;                     \
+       jmp     ret_from_intr;          \
+ENDPROC(name)
+
+
+#ifdef CONFIG_TRACING
+# define TRACE_BUILD_INTERRUPT(name, nr)       BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
+#else
+# define TRACE_BUILD_INTERRUPT(name, nr)
+#endif
+
+#define BUILD_INTERRUPT(name, nr)              \
+       BUILD_INTERRUPT3(name, nr, smp_##name); \
+       TRACE_BUILD_INTERRUPT(name, nr)
+
+/* The include is where all of the SMP etc. interrupts come from */
+#include <asm/entry_arch.h>
+
+ENTRY(coprocessor_error)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_coprocessor_error
+       jmp     error_code
+END(coprocessor_error)
+
+ENTRY(simd_coprocessor_error)
+       ASM_CLAC
+       pushl   $0
+#ifdef CONFIG_X86_INVD_BUG
+       /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
+       ALTERNATIVE "pushl      $do_general_protection",        \
+                   "pushl      $do_simd_coprocessor_error",    \
+                   X86_FEATURE_XMM
+#else
+       pushl   $do_simd_coprocessor_error
+#endif
+       jmp     error_code
+END(simd_coprocessor_error)
+
+ENTRY(device_not_available)
+       ASM_CLAC
+       pushl   $-1                             # mark this as an int
+       pushl   $do_device_not_available
+       jmp     error_code
+END(device_not_available)
+
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_iret)
+       iret
+       _ASM_EXTABLE(native_iret, iret_exc)
+END(native_iret)
+
+ENTRY(native_irq_enable_sysexit)
+       sti
+       sysexit
+END(native_irq_enable_sysexit)
+#endif
+
+ENTRY(overflow)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_overflow
+       jmp     error_code
+END(overflow)
+
+ENTRY(bounds)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_bounds
+       jmp     error_code
+END(bounds)
+
+ENTRY(invalid_op)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_invalid_op
+       jmp     error_code
+END(invalid_op)
+
+ENTRY(coprocessor_segment_overrun)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_coprocessor_segment_overrun
+       jmp     error_code
+END(coprocessor_segment_overrun)
+
+ENTRY(invalid_TSS)
+       ASM_CLAC
+       pushl   $do_invalid_TSS
+       jmp     error_code
+END(invalid_TSS)
+
+ENTRY(segment_not_present)
+       ASM_CLAC
+       pushl   $do_segment_not_present
+       jmp     error_code
+END(segment_not_present)
+
+ENTRY(stack_segment)
+       ASM_CLAC
+       pushl   $do_stack_segment
+       jmp     error_code
+END(stack_segment)
+
+ENTRY(alignment_check)
+       ASM_CLAC
+       pushl   $do_alignment_check
+       jmp     error_code
+END(alignment_check)
+
+ENTRY(divide_error)
+       ASM_CLAC
+       pushl   $0                              # no error code
+       pushl   $do_divide_error
+       jmp     error_code
+END(divide_error)
+
+#ifdef CONFIG_X86_MCE
+ENTRY(machine_check)
+       ASM_CLAC
+       pushl   $0
+       pushl   machine_check_vector
+       jmp     error_code
+END(machine_check)
+#endif
+
+ENTRY(spurious_interrupt_bug)
+       ASM_CLAC
+       pushl   $0
+       pushl   $do_spurious_interrupt_bug
+       jmp     error_code
+END(spurious_interrupt_bug)
+
+#ifdef CONFIG_XEN
+/*
+ * Xen doesn't set %esp to be precisely what the normal SYSENTER
+ * entry point expects, so fix it up before using the normal path.
+ */
+ENTRY(xen_sysenter_target)
+       addl    $5*4, %esp                      /* remove xen-provided frame */
+       jmp     sysenter_past_esp
+
+ENTRY(xen_hypervisor_callback)
+       pushl   $-1                             /* orig_ax = -1 => not a system call */
+       SAVE_ALL
+       TRACE_IRQS_OFF
+
+       /*
+        * Check to see if we got the event in the critical
+        * region in xen_iret_direct, after we've reenabled
+        * events and checked for pending events.  This simulates
+        * iret instruction's behaviour where it delivers a
+        * pending interrupt when enabling interrupts:
+        */
+       movl    PT_EIP(%esp), %eax
+       cmpl    $xen_iret_start_crit, %eax
+       jb      1f
+       cmpl    $xen_iret_end_crit, %eax
+       jae     1f
+
+       jmp     xen_iret_crit_fixup
+
+ENTRY(xen_do_upcall)
+1:     mov     %esp, %eax
+       call    xen_evtchn_do_upcall
+#ifndef CONFIG_PREEMPT
+       call    xen_maybe_preempt_hcall
+#endif
+       jmp     ret_from_intr
+ENDPROC(xen_hypervisor_callback)
+
+/*
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we fix up by reattempting the load, and zeroing the segment
+ * register if the load fails.
+ * Category 2 we fix up by jumping to do_iret_error. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by maintaining a status value in EAX.
+ */
+ENTRY(xen_failsafe_callback)
+       pushl   %eax
+       movl    $1, %eax
+1:     mov     4(%esp), %ds
+2:     mov     8(%esp), %es
+3:     mov     12(%esp), %fs
+4:     mov     16(%esp), %gs
+       /* EAX == 0 => Category 1 (Bad segment)
+          EAX != 0 => Category 2 (Bad IRET) */
+       testl   %eax, %eax
+       popl    %eax
+       lea     16(%esp), %esp
+       jz      5f
+       jmp     iret_exc
+5:     pushl   $-1                             /* orig_ax = -1 => not a system call */
+       SAVE_ALL
+       jmp     ret_from_exception
+
+.section .fixup, "ax"
+6:     xorl    %eax, %eax
+       movl    %eax, 4(%esp)
+       jmp     1b
+7:     xorl    %eax, %eax
+       movl    %eax, 8(%esp)
+       jmp     2b
+8:     xorl    %eax, %eax
+       movl    %eax, 12(%esp)
+       jmp     3b
+9:     xorl    %eax, %eax
+       movl    %eax, 16(%esp)
+       jmp     4b
+.previous
+       _ASM_EXTABLE(1b, 6b)
+       _ASM_EXTABLE(2b, 7b)
+       _ASM_EXTABLE(3b, 8b)
+       _ASM_EXTABLE(4b, 9b)
+ENDPROC(xen_failsafe_callback)
+
+BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+               xen_evtchn_do_upcall)
+
+#endif /* CONFIG_XEN */
+
+#if IS_ENABLED(CONFIG_HYPERV)
+
+BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
+       hyperv_vector_handler)
+
+#endif /* CONFIG_HYPERV */
+
+#ifdef CONFIG_FUNCTION_TRACER
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+ENTRY(mcount)
+       ret
+END(mcount)
+
+ENTRY(ftrace_caller)
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       pushl   $0                              /* Pass NULL as regs pointer */
+       movl    4*4(%esp), %eax
+       movl    0x4(%ebp), %edx
+       movl    function_trace_op, %ecx
+       subl    $MCOUNT_INSN_SIZE, %eax
+
+.globl ftrace_call
+ftrace_call:
+       call    ftrace_stub
+
+       addl    $4, %esp                        /* skip NULL pointer */
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+ftrace_ret:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+.globl ftrace_graph_call
+ftrace_graph_call:
+       jmp     ftrace_stub
+#endif
+
+.globl ftrace_stub
+ftrace_stub:
+       ret
+END(ftrace_caller)
+
+ENTRY(ftrace_regs_caller)
+       pushf   /* push flags before compare (in cs location) */
+
+       /*
+        * i386 does not save SS and ESP when coming from the kernel.
+        * Instead, to get sp, &regs->sp is used (see ptrace.h).
+        * Unfortunately, that means eflags must be at the same location
+        * as the current return ip is. We move the return ip into the
+        * ip location, and move flags into the return ip location.
+        */
+       pushl   4(%esp)                         /* save return ip into ip slot */
+
+       pushl   $0                              /* Load 0 into orig_ax */
+       pushl   %gs
+       pushl   %fs
+       pushl   %es
+       pushl   %ds
+       pushl   %eax
+       pushl   %ebp
+       pushl   %edi
+       pushl   %esi
+       pushl   %edx
+       pushl   %ecx
+       pushl   %ebx
+
+       movl    13*4(%esp), %eax                /* Get the saved flags */
+       movl    %eax, 14*4(%esp)                /* Move saved flags into regs->flags location */
+                                               /* clobbering return ip */
+       movl    $__KERNEL_CS, 13*4(%esp)
+
+       movl    12*4(%esp), %eax                /* Load ip (1st parameter) */
+       subl    $MCOUNT_INSN_SIZE, %eax         /* Adjust ip */
+       movl    0x4(%ebp), %edx                 /* Load parent ip (2nd parameter) */
+       movl    function_trace_op, %ecx         /* Save ftrace_pos in 3rd parameter */
+       pushl   %esp                            /* Save pt_regs as 4th parameter */
+
+GLOBAL(ftrace_regs_call)
+       call    ftrace_stub
+
+       addl    $4, %esp                        /* Skip pt_regs */
+       movl    14*4(%esp), %eax                /* Move flags back into cs */
+       movl    %eax, 13*4(%esp)                /* Needed to keep addl  from modifying flags */
+       movl    12*4(%esp), %eax                /* Get return ip from regs->ip */
+       movl    %eax, 14*4(%esp)                /* Put return ip back for ret */
+
+       popl    %ebx
+       popl    %ecx
+       popl    %edx
+       popl    %esi
+       popl    %edi
+       popl    %ebp
+       popl    %eax
+       popl    %ds
+       popl    %es
+       popl    %fs
+       popl    %gs
+       addl    $8, %esp                        /* Skip orig_ax and ip */
+       popf                                    /* Pop flags at end (no addl to corrupt flags) */
+       jmp     ftrace_ret
+
+       popf
+       jmp     ftrace_stub
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+ENTRY(mcount)
+       cmpl    $__PAGE_OFFSET, %esp
+       jb      ftrace_stub                     /* Paging not enabled yet? */
+
+       cmpl    $ftrace_stub, ftrace_trace_function
+       jnz     trace
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+       cmpl    $ftrace_stub, ftrace_graph_return
+       jnz     ftrace_graph_caller
+
+       cmpl    $ftrace_graph_entry_stub, ftrace_graph_entry
+       jnz     ftrace_graph_caller
+#endif
+.globl ftrace_stub
+ftrace_stub:
+       ret
+
+       /* taken from glibc */
+trace:
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       movl    0xc(%esp), %eax
+       movl    0x4(%ebp), %edx
+       subl    $MCOUNT_INSN_SIZE, %eax
+
+       call    *ftrace_trace_function
+
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+       jmp     ftrace_stub
+END(mcount)
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_TRACER */
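All the handlers above rewind the pushed return address by MCOUNT_INSN_SIZE before handing it to the tracer, so tracers see the call site itself rather than the instruction after it. A sketch (5 bytes is the length of a rel32 call on x86):

    #define MCOUNT_INSN_SIZE 5  /* e8 xx xx xx xx: call rel32 */

    /* "call mcount" pushes the address just past itself; the handlers want
     * the address *of* the call, i.e. the traced call site: */
    static unsigned long mcount_call_site(unsigned long return_address)
    {
            return return_address - MCOUNT_INSN_SIZE;
    }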
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(ftrace_graph_caller)
+       pushl   %eax
+       pushl   %ecx
+       pushl   %edx
+       movl    0xc(%esp), %eax
+       lea     0x4(%ebp), %edx
+       movl    (%ebp), %ecx
+       subl    $MCOUNT_INSN_SIZE, %eax
+       call    prepare_ftrace_return
+       popl    %edx
+       popl    %ecx
+       popl    %eax
+       ret
+END(ftrace_graph_caller)
+
+.globl return_to_handler
+return_to_handler:
+       pushl   %eax
+       pushl   %edx
+       movl    %ebp, %eax
+       call    ftrace_return_to_handler
+       movl    %eax, %ecx
+       popl    %edx
+       popl    %eax
+       jmp     *%ecx
+#endif
+
+#ifdef CONFIG_TRACING
+ENTRY(trace_page_fault)
+       ASM_CLAC
+       pushl   $trace_do_page_fault
+       jmp     error_code
+END(trace_page_fault)
+#endif
+
+ENTRY(page_fault)
+       ASM_CLAC
+       pushl   $do_page_fault
+       ALIGN
+error_code:
+       /* the function address is in %gs's slot on the stack */
+       pushl   %fs
+       pushl   %es
+       pushl   %ds
+       pushl   %eax
+       pushl   %ebp
+       pushl   %edi
+       pushl   %esi
+       pushl   %edx
+       pushl   %ecx
+       pushl   %ebx
+       cld
+       movl    $(__KERNEL_PERCPU), %ecx
+       movl    %ecx, %fs
+       UNWIND_ESPFIX_STACK
+       GS_TO_REG %ecx
+       movl    PT_GS(%esp), %edi               # get the function address
+       movl    PT_ORIG_EAX(%esp), %edx         # get the error code
+       movl    $-1, PT_ORIG_EAX(%esp)          # no syscall to restart
+       REG_TO_PTGS %ecx
+       SET_KERNEL_GS %ecx
+       movl    $(__USER_DS), %ecx
+       movl    %ecx, %ds
+       movl    %ecx, %es
+       TRACE_IRQS_OFF
+       movl    %esp, %eax                      # pt_regs pointer
+       call    *%edi
+       jmp     ret_from_exception
+END(page_fault)
+
+/*
+ * Debug traps and NMI can happen at the one SYSENTER instruction
+ * that sets up the real kernel stack. Check here, since we can't
+ * allow the wrong stack to be used.
+ *
+ * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
+ * already pushed 3 words if it hits on the sysenter instruction:
+ * eflags, cs and eip.
+ *
+ * We just load the right stack, and push the three (known) values
+ * by hand onto the new stack - while updating the return eip past
+ * the instruction that would have done it for sysenter.
+ */
+.macro FIX_STACK offset ok label
+       cmpw    $__KERNEL_CS, 4(%esp)
+       jne     \ok
+\label:
+       movl    TSS_sysenter_sp0 + \offset(%esp), %esp
+       pushfl
+       pushl   $__KERNEL_CS
+       pushl   $sysenter_past_esp
+.endm
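+
+/*
+ * Offset arithmetic (sketch): offset 12 compensates for the 3 words the
+ * exception pushed on the tiny SYSENTER stack (eflags, cs, eip); in the
+ * NMI-on-debug case below, the debug fault and the NMI have pushed
+ * 3 words each, so FIX_STACK is used with offset 24 (6 words) instead.
+ */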
+
+ENTRY(debug)
+       ASM_CLAC
+       cmpl    $entry_SYSENTER_32, (%esp)
+       jne     debug_stack_correct
+       FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
+debug_stack_correct:
+       pushl   $-1                             # mark this as an int
+       SAVE_ALL
+       TRACE_IRQS_OFF
+       xorl    %edx, %edx                      # error code 0
+       movl    %esp, %eax                      # pt_regs pointer
+       call    do_debug
+       jmp     ret_from_exception
+END(debug)
+
+/*
+ * NMI is doubly nasty. It can happen _while_ we're handling
+ * a debug fault, and the debug fault hasn't yet been able to
+        * clear up the stack. So we first check whether we got an
+ * NMI on the sysenter entry path, but after that we need to
+ * check whether we got an NMI on the debug path where the debug
+ * fault happened on the sysenter path.
+ */
+ENTRY(nmi)
+       ASM_CLAC
+#ifdef CONFIG_X86_ESPFIX32
+       pushl   %eax
+       movl    %ss, %eax
+       cmpw    $__ESPFIX_SS, %ax
+       popl    %eax
+       je      nmi_espfix_stack
+#endif
+       cmpl    $entry_SYSENTER_32, (%esp)
+       je      nmi_stack_fixup
+       pushl   %eax
+       movl    %esp, %eax
+       /*
+        * Do not access memory above the end of our stack page,
+        * it might not exist.
+        */
+       andl    $(THREAD_SIZE-1), %eax
+       cmpl    $(THREAD_SIZE-20), %eax
+       popl    %eax
+       jae     nmi_stack_correct
+       cmpl    $entry_SYSENTER_32, 12(%esp)
+       je      nmi_debug_stack_check
+nmi_stack_correct:
+       pushl   %eax
+       SAVE_ALL
+       xorl    %edx, %edx                      # zero error code
+       movl    %esp, %eax                      # pt_regs pointer
+       call    do_nmi
+       jmp     restore_all_notrace
+
+nmi_stack_fixup:
+       FIX_STACK 12, nmi_stack_correct, 1
+       jmp     nmi_stack_correct
+
+nmi_debug_stack_check:
+       cmpw    $__KERNEL_CS, 16(%esp)
+       jne     nmi_stack_correct
+       cmpl    $debug, (%esp)
+       jb      nmi_stack_correct
+       cmpl    $debug_esp_fix_insn, (%esp)
+       ja      nmi_stack_correct
+       FIX_STACK 24, nmi_stack_correct, 1
+       jmp     nmi_stack_correct
+
+#ifdef CONFIG_X86_ESPFIX32
+nmi_espfix_stack:
+       /*
+        * Create the ss:esp far pointer that the lss below uses to
+        * switch back to the espfix stack
+        */
+       pushl   %ss
+       pushl   %esp
+       addl    $4, (%esp)
+       /* copy the iret frame of 12 bytes */
+       .rept 3
+       pushl   16(%esp)
+       .endr
+       pushl   %eax
+       SAVE_ALL
+       FIXUP_ESPFIX_STACK                      # %eax == %esp
+       xorl    %edx, %edx                      # zero error code
+       call    do_nmi
+       RESTORE_REGS
+       lss     12+4(%esp), %esp                # back to espfix stack
+       jmp     irq_return
+#endif
+END(nmi)
+
+ENTRY(int3)
+       ASM_CLAC
+       pushl   $-1                             # mark this as an int
+       SAVE_ALL
+       TRACE_IRQS_OFF
+       xorl    %edx, %edx                      # zero error code
+       movl    %esp, %eax                      # pt_regs pointer
+       call    do_int3
+       jmp     ret_from_exception
+END(int3)
+
+ENTRY(general_protection)
+       pushl   $do_general_protection
+       jmp     error_code
+END(general_protection)
+
+#ifdef CONFIG_KVM_GUEST
+ENTRY(async_page_fault)
+       ASM_CLAC
+       pushl   $do_async_page_fault
+       jmp     error_code
+END(async_page_fault)
+#endif
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
new file mode 100644 (file)
index 0000000..3bb2c43
--- /dev/null
@@ -0,0 +1,1458 @@
+/*
+ *  linux/arch/x86_64/entry.S
+ *
+ *  Copyright (C) 1991, 1992  Linus Torvalds
+ *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
+ *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
+ *
+ * entry.S contains the system-call and fault low-level handling routines.
+ *
+ * Some of this is documented in Documentation/x86/entry_64.txt
+ *
+ * A note on terminology:
+ * - iret frame:       Architecture defined interrupt frame from SS to RIP
+ *                     at the top of the kernel process stack.
+ *
+ * Some macro usage:
+ * - ENTRY/END:                Define functions in the symbol table.
+ * - TRACE_IRQ_*:      Trace hardirq state for lock debugging.
+ * - idtentry:         Define exception entry points.
+ */
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/cache.h>
+#include <asm/errno.h>
+#include "calling.h"
+#include <asm/asm-offsets.h>
+#include <asm/msr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/hw_irq.h>
+#include <asm/page_types.h>
+#include <asm/irqflags.h>
+#include <asm/paravirt.h>
+#include <asm/percpu.h>
+#include <asm/asm.h>
+#include <asm/context_tracking.h>
+#include <asm/smap.h>
+#include <asm/pgtable_types.h>
+#include <linux/err.h>
+
+/* Avoid __ASSEMBLY__'ifying <linux/audit.h> just for this.  */
+#include <linux/elf-em.h>
+#define AUDIT_ARCH_X86_64                      (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
+#define __AUDIT_ARCH_64BIT                     0x80000000
+#define __AUDIT_ARCH_LE                                0x40000000
+
+.code64
+.section .entry.text, "ax"
+
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_usergs_sysret64)
+       swapgs
+       sysretq
+ENDPROC(native_usergs_sysret64)
+#endif /* CONFIG_PARAVIRT */
+
+.macro TRACE_IRQS_IRETQ
+#ifdef CONFIG_TRACE_IRQFLAGS
+       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+       jnc     1f
+       TRACE_IRQS_ON
+1:
+#endif
+.endm
+
+/*
+ * When dynamic function tracer is enabled it will add a breakpoint
+ * to all locations that it is about to modify, sync CPUs, update
+ * all the code, sync CPUs, then remove the breakpoints. During this time,
+ * if lockdep is enabled, TRACE_IRQS_ON/OFF might jump back into the
+ * debug handler outside of the IST protection update.
+ *
+ * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
+ * make sure the stack pointer does not get reset back to the top
+ * of the debug stack, and instead just reuses the current stack.
+ */
+#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
+
+.macro TRACE_IRQS_OFF_DEBUG
+       call    debug_stack_set_zero
+       TRACE_IRQS_OFF
+       call    debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_ON_DEBUG
+       call    debug_stack_set_zero
+       TRACE_IRQS_ON
+       call    debug_stack_reset
+.endm
+
+.macro TRACE_IRQS_IRETQ_DEBUG
+       bt      $9, EFLAGS(%rsp)                /* interrupts off? */
+       jnc     1f
+       TRACE_IRQS_ON_DEBUG
+1:
+.endm
+
+#else
+# define TRACE_IRQS_OFF_DEBUG                  TRACE_IRQS_OFF
+# define TRACE_IRQS_ON_DEBUG                   TRACE_IRQS_ON
+# define TRACE_IRQS_IRETQ_DEBUG                        TRACE_IRQS_IRETQ
+#endif
+
+/*
+ * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
+ *
+ * 64-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
+ * then loads new ss, cs, and rip from previously programmed MSRs.
+ * rflags gets masked by a value from another MSR (so CLD and CLAC
+ * are not needed). SYSCALL does not save anything on the stack
+ * and does not change rsp.
+ *
+ * Registers on entry:
+ * rax  system call number
+ * rcx  return address
+ * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
+ * rdi  arg0
+ * rsi  arg1
+ * rdx  arg2
+ * r10  arg3 (needs to be moved to rcx to conform to C ABI)
+ * r8   arg4
+ * r9   arg5
+ * (note: r12-r15, rbp, rbx are callee-preserved in C ABI)
+ *
+ * Only called from user space.
+ *
+ * When the user can change pt_regs->foo, always force IRET. That is
+ * because IRET deals with non-canonical addresses better. SYSRET has
+ * trouble with them due to bugs in both AMD and Intel CPUs.
+ */
+
+ENTRY(entry_SYSCALL_64)
+       /*
+        * Interrupts are off on entry.
+        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+        * it is too small to ever cause noticeable irq latency.
+        */
+       SWAPGS_UNSAFE_STACK
+       /*
+        * A hypervisor implementation might want to use a label
+        * after the swapgs, so that it can do the swapgs
+        * for the guest and jump here on syscall.
+        */
+GLOBAL(entry_SYSCALL_64_after_swapgs)
+
+       movq    %rsp, PER_CPU_VAR(rsp_scratch)
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+
+       /* Construct struct pt_regs on stack */
+       pushq   $__USER_DS                      /* pt_regs->ss */
+       pushq   PER_CPU_VAR(rsp_scratch)        /* pt_regs->sp */
+       /*
+        * Re-enable interrupts.
+        * We use 'rsp_scratch' as a scratch space, hence the irq-off block above
+        * must execute atomically in the face of possible interrupt-driven
+        * task preemption. We must enable interrupts only after we're done
+        * with using rsp_scratch:
+        */
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       pushq   %r11                            /* pt_regs->flags */
+       pushq   $__USER_CS                      /* pt_regs->cs */
+       pushq   %rcx                            /* pt_regs->ip */
+       pushq   %rax                            /* pt_regs->orig_ax */
+       pushq   %rdi                            /* pt_regs->di */
+       pushq   %rsi                            /* pt_regs->si */
+       pushq   %rdx                            /* pt_regs->dx */
+       pushq   %rcx                            /* pt_regs->cx */
+       pushq   $-ENOSYS                        /* pt_regs->ax */
+       pushq   %r8                             /* pt_regs->r8 */
+       pushq   %r9                             /* pt_regs->r9 */
+       pushq   %r10                            /* pt_regs->r10 */
+       pushq   %r11                            /* pt_regs->r11 */
+       sub     $(6*8), %rsp                    /* pt_regs->bp, bx, r12-15 not saved */
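+       /*
+        * Sketch of the frame just built, from the top of the kernel
+        * stack downwards:
+        *
+        *      ss, rsp, rflags (from r11), cs, rip (from rcx), orig_ax,
+        *      di, si, dx, cx, ax (-ENOSYS), r8, r9, r10, r11,
+        *      then 6 unsaved slots reserved for bp, bx and r12-r15.
+        */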
+
+       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     tracesys
+entry_SYSCALL_64_fastpath:
+#if __SYSCALL_MASK == ~0
+       cmpq    $__NR_syscall_max, %rax
+#else
+       andl    $__SYSCALL_MASK, %eax
+       cmpl    $__NR_syscall_max, %eax
+#endif
+       ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
+       movq    %r10, %rcx
+       call    *sys_call_table(, %rax, 8)
+       movq    %rax, RAX(%rsp)
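+       /*
+        * In C terms the dispatch above is roughly (sketch, nr being the
+        * syscall number from rax):
+        *
+        *      regs->ax = sys_call_table[nr](di, si, dx, r10, r8, r9);
+        *
+        * with r10 moved into rcx to match the C ABI.
+        */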
+1:
+/*
+ * Syscall return path ending with SYSRET (fast path).
+ * Has incompletely filled pt_regs.
+ */
+       LOCKDEP_SYS_EXIT
+       /*
+        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+        * it is too small to ever cause noticeable irq latency.
+        */
+       DISABLE_INTERRUPTS(CLBR_NONE)
+
+       /*
+        * We must check ti flags with interrupts (or at least preemption)
+        * off because we must *never* return to userspace without
+        * processing exit work that is enqueued if we're preempted here.
+        * In particular, returning to userspace with any of the one-shot
+        * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
+        * very bad.
+        */
+       testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     int_ret_from_sys_call_irqs_off  /* Go to the slow path */
+
+       RESTORE_C_REGS_EXCEPT_RCX_R11
+       movq    RIP(%rsp), %rcx
+       movq    EFLAGS(%rsp), %r11
+       movq    RSP(%rsp), %rsp
+       /*
+        * 64-bit SYSRET restores rip from rcx,
+        * rflags from r11 (but RF and VM bits are forced to 0),
+        * cs and ss are loaded from MSRs.
+        * Restoration of rflags re-enables interrupts.
+        *
+        * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
+        * descriptor is not reinitialized.  This means that we should
+        * avoid SYSRET with SS == NULL, which could happen if we schedule,
+        * exit the kernel, and re-enter using an interrupt vector.  (All
+        * interrupt entries on x86_64 set SS to NULL.)  We prevent that
+        * from happening by reloading SS in __switch_to.  (Actually
+        * detecting the failure in 64-bit userspace is tricky but can be
+        * done.)
+        */
+       USERGS_SYSRET64
+
+       /* Do syscall entry tracing */
+tracesys:
+       movq    %rsp, %rdi
+       movl    $AUDIT_ARCH_X86_64, %esi
+       call    syscall_trace_enter_phase1
+       test    %rax, %rax
+       jnz     tracesys_phase2                 /* if needed, run the slow path */
+       RESTORE_C_REGS_EXCEPT_RAX               /* else restore clobbered regs */
+       movq    ORIG_RAX(%rsp), %rax
+       jmp     entry_SYSCALL_64_fastpath       /* and return to the fast path */
+
+tracesys_phase2:
+       SAVE_EXTRA_REGS
+       movq    %rsp, %rdi
+       movl    $AUDIT_ARCH_X86_64, %esi
+       movq    %rax, %rdx
+       call    syscall_trace_enter_phase2
+
+       /*
+        * Reload registers from stack in case ptrace changed them.
+        * We don't reload %rax because syscall_trace_enter_phase2() returned
+        * the value it wants us to use in the table lookup.
+        */
+       RESTORE_C_REGS_EXCEPT_RAX
+       RESTORE_EXTRA_REGS
+#if __SYSCALL_MASK == ~0
+       cmpq    $__NR_syscall_max, %rax
+#else
+       andl    $__SYSCALL_MASK, %eax
+       cmpl    $__NR_syscall_max, %eax
+#endif
+       ja      1f                              /* return -ENOSYS (already in pt_regs->ax) */
+       movq    %r10, %rcx                      /* fixup for C */
+       call    *sys_call_table(, %rax, 8)
+       movq    %rax, RAX(%rsp)
+1:
+       /* Use IRET because user could have changed pt_regs->foo */
+
+/*
+ * Syscall return path ending with IRET.
+ * Has correct iret frame.
+ */
+GLOBAL(int_ret_from_sys_call)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
+       TRACE_IRQS_OFF
+       movl    $_TIF_ALLWORK_MASK, %edi
+       /* edi: mask to check */
+GLOBAL(int_with_check)
+       LOCKDEP_SYS_EXIT_IRQ
+       GET_THREAD_INFO(%rcx)
+       movl    TI_flags(%rcx), %edx
+       andl    %edi, %edx
+       jnz     int_careful
+       andl    $~TS_COMPAT, TI_status(%rcx)
+       jmp     syscall_return
+
+       /*
+        * Either a reschedule, a signal, or syscall exit tracing is needed.
+        * First do a reschedule test.
+        * edx: work, edi: workmask
+        */
+int_careful:
+       bt      $TIF_NEED_RESCHED, %edx
+       jnc     int_very_careful
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       pushq   %rdi
+       SCHEDULE_USER
+       popq    %rdi
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       jmp     int_with_check
+
+       /* handle signals and tracing -- both require a full pt_regs */
+int_very_careful:
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       SAVE_EXTRA_REGS
+       /* Check for syscall exit trace */
+       testl   $_TIF_WORK_SYSCALL_EXIT, %edx
+       jz      int_signal
+       pushq   %rdi
+       leaq    8(%rsp), %rdi                   /* &ptregs -> arg1 */
+       call    syscall_trace_leave
+       popq    %rdi
+       andl    $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU), %edi
+       jmp     int_restore_rest
+
+int_signal:
+       testl   $_TIF_DO_NOTIFY_MASK, %edx
+       jz      1f
+       movq    %rsp, %rdi                      /* &ptregs -> arg1 */
+       xorl    %esi, %esi                      /* oldset -> arg2 */
+       call    do_notify_resume
+1:     movl    $_TIF_WORK_MASK, %edi
+int_restore_rest:
+       RESTORE_EXTRA_REGS
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       jmp     int_with_check
+
+syscall_return:
+       /* The IRETQ could re-enable interrupts: */
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       TRACE_IRQS_IRETQ
+
+       /*
+        * Try to use SYSRET instead of IRET if we're returning to
+        * a completely clean 64-bit userspace context.
+        */
+       movq    RCX(%rsp), %rcx
+       movq    RIP(%rsp), %r11
+       cmpq    %rcx, %r11                      /* RCX == RIP */
+       jne     opportunistic_sysret_failed
+
+       /*
+        * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
+        * in kernel space.  This essentially lets the user take over
+        * the kernel, since userspace controls RSP.
+        *
+        * If width of "canonical tail" ever becomes variable, this will need
+        * to be updated to remain correct on both old and new CPUs.
+        */
+       .ifne __VIRTUAL_MASK_SHIFT - 47
+       .error "virtual address width changed -- SYSRET checks need update"
+       .endif
+
+       /* Change top 16 bits to be the sign-extension of 47th bit */
+       shl     $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+       sar     $(64 - (__VIRTUAL_MASK_SHIFT+1)), %rcx
+
+       /* If this changed %rcx, it was not canonical */
+       cmpq    %rcx, %r11
+       jne     opportunistic_sysret_failed
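+
+       /*
+        * The shift pair above is, in C terms (sketch, for a 48-bit
+        * virtual address width, i.e. __VIRTUAL_MASK_SHIFT == 47):
+        *
+        *      canonical = ((s64)(rcx << 16) >> 16) == (s64)rcx;
+        *
+        * i.e. bits 63..47 must all equal bit 47.
+        */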
+
+       cmpq    $__USER_CS, CS(%rsp)            /* CS must match SYSRET */
+       jne     opportunistic_sysret_failed
+
+       movq    R11(%rsp), %r11
+       cmpq    %r11, EFLAGS(%rsp)              /* R11 == RFLAGS */
+       jne     opportunistic_sysret_failed
+
+       /*
+        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
+        * restoring TF results in a trap from userspace immediately after
+        * SYSRET.  This would cause an infinite loop whenever #DB happens
+        * with register state that satisfies the opportunistic SYSRET
+        * conditions.  For example, single-stepping this user code:
+        *
+        *           movq       $stuck_here, %rcx
+        *           pushfq
+        *           popq %r11
+        *   stuck_here:
+        *
+        * would never get past 'stuck_here'.
+        */
+       testq   $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
+       jnz     opportunistic_sysret_failed
+
+       /* nothing to check for RSP */
+
+       cmpq    $__USER_DS, SS(%rsp)            /* SS must match SYSRET */
+       jne     opportunistic_sysret_failed
+
+       /*
+        * We win! This label is here just for ease of understanding
+        * perf profiles. Nothing jumps here.
+        */
+syscall_return_via_sysret:
+       /* rcx and r11 are already restored (see code above) */
+       RESTORE_C_REGS_EXCEPT_RCX_R11
+       movq    RSP(%rsp), %rsp
+       USERGS_SYSRET64
+
+opportunistic_sysret_failed:
+       SWAPGS
+       jmp     restore_c_regs_and_iret
+END(entry_SYSCALL_64)
+
+
+       .macro FORK_LIKE func
+ENTRY(stub_\func)
+       SAVE_EXTRA_REGS 8
+       jmp     sys_\func
+END(stub_\func)
+       .endm
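+
+       /*
+        * For example, "FORK_LIKE clone" below expands to (sketch):
+        *
+        *      ENTRY(stub_clone)
+        *              SAVE_EXTRA_REGS 8
+        *              jmp sys_clone
+        *      END(stub_clone)
+        */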
+
+       FORK_LIKE  clone
+       FORK_LIKE  fork
+       FORK_LIKE  vfork
+
+ENTRY(stub_execve)
+       call    sys_execve
+return_from_execve:
+       testl   %eax, %eax
+       jz      1f
+       /* exec failed, can use fast SYSRET code path in this case */
+       ret
+1:
+       /* must use IRET code path (pt_regs->cs may have changed) */
+       addq    $8, %rsp
+       ZERO_EXTRA_REGS
+       movq    %rax, RAX(%rsp)
+       jmp     int_ret_from_sys_call
+END(stub_execve)
+/*
+ * Remaining execve stubs are only 7 bytes long.
+ * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
+ */
+       .align  8
+GLOBAL(stub_execveat)
+       call    sys_execveat
+       jmp     return_from_execve
+END(stub_execveat)
+
+#if defined(CONFIG_X86_X32_ABI) || defined(CONFIG_IA32_EMULATION)
+       .align  8
+GLOBAL(stub_x32_execve)
+GLOBAL(stub32_execve)
+       call    compat_sys_execve
+       jmp     return_from_execve
+END(stub32_execve)
+END(stub_x32_execve)
+       .align  8
+GLOBAL(stub_x32_execveat)
+GLOBAL(stub32_execveat)
+       call    compat_sys_execveat
+       jmp     return_from_execve
+END(stub32_execveat)
+END(stub_x32_execveat)
+#endif
+
+/*
+ * sigreturn is special because it needs to restore all registers on return.
+ * This cannot be done with SYSRET, so use the IRET return path instead.
+ */
+ENTRY(stub_rt_sigreturn)
+       /*
+        * SAVE_EXTRA_REGS result is not normally needed:
+        * sigreturn overwrites all pt_regs->GPREGS.
+        * But sigreturn can fail (!), and there is no easy way to detect that.
+        * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
+        * we SAVE_EXTRA_REGS here.
+        */
+       SAVE_EXTRA_REGS 8
+       call    sys_rt_sigreturn
+return_from_stub:
+       addq    $8, %rsp
+       RESTORE_EXTRA_REGS
+       movq    %rax, RAX(%rsp)
+       jmp     int_ret_from_sys_call
+END(stub_rt_sigreturn)
+
+#ifdef CONFIG_X86_X32_ABI
+ENTRY(stub_x32_rt_sigreturn)
+       SAVE_EXTRA_REGS 8
+       call    sys32_x32_rt_sigreturn
+       jmp     return_from_stub
+END(stub_x32_rt_sigreturn)
+#endif
+
+/*
+ * A newly forked process directly context switches into this address.
+ *
+ * rdi: prev task we switched from
+ */
+ENTRY(ret_from_fork)
+
+       LOCK ; btr $TIF_FORK, TI_flags(%r8)
+
+       pushq   $0x0002
+       popfq                                   /* reset kernel eflags */
+
+       call    schedule_tail                   /* rdi: 'prev' task parameter */
+
+       RESTORE_EXTRA_REGS
+
+       testb   $3, CS(%rsp)                    /* from kernel_thread? */
+
+       /*
+        * By the time we get here, we have no idea whether our pt_regs,
+        * ti flags, and ti status came from the 64-bit SYSCALL fast path,
+        * the slow path, or one of the 32-bit compat paths.
+        * Use IRET code path to return, since it can safely handle
+        * all of the above.
+        */
+       jnz     int_ret_from_sys_call
+
+       /*
+        * We came from kernel_thread
+        * nb: we depend on RESTORE_EXTRA_REGS above
+        */
+       movq    %rbp, %rdi
+       call    *%rbx
+       movl    $0, RAX(%rsp)
+       RESTORE_EXTRA_REGS
+       jmp     int_ret_from_sys_call
+END(ret_from_fork)
+
+/*
+ * Build the entry stubs with some assembler magic.
+ * We pack 1 stub into every 8-byte block.
+ */
+       .align 8
+ENTRY(irq_entries_start)
+    vector=FIRST_EXTERNAL_VECTOR
+    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
+       pushq   $(~vector+0x80)                 /* Note: always in signed byte range */
+    vector=vector+1
+       jmp     common_interrupt
+       .align  8
+    .endr
+END(irq_entries_start)
+
+/*
+ * Interrupt entry/exit.
+ *
+ * Interrupt entry points save only callee clobbered registers in fast path.
+ *
+ * Entry runs with interrupts off.
+ */
+
+/* 0(%rsp): ~(interrupt number) */
+       .macro interrupt func
+       cld
+       /*
+        * Since nothing in interrupt handling code touches r12...r15 members
+        * of "struct pt_regs", and since interrupts can nest, we can save
+        * four stack slots and simultaneously provide
+        * an unwind-friendly stack layout by saving "truncated" pt_regs
+        * exactly up to rbp slot, without these members.
+        */
+       ALLOC_PT_GPREGS_ON_STACK -RBP
+       SAVE_C_REGS -RBP
+       /* this goes to 0(%rsp) for unwinder, not for saving the value: */
+       SAVE_EXTRA_REGS_RBP -RBP
+
+       leaq    -RBP(%rsp), %rdi                /* arg1 for \func (pointer to pt_regs) */
+
+       testb   $3, CS-RBP(%rsp)
+       jz      1f
+       SWAPGS
+1:
+       /*
+        * Save previous stack pointer, optionally switch to interrupt stack.
+        * irq_count is used to check if a CPU is already on an interrupt stack
+        * or not. While this is essentially redundant with preempt_count it is
+        * a little cheaper to use a separate counter in the PDA (short of
+        * moving irq_enter into assembly, which would be too much work)
+        */
+       movq    %rsp, %rsi
+       incl    PER_CPU_VAR(irq_count)
+       cmovzq  PER_CPU_VAR(irq_stack_ptr), %rsp
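+       /*
+        * irq_count starts at -1, so the incl above yields 0 (and sets ZF)
+        * only for the outermost interrupt; cmovzq therefore switches to
+        * the per-CPU interrupt stack only in that case.
+        */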
+       pushq   %rsi
+       /* We entered an interrupt context - irqs are off: */
+       TRACE_IRQS_OFF
+
+       call    \func
+       .endm
+
+       /*
+        * The interrupt stubs push (~vector+0x80) onto the stack and
+        * then jump to common_interrupt.
+        */
+       .p2align CONFIG_X86_L1_CACHE_SHIFT
+common_interrupt:
+       ASM_CLAC
+       addq    $-0x80, (%rsp)                  /* Adjust vector to [-256, -1] range */
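+       /*
+        * Worked example (sketch): for vector 0x20 the stub pushed
+        * ~0x20 + 0x80 = 0x5f, which fits in a signed byte; the addq
+        * above subtracts the 0x80 again, leaving ~0x20 = -33, i.e.
+        * -(vector + 1), within the expected [-256, -1] range.
+        */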
+       interrupt do_IRQ
+       /* 0(%rsp): old RSP */
+ret_from_intr:
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       decl    PER_CPU_VAR(irq_count)
+
+       /* Restore saved previous stack */
+       popq    %rsi
+       /* return code expects complete pt_regs - adjust rsp accordingly: */
+       leaq    -RBP(%rsi), %rsp
+
+       testb   $3, CS(%rsp)
+       jz      retint_kernel
+       /* Interrupt came from user space */
+retint_user:
+       GET_THREAD_INFO(%rcx)
+
+       /* %rcx: thread info. Interrupts are off. */
+retint_with_reschedule:
+       movl    $_TIF_WORK_MASK, %edi
+retint_check:
+       LOCKDEP_SYS_EXIT_IRQ
+       movl    TI_flags(%rcx), %edx
+       andl    %edi, %edx
+       jnz     retint_careful
+
+retint_swapgs:                                 /* return to user-space */
+       /*
+        * The iretq could re-enable interrupts:
+        */
+       DISABLE_INTERRUPTS(CLBR_ANY)
+       TRACE_IRQS_IRETQ
+
+       SWAPGS
+       jmp     restore_c_regs_and_iret
+
+/* Returning to kernel space */
+retint_kernel:
+#ifdef CONFIG_PREEMPT
+       /* Interrupts are off */
+       /* Check if we need preemption */
+       bt      $9, EFLAGS(%rsp)                /* were interrupts off? */
+       jnc     1f
+0:     cmpl    $0, PER_CPU_VAR(__preempt_count)
+       jnz     1f
+       call    preempt_schedule_irq
+       jmp     0b
+1:
+#endif
+       /*
+        * The iretq could re-enable interrupts:
+        */
+       TRACE_IRQS_IRETQ
+
+/*
+ * At this label, the code paths which return to kernel and to user,
+ * coming from interrupts/exceptions and from syscalls, merge.
+ */
+restore_c_regs_and_iret:
+       RESTORE_C_REGS
+       REMOVE_PT_GPREGS_FROM_STACK 8
+       INTERRUPT_RETURN
+
+ENTRY(native_iret)
+       /*
+        * Are we returning to a stack segment from the LDT?  Note: in
+        * 64-bit mode SS:RSP on the exception stack is always valid.
+        */
+#ifdef CONFIG_X86_ESPFIX64
+       testb   $4, (SS-RIP)(%rsp)
+       jnz     native_irq_return_ldt
+#endif
+
+.global native_irq_return_iret
+native_irq_return_iret:
+       /*
+        * This may fault.  Non-paranoid faults on return to userspace are
+        * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
+        * Double-faults due to espfix64 are handled in do_double_fault.
+        * Other faults here are fatal.
+        */
+       iretq
+
+#ifdef CONFIG_X86_ESPFIX64
+native_irq_return_ldt:
+       pushq   %rax
+       pushq   %rdi
+       SWAPGS
+       movq    PER_CPU_VAR(espfix_waddr), %rdi
+       movq    %rax, (0*8)(%rdi)               /* RAX */
+       movq    (2*8)(%rsp), %rax               /* RIP */
+       movq    %rax, (1*8)(%rdi)
+       movq    (3*8)(%rsp), %rax               /* CS */
+       movq    %rax, (2*8)(%rdi)
+       movq    (4*8)(%rsp), %rax               /* RFLAGS */
+       movq    %rax, (3*8)(%rdi)
+       movq    (6*8)(%rsp), %rax               /* SS */
+       movq    %rax, (5*8)(%rdi)
+       movq    (5*8)(%rsp), %rax               /* RSP */
+       movq    %rax, (4*8)(%rdi)
+       andl    $0xffff0000, %eax
+       popq    %rdi
+       orq     PER_CPU_VAR(espfix_stack), %rax
+       SWAPGS
+       movq    %rax, %rsp
+       popq    %rax
+       jmp     native_irq_return_iret
+#endif
+
+       /* edi: workmask, edx: work */
+retint_careful:
+       bt      $TIF_NEED_RESCHED, %edx
+       jnc     retint_signal
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       pushq   %rdi
+       SCHEDULE_USER
+       popq    %rdi
+       GET_THREAD_INFO(%rcx)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       jmp     retint_check
+
+retint_signal:
+       testl   $_TIF_DO_NOTIFY_MASK, %edx
+       jz      retint_swapgs
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       SAVE_EXTRA_REGS
+       movq    $-1, ORIG_RAX(%rsp)
+       xorl    %esi, %esi                      /* oldset */
+       movq    %rsp, %rdi                      /* &pt_regs */
+       call    do_notify_resume
+       RESTORE_EXTRA_REGS
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       GET_THREAD_INFO(%rcx)
+       jmp     retint_with_reschedule
+
+END(common_interrupt)
+
+/*
+ * APIC interrupts.
+ */
+.macro apicinterrupt3 num sym do_sym
+ENTRY(\sym)
+       ASM_CLAC
+       pushq   $~(\num)
+.Lcommon_\sym:
+       interrupt \do_sym
+       jmp     ret_from_intr
+END(\sym)
+.endm
+
+#ifdef CONFIG_TRACING
+#define trace(sym) trace_##sym
+#define smp_trace(sym) smp_trace_##sym
+
+.macro trace_apicinterrupt num sym
+apicinterrupt3 \num trace(\sym) smp_trace(\sym)
+.endm
+#else
+.macro trace_apicinterrupt num sym do_sym
+.endm
+#endif
+
+.macro apicinterrupt num sym do_sym
+apicinterrupt3 \num \sym \do_sym
+trace_apicinterrupt \num \sym
+.endm
+
+#ifdef CONFIG_SMP
+apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR         irq_move_cleanup_interrupt      smp_irq_move_cleanup_interrupt
+apicinterrupt3 REBOOT_VECTOR                   reboot_interrupt                smp_reboot_interrupt
+#endif
+
+#ifdef CONFIG_X86_UV
+apicinterrupt3 UV_BAU_MESSAGE                  uv_bau_message_intr1            uv_bau_message_interrupt
+#endif
+
+apicinterrupt LOCAL_TIMER_VECTOR               apic_timer_interrupt            smp_apic_timer_interrupt
+apicinterrupt X86_PLATFORM_IPI_VECTOR          x86_platform_ipi                smp_x86_platform_ipi
+
+#ifdef CONFIG_HAVE_KVM
+apicinterrupt3 POSTED_INTR_VECTOR              kvm_posted_intr_ipi             smp_kvm_posted_intr_ipi
+apicinterrupt3 POSTED_INTR_WAKEUP_VECTOR       kvm_posted_intr_wakeup_ipi      smp_kvm_posted_intr_wakeup_ipi
+#endif
+
+#ifdef CONFIG_X86_MCE_THRESHOLD
+apicinterrupt THRESHOLD_APIC_VECTOR            threshold_interrupt             smp_threshold_interrupt
+#endif
+
+#ifdef CONFIG_X86_MCE_AMD
+apicinterrupt DEFERRED_ERROR_VECTOR            deferred_error_interrupt        smp_deferred_error_interrupt
+#endif
+
+#ifdef CONFIG_X86_THERMAL_VECTOR
+apicinterrupt THERMAL_APIC_VECTOR              thermal_interrupt               smp_thermal_interrupt
+#endif
+
+#ifdef CONFIG_SMP
+apicinterrupt CALL_FUNCTION_SINGLE_VECTOR      call_function_single_interrupt  smp_call_function_single_interrupt
+apicinterrupt CALL_FUNCTION_VECTOR             call_function_interrupt         smp_call_function_interrupt
+apicinterrupt RESCHEDULE_VECTOR                        reschedule_interrupt            smp_reschedule_interrupt
+#endif
+
+apicinterrupt ERROR_APIC_VECTOR                        error_interrupt                 smp_error_interrupt
+apicinterrupt SPURIOUS_APIC_VECTOR             spurious_interrupt              smp_spurious_interrupt
+
+#ifdef CONFIG_IRQ_WORK
+apicinterrupt IRQ_WORK_VECTOR                  irq_work_interrupt              smp_irq_work_interrupt
+#endif
+
+/*
+ * Exception entry points.
+ */
+#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
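+
+/*
+ * For instance (sketch), CPU_TSS_IST(1) addresses this CPU's
+ * cpu_tss.x86_tss.ist[0]; the "(x) - 1" converts the 1-based IST
+ * numbering into the 0-based array index.
+ */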
+
+.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
+ENTRY(\sym)
+       /* Sanity check */
+       .if \shift_ist != -1 && \paranoid == 0
+       .error "using shift_ist requires paranoid=1"
+       .endif
+
+       ASM_CLAC
+       PARAVIRT_ADJUST_EXCEPTION_FRAME
+
+       .ifeq \has_error_code
+       pushq   $-1                             /* ORIG_RAX: no syscall to restart */
+       .endif
+
+       ALLOC_PT_GPREGS_ON_STACK
+
+       .if \paranoid
+       .if \paranoid == 1
+       testb   $3, CS(%rsp)                    /* If coming from userspace, switch stacks */
+       jnz     1f
+       .endif
+       call    paranoid_entry
+       .else
+       call    error_entry
+       .endif
+       /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
+
+       .if \paranoid
+       .if \shift_ist != -1
+       TRACE_IRQS_OFF_DEBUG                    /* reload IDT in case of recursion */
+       .else
+       TRACE_IRQS_OFF
+       .endif
+       .endif
+
+       movq    %rsp, %rdi                      /* pt_regs pointer */
+
+       .if \has_error_code
+       movq    ORIG_RAX(%rsp), %rsi            /* get error code */
+       movq    $-1, ORIG_RAX(%rsp)             /* no syscall to restart */
+       .else
+       xorl    %esi, %esi                      /* no error code */
+       .endif
+
+       .if \shift_ist != -1
+       subq    $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+       .endif
+
+       call    \do_sym
+
+       .if \shift_ist != -1
+       addq    $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
+       .endif
+
+       /* these procedures expect "no swapgs" flag in ebx */
+       .if \paranoid
+       jmp     paranoid_exit
+       .else
+       jmp     error_exit
+       .endif
+
+       .if \paranoid == 1
+       /*
+        * Paranoid entry from userspace.  Switch stacks and treat it
+        * as a normal entry.  This means that paranoid handlers
+        * run in real process context if user_mode(regs).
+        */
+1:
+       call    error_entry
+
+       movq    %rsp, %rdi                      /* pt_regs pointer */
+       call    sync_regs
+       movq    %rax, %rsp                      /* switch stack */
+
+       movq    %rsp, %rdi                      /* pt_regs pointer */
+
+       .if \has_error_code
+       movq    ORIG_RAX(%rsp), %rsi            /* get error code */
+       movq    $-1, ORIG_RAX(%rsp)             /* no syscall to restart */
+       .else
+       xorl    %esi, %esi                      /* no error code */
+       .endif
+
+       call    \do_sym
+
+       jmp     error_exit                      /* %ebx: no swapgs flag */
+       .endif
+END(\sym)
+.endm
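+
+/*
+ * As an illustration (sketch), "idtentry overflow do_overflow
+ * has_error_code=0" below emits an ENTRY(overflow) that pushes -1 into
+ * the ORIG_RAX slot, saves registers via error_entry, calls
+ * do_overflow(regs, 0), and jumps to error_exit.
+ */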
+
+#ifdef CONFIG_TRACING
+.macro trace_idtentry sym do_sym has_error_code:req
+idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
+idtentry \sym \do_sym has_error_code=\has_error_code
+.endm
+#else
+.macro trace_idtentry sym do_sym has_error_code:req
+idtentry \sym \do_sym has_error_code=\has_error_code
+.endm
+#endif
+
+idtentry divide_error                  do_divide_error                 has_error_code=0
+idtentry overflow                      do_overflow                     has_error_code=0
+idtentry bounds                                do_bounds                       has_error_code=0
+idtentry invalid_op                    do_invalid_op                   has_error_code=0
+idtentry device_not_available          do_device_not_available         has_error_code=0
+idtentry double_fault                  do_double_fault                 has_error_code=1 paranoid=2
+idtentry coprocessor_segment_overrun   do_coprocessor_segment_overrun  has_error_code=0
+idtentry invalid_TSS                   do_invalid_TSS                  has_error_code=1
+idtentry segment_not_present           do_segment_not_present          has_error_code=1
+idtentry spurious_interrupt_bug                do_spurious_interrupt_bug       has_error_code=0
+idtentry coprocessor_error             do_coprocessor_error            has_error_code=0
+idtentry alignment_check               do_alignment_check              has_error_code=1
+idtentry simd_coprocessor_error                do_simd_coprocessor_error       has_error_code=0
+
+
+       /*
+        * Reload gs selector with exception handling
+        * edi:  new selector
+        */
+ENTRY(native_load_gs_index)
+       pushfq
+       DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
+       SWAPGS
+gs_change:
+       movl    %edi, %gs
+2:     mfence                                  /* workaround */
+       SWAPGS
+       popfq
+       ret
+END(native_load_gs_index)
+
+       _ASM_EXTABLE(gs_change, bad_gs)
+       .section .fixup, "ax"
+       /* running with kernelgs */
+bad_gs:
+       SWAPGS                                  /* switch back to user gs */
+       xorl    %eax, %eax
+       movl    %eax, %gs
+       jmp     2b
+       .previous
+
+/* Call softirq on interrupt stack. Interrupts are off. */
+ENTRY(do_softirq_own_stack)
+       pushq   %rbp
+       mov     %rsp, %rbp
+       incl    PER_CPU_VAR(irq_count)
+       cmove   PER_CPU_VAR(irq_stack_ptr), %rsp
+       push    %rbp                            /* frame pointer backlink */
+       call    __do_softirq
+       leaveq
+       decl    PER_CPU_VAR(irq_count)
+       ret
+END(do_softirq_own_stack)
+
+#ifdef CONFIG_XEN
+idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
+
+/*
+ * A note on the "critical region" in our callback handler.
+ * We want to avoid stacking callback handlers due to events occurring
+ * during handling of the last event. To do this, we keep events disabled
+ * until we've done all processing. HOWEVER, we must enable events before
+ * popping the stack frame (can't be done atomically) and so it would still
+ * be possible to get enough handler activations to overflow the stack.
+ * Although unlikely, bugs of that kind are hard to track down, so we'd
+ * like to avoid the possibility.
+ * So, on entry to the handler we detect whether we interrupted an
+ * existing activation in its critical region -- if so, we pop the current
+ * activation and restart the handler using the previous one.
+ */
+ENTRY(xen_do_hypervisor_callback)              /* do_hypervisor_callback(struct pt_regs *) */
+
+/*
+ * Since we don't modify %rdi, xen_evtchn_do_upcall(struct pt_regs *) will
+ * see the correct pointer to the pt_regs
+ */
+       movq    %rdi, %rsp                      /* we don't return, adjust the stack frame */
+11:    incl    PER_CPU_VAR(irq_count)
+       movq    %rsp, %rbp
+       cmovzq  PER_CPU_VAR(irq_stack_ptr), %rsp
+       pushq   %rbp                            /* frame pointer backlink */
+       call    xen_evtchn_do_upcall
+       popq    %rsp
+       decl    PER_CPU_VAR(irq_count)
+#ifndef CONFIG_PREEMPT
+       call    xen_maybe_preempt_hcall
+#endif
+       jmp     error_exit
+END(xen_do_hypervisor_callback)
+
+/*
+ * Hypervisor uses this for application faults while it executes.
+ * We get here for two reasons:
+ *  1. Fault while reloading DS, ES, FS or GS
+ *  2. Fault while executing IRET
+ * Category 1 we do not need to fix up as Xen has already reloaded all segment
+ * registers that could be reloaded and zeroed the others.
+ * Category 2 we fix up by killing the current process. We cannot use the
+ * normal Linux return path in this case because if we use the IRET hypercall
+ * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
+ * We distinguish between categories by comparing each saved segment register
+ * with its current contents: any discrepancy means we are in category 1.
+ */
+ENTRY(xen_failsafe_callback)
+       movl    %ds, %ecx
+       cmpw    %cx, 0x10(%rsp)
+       jne     1f
+       movl    %es, %ecx
+       cmpw    %cx, 0x18(%rsp)
+       jne     1f
+       movl    %fs, %ecx
+       cmpw    %cx, 0x20(%rsp)
+       jne     1f
+       movl    %gs, %ecx
+       cmpw    %cx, 0x28(%rsp)
+       jne     1f
+       /* All segments match their saved values => Category 2 (Bad IRET). */
+       movq    (%rsp), %rcx
+       movq    8(%rsp), %r11
+       addq    $0x30, %rsp
+       pushq   $0                              /* RIP */
+       pushq   %r11
+       pushq   %rcx
+       jmp     general_protection
+1:     /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
+       movq    (%rsp), %rcx
+       movq    8(%rsp), %r11
+       addq    $0x30, %rsp
+       pushq   $-1 /* orig_ax = -1 => not a system call */
+       ALLOC_PT_GPREGS_ON_STACK
+       SAVE_C_REGS
+       SAVE_EXTRA_REGS
+       jmp     error_exit
+END(xen_failsafe_callback)
+
+apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+       xen_hvm_callback_vector xen_evtchn_do_upcall
+
+#endif /* CONFIG_XEN */
+
+#if IS_ENABLED(CONFIG_HYPERV)
+apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
+       hyperv_callback_vector hyperv_vector_handler
+#endif /* CONFIG_HYPERV */
+
+idtentry debug                 do_debug                has_error_code=0        paranoid=1 shift_ist=DEBUG_STACK
+idtentry int3                  do_int3                 has_error_code=0        paranoid=1 shift_ist=DEBUG_STACK
+idtentry stack_segment         do_stack_segment        has_error_code=1
+
+#ifdef CONFIG_XEN
+idtentry xen_debug             do_debug                has_error_code=0
+idtentry xen_int3              do_int3                 has_error_code=0
+idtentry xen_stack_segment     do_stack_segment        has_error_code=1
+#endif
+
+idtentry general_protection    do_general_protection   has_error_code=1
+trace_idtentry page_fault      do_page_fault           has_error_code=1
+
+#ifdef CONFIG_KVM_GUEST
+idtentry async_page_fault      do_async_page_fault     has_error_code=1
+#endif
+
+#ifdef CONFIG_X86_MCE
+idtentry machine_check                                 has_error_code=0        paranoid=1 do_sym=*machine_check_vector(%rip)
+#endif
+
+/*
+ * Save all registers in pt_regs, and switch gs if needed.
+ * Use slow, but surefire "are we in kernel?" check.
+ * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
+ */
+ENTRY(paranoid_entry)
+       cld
+       SAVE_C_REGS 8
+       SAVE_EXTRA_REGS 8
+       movl    $1, %ebx
+       movl    $MSR_GS_BASE, %ecx
+       rdmsr
+       testl   %edx, %edx
+       js      1f                              /* negative -> in kernel */
+       SWAPGS
+       xorl    %ebx, %ebx
+1:     ret
+END(paranoid_entry)
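+
+/*
+ * The MSR check above in C terms (sketch): kernel GS bases are kernel
+ * addresses, whose top bit is set, so
+ *
+ *      in_kernel = ((s64)gs_base < 0);
+ *
+ * and only the user->kernel case needs the SWAPGS.
+ */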
+
+/*
+ * "Paranoid" exit path from exception stack.  This is invoked
+ * only on return from non-NMI IST interrupts that came
+ * from kernel space.
+ *
+ * We may be returning to very strange contexts (e.g. very early
+ * in syscall entry), so checking for preemption here would
+ * be complicated.  Fortunately, there's no good reason
+ * to try to handle preemption here.
+ *
+ * On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it)
+ */
+ENTRY(paranoid_exit)
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF_DEBUG
+       testl   %ebx, %ebx                      /* swapgs needed? */
+       jnz     paranoid_exit_no_swapgs
+       TRACE_IRQS_IRETQ
+       SWAPGS_UNSAFE_STACK
+       jmp     paranoid_exit_restore
+paranoid_exit_no_swapgs:
+       TRACE_IRQS_IRETQ_DEBUG
+paranoid_exit_restore:
+       RESTORE_EXTRA_REGS
+       RESTORE_C_REGS
+       REMOVE_PT_GPREGS_FROM_STACK 8
+       INTERRUPT_RETURN
+END(paranoid_exit)
+
+/*
+ * Save all registers in pt_regs, and switch gs if needed.
+ * Return: EBX=0: came from user mode; EBX=1: otherwise
+ */
+ENTRY(error_entry)
+       cld
+       SAVE_C_REGS 8
+       SAVE_EXTRA_REGS 8
+       xorl    %ebx, %ebx
+       testb   $3, CS+8(%rsp)
+       jz      error_kernelspace
+
+       /* We entered from user mode */
+       SWAPGS
+
+error_entry_done:
+       TRACE_IRQS_OFF
+       ret
+
+       /*
+        * There are two places in the kernel that can potentially fault with
+        * usergs. Handle them here.  B-stepping K8s sometimes report a
+        * truncated RIP for IRET exceptions returning to compat mode. Check
+        * for these here too.
+        */
+error_kernelspace:
+       incl    %ebx
+       leaq    native_irq_return_iret(%rip), %rcx
+       cmpq    %rcx, RIP+8(%rsp)
+       je      error_bad_iret
+       movl    %ecx, %eax                      /* zero extend */
+       cmpq    %rax, RIP+8(%rsp)
+       je      bstep_iret
+       cmpq    $gs_change, RIP+8(%rsp)
+       jne     error_entry_done
+
+       /*
+        * hack: gs_change can fail with user gsbase.  If this happens, fix up
+        * gsbase and proceed.  We'll fix up the exception and land in
+        * gs_change's error handler with kernel gsbase.
+        */
+       SWAPGS
+       jmp     error_entry_done
+
+bstep_iret:
+       /* Fix truncated RIP */
+       movq    %rcx, RIP+8(%rsp)
+       /* fall through */
+
+error_bad_iret:
+       /*
+        * We came from an IRET to user mode, so we have user gsbase.
+        * Switch to kernel gsbase:
+        */
+       SWAPGS
+
+       /*
+        * Pretend that the exception came from user mode: set up pt_regs
+        * as if we faulted immediately after IRET and clear EBX so that
+        * error_exit knows that we will be returning to user mode.
+        */
+       mov     %rsp, %rdi
+       call    fixup_bad_iret
+       mov     %rax, %rsp
+       decl    %ebx
+       jmp     error_entry_done
+END(error_entry)
+
+
+/*
+ * On entry, EBX is a "return to kernel mode" flag:
+ *   1: already in kernel mode, don't need SWAPGS
+ *   0: user gsbase is loaded, we need SWAPGS and standard preparation for return to usermode
+ */
+ENTRY(error_exit)
+       movl    %ebx, %eax
+       RESTORE_EXTRA_REGS
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       testl   %eax, %eax
+       jnz     retint_kernel
+       jmp     retint_user
+END(error_exit)
+
+/* Runs on exception stack */
+ENTRY(nmi)
+       PARAVIRT_ADJUST_EXCEPTION_FRAME
+       /*
+        * We allow breakpoints in NMIs. If a breakpoint occurs, then
+        * the iretq it performs will take us out of NMI context.
+        * This means that we can have nested NMIs where the next
+        * NMI is using the top of the stack of the previous NMI. We
+        * can't let it execute because the nested NMI will corrupt the
+        * stack of the previous NMI. NMI handlers are not re-entrant
+        * anyway.
+        *
+        * To handle this case we do the following:
+        *  Check a special location on the stack that contains
+        *  a variable that is set when NMIs are executing.
+        *  The interrupted task's stack is also checked to see if it
+        *  is an NMI stack.
+        *  If the variable is not set and the stack is not the NMI
+        *  stack then:
+        *    o Set the special variable on the stack
+        *    o Copy the interrupt frame into a "saved" location on the stack
+        *    o Copy the interrupt frame into a "copy" location on the stack
+        *    o Continue processing the NMI
+        *  If the variable is set or the previous stack is the NMI stack:
+        *    o Modify the "copy" location to jump to repeat_nmi
+        *    o return back to the first NMI
+        *
+        * Now on exit of the first NMI, we first clear the stack variable.
+        * The NMI stack will tell any nested NMIs at that point that it is
+        * nested. Then we pop the stack normally with iret, and if there was
+        * a nested NMI that updated the copy interrupt stack frame, a
+        * jump will be made to the repeat_nmi code that will handle the second
+        * NMI.
+        */
+
+       /* Use %rdx as our temp variable throughout */
+       pushq   %rdx
+
+       /*
+        * If %cs was not the kernel segment, then the NMI triggered in user
+        * space, which means it is definitely not nested.
+        */
+       cmpl    $__KERNEL_CS, 16(%rsp)
+       jne     first_nmi
+
+       /*
+        * Check the special variable on the stack to see if NMIs are
+        * executing.
+        */
+       cmpl    $1, -8(%rsp)
+       je      nested_nmi
+
+       /*
+        * Now test if the previous stack was an NMI stack.
+        * We need the double check. We check the NMI stack to cover the
+        * race where the first NMI clears the variable before returning.
+        * We check the variable because the first NMI could be in a
+        * breakpoint routine using a breakpoint stack.
+        */
+       lea     6*8(%rsp), %rdx
+       /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
+       cmpq    %rdx, 4*8(%rsp)
+       /* If the stack pointer is above the NMI stack, this is a normal NMI */
+       ja      first_nmi
+
+       subq    $EXCEPTION_STKSZ, %rdx
+       cmpq    %rdx, 4*8(%rsp)
+       /* If it is below the NMI stack, it is a normal NMI */
+       jb      first_nmi
+       /* Ah, it is within the NMI stack, treat it as nested */
+
+nested_nmi:
+       /*
+        * Do nothing if we interrupted the fixup in repeat_nmi.
+        * It's about to repeat the NMI handler, so we are fine
+        * with ignoring this one.
+        */
+       movq    $repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      1f
+       movq    $end_repeat_nmi, %rdx
+       cmpq    8(%rsp), %rdx
+       ja      nested_nmi_out
+
+1:
+       /* Set up the interrupted NMI's stack to jump to repeat_nmi */
+       leaq    -1*8(%rsp), %rdx
+       movq    %rdx, %rsp
+       leaq    -10*8(%rsp), %rdx
+       pushq   $__KERNEL_DS
+       pushq   %rdx
+       pushfq
+       pushq   $__KERNEL_CS
+       pushq   $repeat_nmi
+
+       /* Put stack back */
+       addq    $(6*8), %rsp
+
+nested_nmi_out:
+       popq    %rdx
+
+       /* No need to check faults here */
+       INTERRUPT_RETURN
+
+first_nmi:
+       /*
+        * Because nested NMIs will use the pushed location that we
+        * stored in rdx, we must keep that space available.
+        * Here's what our stack frame will look like:
+        * +-------------------------+
+        * | original SS             |
+        * | original Return RSP     |
+        * | original RFLAGS         |
+        * | original CS             |
+        * | original RIP            |
+        * +-------------------------+
+        * | temp storage for rdx    |
+        * +-------------------------+
+        * | NMI executing variable  |
+        * +-------------------------+
+        * | copied SS               |
+        * | copied Return RSP       |
+        * | copied RFLAGS           |
+        * | copied CS               |
+        * | copied RIP              |
+        * +-------------------------+
+        * | Saved SS                |
+        * | Saved Return RSP        |
+        * | Saved RFLAGS            |
+        * | Saved CS                |
+        * | Saved RIP               |
+        * +-------------------------+
+        * | pt_regs                 |
+        * +-------------------------+
+        *
+        * The saved stack frame is used to fix up the copied stack frame
+        * that a nested NMI may change to make the interrupted NMI iret jump
+        * to repeat_nmi. The original stack frame and the temp storage
+        * are also used by nested NMIs and cannot be trusted on exit.
+        */
+       /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
+       movq    (%rsp), %rdx
+
+       /* Set the NMI executing variable on the stack. */
+       pushq   $1
+
+       /* Leave room for the "copied" frame */
+       subq    $(5*8), %rsp
+
+       /* Copy the stack frame to the Saved frame */
+       .rept 5
+       pushq   11*8(%rsp)
+       .endr
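+       /*
+        * Offset check (sketch): at the first push, 11*8(%rsp) skips the
+        * 5 reserved "copied" slots, the NMI-executing variable, the rdx
+        * slot and 4 more words (5 + 1 + 1 + 4 = 11), landing on the
+        * original SS; each push moves %rsp down, so the following
+        * iterations pick up RSP, RFLAGS, CS and finally RIP.
+        */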
+
+       /* Everything up to here is safe from nested NMIs */
+
+       /*
+        * If there was a nested NMI, the first NMI's iret will return
+        * here. But NMIs are still enabled and we can take another
+        * nested NMI. The nested NMI checks the interrupted RIP to see
+        * if it is between repeat_nmi and end_repeat_nmi, and if so
+        * it will just return, as we are about to repeat an NMI anyway.
+        * This makes it safe to copy to the stack frame that a nested
+        * NMI will update.
+        */
+repeat_nmi:
+       /*
+        * Update the stack variable to say we are still in NMI (the update
+        * is benign for the non-repeat case, where 1 was pushed just above
+        * to this very stack slot).
+        */
+       movq    $1, 10*8(%rsp)
+
+       /* Make another copy, this one may be modified by nested NMIs */
+       addq    $(10*8), %rsp
+       .rept 5
+       pushq   -6*8(%rsp)
+       .endr
+       subq    $(5*8), %rsp
+end_repeat_nmi:
+
+       /*
+        * Everything below this point can be preempted by a nested
+        * NMI if the first NMI took an exception and reset our iret stack
+        * so that we repeat another NMI.
+        */
+       pushq   $-1                             /* ORIG_RAX: no syscall to restart */
+       ALLOC_PT_GPREGS_ON_STACK
+
+       /*
+        * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
+        * as we should not be calling schedule in NMI context,
+        * even with normal interrupts enabled. An NMI should not be
+        * setting NEED_RESCHED or anything else that normal interrupts and
+        * exceptions might do.
+        */
+       call    paranoid_entry
+
+       /*
+        * Save off the CR2 register. If we take a page fault in the NMI then
+        * it could corrupt the CR2 value. If the NMI preempts a page fault
+        * handler before it was able to read the CR2 register, and then the
+        * NMI itself takes a page fault, the page fault that was preempted
+        * will read the information from the NMI page fault and not the
+        * original fault. Save it off and restore it if it changes.
+        * Use the r12 callee-saved register.
+        */
+       movq    %cr2, %r12
+
+       /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
+       movq    %rsp, %rdi
+       movq    $-1, %rsi
+       call    do_nmi
+
+       /* Did the NMI take a page fault? Restore cr2 if it did */
+       movq    %cr2, %rcx
+       cmpq    %rcx, %r12
+       je      1f
+       movq    %r12, %cr2
+1:
+       testl   %ebx, %ebx                      /* swapgs needed? */
+       jnz     nmi_restore
+nmi_swapgs:
+       SWAPGS_UNSAFE_STACK
+nmi_restore:
+       RESTORE_EXTRA_REGS
+       RESTORE_C_REGS
+       /* Pop the extra iret frame at once */
+       REMOVE_PT_GPREGS_FROM_STACK 6*8
+
+       /* Clear the NMI executing stack variable */
+       movq    $0, 5*8(%rsp)
+       INTERRUPT_RETURN
+END(nmi)
+
+ENTRY(ignore_sysret)
+       mov     $-ENOSYS, %eax
+       sysret
+END(ignore_sysret)
diff --git a/arch/x86/entry/entry_64_compat.S b/arch/x86/entry/entry_64_compat.S
new file mode 100644 (file)
index 0000000..bb187a6
--- /dev/null
@@ -0,0 +1,556 @@
+/*
+ * Compatibility mode system call entry point for x86-64.
+ *
+ * Copyright 2000-2002 Andi Kleen, SuSE Labs.
+ */
+#include "calling.h"
+#include <asm/asm-offsets.h>
+#include <asm/current.h>
+#include <asm/errno.h>
+#include <asm/ia32_unistd.h>
+#include <asm/thread_info.h>
+#include <asm/segment.h>
+#include <asm/irqflags.h>
+#include <asm/asm.h>
+#include <asm/smap.h>
+#include <linux/linkage.h>
+#include <linux/err.h>
+
+/* Avoid __ASSEMBLY__'ifying <linux/audit.h> just for this.  */
+#include <linux/elf-em.h>
+#define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
+#define __AUDIT_ARCH_LE                0x40000000
+
+#ifndef CONFIG_AUDITSYSCALL
+# define sysexit_audit         ia32_ret_from_sys_call
+# define sysretl_audit         ia32_ret_from_sys_call
+#endif
+
+       .section .entry.text, "ax"
+
+#ifdef CONFIG_PARAVIRT
+ENTRY(native_usergs_sysret32)
+       swapgs
+       sysretl
+ENDPROC(native_usergs_sysret32)
+#endif
+
+/*
+ * 32-bit SYSENTER instruction entry.
+ *
+ * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
+ * IF and VM in rflags are cleared (IOW: interrupts are off).
+ * SYSENTER does not save anything on the stack,
+ * and does not save old rip (!!!) and rflags.
+ *
+ * Arguments:
+ * eax  system call number
+ * ebx  arg1
+ * ecx  arg2
+ * edx  arg3
+ * esi  arg4
+ * edi  arg5
+ * ebp  user stack
+ * 0(%ebp) arg6
+ *
+ * This is purely a fast path. For anything complicated we use the int 0x80
+ * path below. We set up a complete hardware stack frame to share code
+ * with the int 0x80 path.
+ */
+ENTRY(entry_SYSENTER_compat)
+       /*
+        * Interrupts are off on entry.
+        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+        * it is too small to ever cause noticeable irq latency.
+        */
+       SWAPGS_UNSAFE_STACK
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       ENABLE_INTERRUPTS(CLBR_NONE)
+
+       /* Zero-extending 32-bit regs, do not remove */
+       movl    %ebp, %ebp
+       movl    %eax, %eax
+
+       movl    ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
+
+       /* Construct struct pt_regs on stack */
+       pushq   $__USER32_DS            /* pt_regs->ss */
+       pushq   %rbp                    /* pt_regs->sp */
+       pushfq                          /* pt_regs->flags */
+       pushq   $__USER32_CS            /* pt_regs->cs */
+       pushq   %r10                    /* pt_regs->ip = thread_info->sysenter_return */
+       pushq   %rax                    /* pt_regs->orig_ax */
+       pushq   %rdi                    /* pt_regs->di */
+       pushq   %rsi                    /* pt_regs->si */
+       pushq   %rdx                    /* pt_regs->dx */
+       pushq   %rcx                    /* pt_regs->cx */
+       pushq   $-ENOSYS                /* pt_regs->ax */
+       cld
+       sub     $(10*8), %rsp           /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+
+       /*
+        * no need to do an access_ok check here because rbp has been
+        * 32-bit zero extended
+        */
+       ASM_STAC
+1:     movl    (%rbp), %ebp
+       _ASM_EXTABLE(1b, ia32_badarg)
+       ASM_CLAC
+
+       /*
+        * Sysenter doesn't filter flags, so we need to clear NT
+        * ourselves.  To save a few cycles, we can check whether
+        * NT was set instead of doing an unconditional popfq.
+        */
+       testl   $X86_EFLAGS_NT, EFLAGS(%rsp)
+       jnz     sysenter_fix_flags
+sysenter_flags_fixed:
+
+       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     sysenter_tracesys
+
+sysenter_do_call:
+       /* 32-bit syscall -> 64-bit C ABI argument conversion */
+       movl    %edi, %r8d              /* arg5 */
+       movl    %ebp, %r9d              /* arg6 */
+       xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
+       movl    %ebx, %edi              /* arg1 */
+       movl    %edx, %edx              /* arg3 (zero extension) */
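+       /*
+        * The block above (repeated by cstar_do_call and ia32_do_call
+        * below) maps the i386 syscall ABI onto the 64-bit C ABI:
+        *
+        *      ebx/arg1 -> rdi   ecx/arg2 -> rsi   edx/arg3 -> rdx
+        *      esi/arg4 -> rcx   edi/arg5 -> r8    ebp/arg6 -> r9
+        */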
+sysenter_dispatch:
+       cmpq    $(IA32_NR_syscalls-1), %rax
+       ja      1f
+       call    *ia32_sys_call_table(, %rax, 8)
+       movq    %rax, RAX(%rsp)
+1:
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     sysexit_audit
+sysexit_from_sys_call:
+       /*
+        * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
+        * NMI between STI and SYSEXIT has poorly specified behavior,
+        * and an NMI followed by an IRQ with usergs is fatal.  So
+        * we just pretend we're using SYSEXIT but we really use
+        * SYSRETL instead.
+        *
+        * This code path is still called 'sysexit' because it pairs
+        * with 'sysenter' and it uses the SYSENTER calling convention.
+        */
+       andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+       movl    RIP(%rsp), %ecx         /* User %eip */
+       RESTORE_RSI_RDI
+       xorl    %edx, %edx              /* Do not leak kernel information */
+       xorq    %r8, %r8
+       xorq    %r9, %r9
+       xorq    %r10, %r10
+       movl    EFLAGS(%rsp), %r11d     /* User eflags */
+       TRACE_IRQS_ON
+
+       /*
+        * SYSRETL works even on Intel CPUs.  Use it in preference to SYSEXIT,
+        * since it avoids a dicey window with interrupts enabled.
+        */
+       movl    RSP(%rsp), %esp
+
+       /*
+        * USERGS_SYSRET32 does:
+        *  gsbase = user's gs base
+        *  eip = ecx
+        *  rflags = r11
+        *  cs = __USER32_CS
+        *  ss = __USER_DS
+        *
+        * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
+        *
+        *  pop %ebp
+        *  pop %edx
+        *  pop %ecx
+        *
+        * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
+        * avoid info leaks.  R11 ends up with VDSO32_SYSENTER_RETURN's
+        * address (already known to user code), and R12-R15 are
+        * callee-saved and therefore don't contain any interesting
+        * kernel data.
+        */
+       USERGS_SYSRET32
+
+#ifdef CONFIG_AUDITSYSCALL
+       .macro auditsys_entry_common
+       /*
+        * At this point, registers hold syscall args in the 32-bit syscall ABI:
+        * EAX is syscall number, the 6 args are in EBX,ECX,EDX,ESI,EDI,EBP.
+        *
+        * We want to pass them to __audit_syscall_entry(), which is a 64-bit
+        * C function with 5 parameters, so shuffle them to match what
+        * the function expects: RDI,RSI,RDX,RCX,R8.
+        */
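+       /*
+        * I.e., the call below assumes roughly this signature (five
+        * parameters, syscall number plus the first four arguments;
+        * shown for orientation, see include/linux/audit.h):
+        *
+        *      void __audit_syscall_entry(int major, unsigned long a0,
+        *                                 unsigned long a1, unsigned long a2,
+        *                                 unsigned long a3);
+        */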
+       movl    %esi, %r8d              /* arg5 (R8 ) <= 4th syscall arg (ESI) */
+       xchg    %ecx, %edx              /* arg4 (RCX) <= 3rd syscall arg (EDX) */
+                                       /* arg3 (RDX) <= 2nd syscall arg (ECX) */
+       movl    %ebx, %esi              /* arg2 (RSI) <= 1st syscall arg (EBX) */
+       movl    %eax, %edi              /* arg1 (RDI) <= syscall number  (EAX) */
+       call    __audit_syscall_entry
+
+       /*
+        * We are going to jump back to the syscall dispatch code.
+        * Prepare syscall args as required by the 64-bit C ABI.
+        * Registers clobbered by __audit_syscall_entry() are
+        * loaded from pt_regs on stack:
+        */
+       movl    ORIG_RAX(%rsp), %eax    /* syscall number */
+       movl    %ebx, %edi              /* arg1 */
+       movl    RCX(%rsp), %esi         /* arg2 */
+       movl    RDX(%rsp), %edx         /* arg3 */
+       movl    RSI(%rsp), %ecx         /* arg4 */
+       movl    RDI(%rsp), %r8d         /* arg5 */
+       movl    %ebp, %r9d              /* arg6 */
+       .endm
+
+       .macro auditsys_exit exit
+       testl   $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     ia32_ret_from_sys_call
+       TRACE_IRQS_ON
+       ENABLE_INTERRUPTS(CLBR_NONE)
+       movl    %eax, %esi              /* second arg, syscall return value */
+       cmpl    $-MAX_ERRNO, %eax       /* is it an error ? */
+       jbe     1f
+       movslq  %eax, %rsi              /* if error sign extend to 64 bits */
+1:     setbe   %al                     /* 1 if error, 0 if not */
+       movzbl  %al, %edi               /* zero-extend that into %edi */
+       call    __audit_syscall_exit
+       movq    RAX(%rsp), %rax         /* reload syscall return value */
+       movl    $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %edi
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       testl   %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jz      \exit
+       xorl    %eax, %eax              /* Do not leak kernel information */
+       movq    %rax, R11(%rsp)
+       movq    %rax, R10(%rsp)
+       movq    %rax, R9(%rsp)
+       movq    %rax, R8(%rsp)
+       jmp     int_with_check
+       .endm
+
+sysenter_auditsys:
+       auditsys_entry_common
+       jmp     sysenter_dispatch
+
+sysexit_audit:
+       auditsys_exit sysexit_from_sys_call
+#endif
+
+sysenter_fix_flags:
+       pushq   $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
+       popfq
+       jmp     sysenter_flags_fixed
+
+sysenter_tracesys:
+#ifdef CONFIG_AUDITSYSCALL
+       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jz      sysenter_auditsys
+#endif
+       SAVE_EXTRA_REGS
+       xorl    %eax, %eax              /* Do not leak kernel information */
+       movq    %rax, R11(%rsp)
+       movq    %rax, R10(%rsp)
+       movq    %rax, R9(%rsp)
+       movq    %rax, R8(%rsp)
+       movq    %rsp, %rdi              /* &pt_regs -> arg1 */
+       call    syscall_trace_enter
+
+       /* Reload arg registers from stack, in case ptrace changed them */
+       movl    RCX(%rsp), %ecx
+       movl    RDX(%rsp), %edx
+       movl    RSI(%rsp), %esi
+       movl    RDI(%rsp), %edi
+       movl    %eax, %eax              /* zero extension */
+
+       RESTORE_EXTRA_REGS
+       jmp     sysenter_do_call
+ENDPROC(entry_SYSENTER_compat)
+
+/*
+ * 32-bit SYSCALL instruction entry.
+ *
+ * 32-bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
+ * then loads new ss, cs, and rip from previously programmed MSRs.
+ * rflags gets masked by a value from another MSR (so CLD and CLAC
+ * are not needed). SYSCALL does not save anything on the stack
+ * and does not change rsp.
+ *
+ * Note: rflags saving+masking-with-MSR happens only in Long mode
+ * (in legacy 32-bit mode, IF, RF and VM bits are cleared and that's it).
+ * Don't get confused: rflags saving+masking depends on Long Mode Active bit
+ * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
+ * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
+ *
+ * Arguments:
+ * eax  system call number
+ * ecx  return address
+ * ebx  arg1
+ * ebp  arg2   (note: not saved in the stack frame, should not be touched)
+ * edx  arg3
+ * esi  arg4
+ * edi  arg5
+ * esp  user stack
+ * 0(%esp) arg6
+ *
+ * This is purely a fast path. For anything complicated we use the int 0x80
+ * path below. We set up a complete hardware stack frame to share code
+ * with the int 0x80 path.
+ */
+ENTRY(entry_SYSCALL_compat)
+       /*
+        * Interrupts are off on entry.
+        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+        * it is too small to ever cause noticeable irq latency.
+        */
+       SWAPGS_UNSAFE_STACK
+       movl    %esp, %r8d
+       movq    PER_CPU_VAR(cpu_current_top_of_stack), %rsp
+       ENABLE_INTERRUPTS(CLBR_NONE)
+
+       /* Zero-extending 32-bit regs, do not remove */
+       movl    %eax, %eax
+
+       /* Construct struct pt_regs on stack */
+       pushq   $__USER32_DS            /* pt_regs->ss */
+       pushq   %r8                     /* pt_regs->sp */
+       pushq   %r11                    /* pt_regs->flags */
+       pushq   $__USER32_CS            /* pt_regs->cs */
+       pushq   %rcx                    /* pt_regs->ip */
+       pushq   %rax                    /* pt_regs->orig_ax */
+       pushq   %rdi                    /* pt_regs->di */
+       pushq   %rsi                    /* pt_regs->si */
+       pushq   %rdx                    /* pt_regs->dx */
+       pushq   %rbp                    /* pt_regs->cx */
+       movl    %ebp, %ecx
+       pushq   $-ENOSYS                /* pt_regs->ax */
+       sub     $(10*8), %rsp           /* pt_regs->r8-11, bp, bx, r12-15 not saved */
+
+       /*
+        * No need to do an access_ok check here because r8 has been
+        * 32-bit zero extended:
+        */
+       ASM_STAC
+1:     movl    (%r8), %ebp
+       _ASM_EXTABLE(1b, ia32_badarg)
+       ASM_CLAC
+       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     cstar_tracesys
+
+cstar_do_call:
+       /* 32-bit syscall -> 64-bit C ABI argument conversion */
+       movl    %edi, %r8d              /* arg5 */
+       movl    %ebp, %r9d              /* arg6 */
+       xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
+       movl    %ebx, %edi              /* arg1 */
+       movl    %edx, %edx              /* arg3 (zero extension) */
+
+cstar_dispatch:
+       cmpq    $(IA32_NR_syscalls-1), %rax
+       ja      1f
+
+       call    *ia32_sys_call_table(, %rax, 8)
+       movq    %rax, RAX(%rsp)
+1:
+       movl    RCX(%rsp), %ebp
+       DISABLE_INTERRUPTS(CLBR_NONE)
+       TRACE_IRQS_OFF
+       testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     sysretl_audit
+
+sysretl_from_sys_call:
+       andl    $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+       RESTORE_RSI_RDI_RDX
+       movl    RIP(%rsp), %ecx
+       movl    EFLAGS(%rsp), %r11d
+       xorq    %r10, %r10
+       xorq    %r9, %r9
+       xorq    %r8, %r8
+       TRACE_IRQS_ON
+       movl    RSP(%rsp), %esp
+       /*
+        * 64-bit->32-bit SYSRET restores eip from ecx,
+        * eflags from r11 (but RF and VM bits are forced to 0),
+        * cs and ss are loaded from MSRs.
+        * (Note: 32-bit->32-bit SYSRET is different: since r11
+        * does not exist, it merely sets eflags.IF=1).
+        *
+        * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
+        * descriptor is not reinitialized.  This means that we must
+        * avoid SYSRET with SS == NULL, which could happen if we schedule,
+        * exit the kernel, and re-enter using an interrupt vector.  (All
+        * interrupt entries on x86_64 set SS to NULL.)  We prevent that
+        * from happening by reloading SS in __switch_to.
+        */
+       USERGS_SYSRET32
+
+#ifdef CONFIG_AUDITSYSCALL
+cstar_auditsys:
+       auditsys_entry_common
+       jmp     cstar_dispatch
+
+sysretl_audit:
+       auditsys_exit sysretl_from_sys_call
+#endif
+
+cstar_tracesys:
+#ifdef CONFIG_AUDITSYSCALL
+       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jz      cstar_auditsys
+#endif
+       SAVE_EXTRA_REGS
+       xorl    %eax, %eax              /* Do not leak kernel information */
+       movq    %rax, R11(%rsp)
+       movq    %rax, R10(%rsp)
+       movq    %rax, R9(%rsp)
+       movq    %rax, R8(%rsp)
+       movq    %rsp, %rdi              /* &pt_regs -> arg1 */
+       call    syscall_trace_enter
+
+       /* Reload arg registers from stack. (see sysenter_tracesys) */
+       movl    RCX(%rsp), %ecx
+       movl    RDX(%rsp), %edx
+       movl    RSI(%rsp), %esi
+       movl    RDI(%rsp), %edi
+       movl    %eax, %eax              /* zero extension */
+
+       RESTORE_EXTRA_REGS
+       jmp     cstar_do_call
+END(entry_SYSCALL_compat)
+
+ia32_badarg:
+       ASM_CLAC
+       movq    $-EFAULT, RAX(%rsp)
+ia32_ret_from_sys_call:
+       xorl    %eax, %eax              /* Do not leak kernel information */
+       movq    %rax, R11(%rsp)
+       movq    %rax, R10(%rsp)
+       movq    %rax, R9(%rsp)
+       movq    %rax, R8(%rsp)
+       jmp     int_ret_from_sys_call
+
+/*
+ * Emulated IA32 system calls via int 0x80.
+ *
+ * Arguments:
+ * eax  system call number
+ * ebx  arg1
+ * ecx  arg2
+ * edx  arg3
+ * esi  arg4
+ * edi  arg5
+ * ebp  arg6   (note: not saved in the stack frame, should not be touched)
+ *
+ * Notes:
+ * Uses the same stack frame as the x86-64 version.
+ * All registers except eax must be saved (but ptrace may violate that).
+ * Arguments are zero extended. For system calls that want sign extension and
+ * take long arguments a wrapper is needed. Most calls can just be called
+ * directly.
+ * Assumes it is only called from user space and entered with interrupts off.
+ */
+
+ENTRY(entry_INT80_compat)
+       /*
+        * Interrupts are off on entry.
+        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
+        * it is too small to ever cause noticeable irq latency.
+        */
+       PARAVIRT_ADJUST_EXCEPTION_FRAME
+       SWAPGS
+       ENABLE_INTERRUPTS(CLBR_NONE)
+
+       /* Zero-extending 32-bit regs, do not remove */
+       movl    %eax, %eax
+
+       /* Construct struct pt_regs on stack (iret frame is already on stack) */
+       pushq   %rax                    /* pt_regs->orig_ax */
+       pushq   %rdi                    /* pt_regs->di */
+       pushq   %rsi                    /* pt_regs->si */
+       pushq   %rdx                    /* pt_regs->dx */
+       pushq   %rcx                    /* pt_regs->cx */
+       pushq   $-ENOSYS                /* pt_regs->ax */
+       pushq   $0                      /* pt_regs->r8 */
+       pushq   $0                      /* pt_regs->r9 */
+       pushq   $0                      /* pt_regs->r10 */
+       pushq   $0                      /* pt_regs->r11 */
+       cld
+       sub     $(6*8), %rsp            /* pt_regs->bp, bx, r12-15 not saved */
+
+       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
+       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
+       jnz     ia32_tracesys
+
+ia32_do_call:
+       /* 32-bit syscall -> 64-bit C ABI argument conversion */
+       movl    %edi, %r8d              /* arg5 */
+       movl    %ebp, %r9d              /* arg6 */
+       xchg    %ecx, %esi              /* rsi:arg2, rcx:arg4 */
+       movl    %ebx, %edi              /* arg1 */
+       movl    %edx, %edx              /* arg3 (zero extension) */
+       cmpq    $(IA32_NR_syscalls-1), %rax
+       ja      1f
+
+       call    *ia32_sys_call_table(, %rax, 8)
+       movq    %rax, RAX(%rsp)
+1:
+       jmp     int_ret_from_sys_call
+
+ia32_tracesys:
+       SAVE_EXTRA_REGS
+       movq    %rsp, %rdi                      /* &pt_regs -> arg1 */
+       call    syscall_trace_enter
+       /*
+        * Reload arg registers from stack in case ptrace changed them.
+        * Don't reload %eax because syscall_trace_enter() returned
+        * the %rax value we should see.  But do truncate it to 32 bits.
+        * If it's -1 to make us punt the syscall, then (u32)-1 is still
+        * an appropriately invalid value.
+        */
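+       /*
+        * ((u32)-1 is 0xffffffff, which is above IA32_NR_syscalls-1, so
+        * the range check in ia32_do_call skips the table call and the
+        * -ENOSYS pre-loaded into pt_regs->ax is what userspace sees.)
+        */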
+       movl    RCX(%rsp), %ecx
+       movl    RDX(%rsp), %edx
+       movl    RSI(%rsp), %esi
+       movl    RDI(%rsp), %edi
+       movl    %eax, %eax              /* zero extension */
+       RESTORE_EXTRA_REGS
+       jmp     ia32_do_call
+END(entry_INT80_compat)
+
+       .macro PTREGSCALL label, func
+       ALIGN
+GLOBAL(\label)
+       leaq    \func(%rip), %rax
+       jmp     ia32_ptregs_common
+       .endm
+
+       PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
+       PTREGSCALL stub32_sigreturn,    sys32_sigreturn
+       PTREGSCALL stub32_fork,         sys_fork
+       PTREGSCALL stub32_vfork,        sys_vfork
+
+       ALIGN
+GLOBAL(stub32_clone)
+       leaq    sys_clone(%rip), %rax
+       /*
+        * The 32-bit clone ABI is: clone(..., int tls_val, int *child_tidptr).
+        * The 64-bit clone ABI is: clone(..., int *child_tidptr, int tls_val).
+        *
+        * The native 64-bit kernel's sys_clone() implements the latter,
+        * so we need to swap arguments here before calling it:
+        */
+       xchg    %r8, %rcx
+       jmp     ia32_ptregs_common
+
+       ALIGN
+ia32_ptregs_common:
+       SAVE_EXTRA_REGS 8
+       call    *%rax
+       RESTORE_EXTRA_REGS 8
+       ret
+END(ia32_ptregs_common)
diff --git a/arch/x86/entry/syscall_32.c b/arch/x86/entry/syscall_32.c
new file mode 100644 (file)
index 0000000..8ea34f9
--- /dev/null
@@ -0,0 +1,33 @@
+/* System call table for i386. */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <linux/cache.h>
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_IA32_EMULATION
+#define SYM(sym, compat) compat
+#else
+#define SYM(sym, compat) sym
+#define ia32_sys_call_table sys_call_table
+#define __NR_syscall_compat_max __NR_syscall_max
+#endif
+
+#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void);
+#include <asm/syscalls_32.h>
+#undef __SYSCALL_I386
+
+#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
+
+typedef asmlinkage void (*sys_call_ptr_t)(void);
+
+extern asmlinkage void sys_ni_syscall(void);
+
+__visible const sys_call_ptr_t ia32_sys_call_table[__NR_syscall_compat_max+1] = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
+       [0 ... __NR_syscall_compat_max] = &sys_ni_syscall,
+#include <asm/syscalls_32.h>
+};
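+
+/*
+ * Note the double inclusion of <asm/syscalls_32.h> above: it is an
+ * "x-macro" header. The first pass expands each generated line, e.g.
+ *
+ *      __SYSCALL_I386(0, sys_restart_syscall, sys_restart_syscall)
+ *
+ * (illustrative; the exact lines come from syscalltbl.sh), into an
+ * extern declaration, and the second pass expands the same line into
+ * the designated initializer "[0] = sys_restart_syscall,", so the
+ * table stays in sync with the declarations automatically.
+ */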
diff --git a/arch/x86/entry/syscall_64.c b/arch/x86/entry/syscall_64.c
new file mode 100644 (file)
index 0000000..4ac730b
--- /dev/null
@@ -0,0 +1,32 @@
+/* System call table for x86-64. */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <linux/cache.h>
+#include <asm/asm-offsets.h>
+#include <asm/syscall.h>
+
+#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+
+#ifdef CONFIG_X86_X32_ABI
+# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
+#else
+# define __SYSCALL_X32(nr, sym, compat) /* nothing */
+#endif
+
+#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void);
+#include <asm/syscalls_64.h>
+#undef __SYSCALL_64
+
+#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
+
+extern void sys_ni_syscall(void);
+
+asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
+       /*
+        * Smells like a compiler bug -- it doesn't work
+        * when the & below is removed.
+        */
+       [0 ... __NR_syscall_max] = &sys_ni_syscall,
+#include <asm/syscalls_64.h>
+};
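+
+/*
+ * The "[0 ... __NR_syscall_max]" form is GCC's designated range
+ * initializer: the whole table is first filled with sys_ni_syscall,
+ * then the entries pulled in from <asm/syscalls_64.h> override the
+ * implemented slots. A tiny standalone example of the same idiom:
+ *
+ *      int v[4] = { [0 ... 3] = -1, [2] = 7 };   yields { -1, -1, 7, -1 }
+ */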
diff --git a/arch/x86/entry/syscalls/Makefile b/arch/x86/entry/syscalls/Makefile
new file mode 100644 (file)
index 0000000..57aa59f
--- /dev/null
@@ -0,0 +1,69 @@
+out := $(obj)/../../include/generated/asm
+uapi := $(obj)/../../include/generated/uapi/asm
+
+# Create output directory if not already present
+_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
+         $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
+
+syscall32 := $(srctree)/$(src)/syscall_32.tbl
+syscall64 := $(srctree)/$(src)/syscall_64.tbl
+
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR  $@
+      cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+                  '$(syshdr_abi_$(basetarget))' \
+                  '$(syshdr_pfx_$(basetarget))' \
+                  '$(syshdr_offset_$(basetarget))'
+quiet_cmd_systbl = SYSTBL  $@
+      cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
+
+quiet_cmd_hypercalls = HYPERCALLS $@
+      cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^)
+
+syshdr_abi_unistd_32 := i386
+$(uapi)/unistd_32.h: $(syscall32) $(syshdr)
+       $(call if_changed,syshdr)
+
+syshdr_abi_unistd_32_ia32 := i386
+syshdr_pfx_unistd_32_ia32 := ia32_
+$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
+       $(call if_changed,syshdr)
+
+syshdr_abi_unistd_x32 := common,x32
+syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
+$(uapi)/unistd_x32.h: $(syscall64) $(syshdr)
+       $(call if_changed,syshdr)
+
+syshdr_abi_unistd_64 := common,64
+$(uapi)/unistd_64.h: $(syscall64) $(syshdr)
+       $(call if_changed,syshdr)
+
+syshdr_abi_unistd_64_x32 := x32
+syshdr_pfx_unistd_64_x32 := x32_
+$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
+       $(call if_changed,syshdr)
+
+$(out)/syscalls_32.h: $(syscall32) $(systbl)
+       $(call if_changed,systbl)
+$(out)/syscalls_64.h: $(syscall64) $(systbl)
+       $(call if_changed,systbl)
+
+$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
+       $(call if_changed,hypercalls)
+
+$(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h
+
+uapisyshdr-y                   += unistd_32.h unistd_64.h unistd_x32.h
+syshdr-y                       += syscalls_32.h
+syshdr-$(CONFIG_X86_64)                += unistd_32_ia32.h unistd_64_x32.h
+syshdr-$(CONFIG_X86_64)                += syscalls_64.h
+syshdr-$(CONFIG_XEN)           += xen-hypercalls.h
+
+targets        += $(uapisyshdr-y) $(syshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(out)/,$(syshdr-y))
+       @:
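+
+# For reference: syscallhdr.sh turns each table row into a __NR_* define,
+# so the first rows of syscall_32.tbl (next file) should come out as
+# something like:
+#
+#    #define __NR_restart_syscall 0
+#    #define __NR_exit 1
+#    #define __NR_fork 2
+#    #define __NR_read 3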
diff --git a/arch/x86/entry/syscalls/syscall_32.tbl b/arch/x86/entry/syscalls/syscall_32.tbl
new file mode 100644 (file)
index 0000000..ef8187f
--- /dev/null
@@ -0,0 +1,367 @@
+#
+# 32-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The abi is always "i386" for this file.
+#
+0      i386    restart_syscall         sys_restart_syscall
+1      i386    exit                    sys_exit
+2      i386    fork                    sys_fork                        stub32_fork
+3      i386    read                    sys_read
+4      i386    write                   sys_write
+5      i386    open                    sys_open                        compat_sys_open
+6      i386    close                   sys_close
+7      i386    waitpid                 sys_waitpid                     sys32_waitpid
+8      i386    creat                   sys_creat
+9      i386    link                    sys_link
+10     i386    unlink                  sys_unlink
+11     i386    execve                  sys_execve                      stub32_execve
+12     i386    chdir                   sys_chdir
+13     i386    time                    sys_time                        compat_sys_time
+14     i386    mknod                   sys_mknod
+15     i386    chmod                   sys_chmod
+16     i386    lchown                  sys_lchown16
+17     i386    break
+18     i386    oldstat                 sys_stat
+19     i386    lseek                   sys_lseek                       compat_sys_lseek
+20     i386    getpid                  sys_getpid
+21     i386    mount                   sys_mount                       compat_sys_mount
+22     i386    umount                  sys_oldumount
+23     i386    setuid                  sys_setuid16
+24     i386    getuid                  sys_getuid16
+25     i386    stime                   sys_stime                       compat_sys_stime
+26     i386    ptrace                  sys_ptrace                      compat_sys_ptrace
+27     i386    alarm                   sys_alarm
+28     i386    oldfstat                sys_fstat
+29     i386    pause                   sys_pause
+30     i386    utime                   sys_utime                       compat_sys_utime
+31     i386    stty
+32     i386    gtty
+33     i386    access                  sys_access
+34     i386    nice                    sys_nice
+35     i386    ftime
+36     i386    sync                    sys_sync
+37     i386    kill                    sys_kill
+38     i386    rename                  sys_rename
+39     i386    mkdir                   sys_mkdir
+40     i386    rmdir                   sys_rmdir
+41     i386    dup                     sys_dup
+42     i386    pipe                    sys_pipe
+43     i386    times                   sys_times                       compat_sys_times
+44     i386    prof
+45     i386    brk                     sys_brk
+46     i386    setgid                  sys_setgid16
+47     i386    getgid                  sys_getgid16
+48     i386    signal                  sys_signal
+49     i386    geteuid                 sys_geteuid16
+50     i386    getegid                 sys_getegid16
+51     i386    acct                    sys_acct
+52     i386    umount2                 sys_umount
+53     i386    lock
+54     i386    ioctl                   sys_ioctl                       compat_sys_ioctl
+55     i386    fcntl                   sys_fcntl                       compat_sys_fcntl64
+56     i386    mpx
+57     i386    setpgid                 sys_setpgid
+58     i386    ulimit
+59     i386    oldolduname             sys_olduname
+60     i386    umask                   sys_umask
+61     i386    chroot                  sys_chroot
+62     i386    ustat                   sys_ustat                       compat_sys_ustat
+63     i386    dup2                    sys_dup2
+64     i386    getppid                 sys_getppid
+65     i386    getpgrp                 sys_getpgrp
+66     i386    setsid                  sys_setsid
+67     i386    sigaction               sys_sigaction                   compat_sys_sigaction
+68     i386    sgetmask                sys_sgetmask
+69     i386    ssetmask                sys_ssetmask
+70     i386    setreuid                sys_setreuid16
+71     i386    setregid                sys_setregid16
+72     i386    sigsuspend              sys_sigsuspend                  sys_sigsuspend
+73     i386    sigpending              sys_sigpending                  compat_sys_sigpending
+74     i386    sethostname             sys_sethostname
+75     i386    setrlimit               sys_setrlimit                   compat_sys_setrlimit
+76     i386    getrlimit               sys_old_getrlimit               compat_sys_old_getrlimit
+77     i386    getrusage               sys_getrusage                   compat_sys_getrusage
+78     i386    gettimeofday            sys_gettimeofday                compat_sys_gettimeofday
+79     i386    settimeofday            sys_settimeofday                compat_sys_settimeofday
+80     i386    getgroups               sys_getgroups16
+81     i386    setgroups               sys_setgroups16
+82     i386    select                  sys_old_select                  compat_sys_old_select
+83     i386    symlink                 sys_symlink
+84     i386    oldlstat                sys_lstat
+85     i386    readlink                sys_readlink
+86     i386    uselib                  sys_uselib
+87     i386    swapon                  sys_swapon
+88     i386    reboot                  sys_reboot
+89     i386    readdir                 sys_old_readdir                 compat_sys_old_readdir
+90     i386    mmap                    sys_old_mmap                    sys32_mmap
+91     i386    munmap                  sys_munmap
+92     i386    truncate                sys_truncate                    compat_sys_truncate
+93     i386    ftruncate               sys_ftruncate                   compat_sys_ftruncate
+94     i386    fchmod                  sys_fchmod
+95     i386    fchown                  sys_fchown16
+96     i386    getpriority             sys_getpriority
+97     i386    setpriority             sys_setpriority
+98     i386    profil
+99     i386    statfs                  sys_statfs                      compat_sys_statfs
+100    i386    fstatfs                 sys_fstatfs                     compat_sys_fstatfs
+101    i386    ioperm                  sys_ioperm
+102    i386    socketcall              sys_socketcall                  compat_sys_socketcall
+103    i386    syslog                  sys_syslog
+104    i386    setitimer               sys_setitimer                   compat_sys_setitimer
+105    i386    getitimer               sys_getitimer                   compat_sys_getitimer
+106    i386    stat                    sys_newstat                     compat_sys_newstat
+107    i386    lstat                   sys_newlstat                    compat_sys_newlstat
+108    i386    fstat                   sys_newfstat                    compat_sys_newfstat
+109    i386    olduname                sys_uname
+110    i386    iopl                    sys_iopl
+111    i386    vhangup                 sys_vhangup
+112    i386    idle
+113    i386    vm86old                 sys_vm86old                     sys_ni_syscall
+114    i386    wait4                   sys_wait4                       compat_sys_wait4
+115    i386    swapoff                 sys_swapoff
+116    i386    sysinfo                 sys_sysinfo                     compat_sys_sysinfo
+117    i386    ipc                     sys_ipc                         compat_sys_ipc
+118    i386    fsync                   sys_fsync
+119    i386    sigreturn               sys_sigreturn                   stub32_sigreturn
+120    i386    clone                   sys_clone                       stub32_clone
+121    i386    setdomainname           sys_setdomainname
+122    i386    uname                   sys_newuname
+123    i386    modify_ldt              sys_modify_ldt
+124    i386    adjtimex                sys_adjtimex                    compat_sys_adjtimex
+125    i386    mprotect                sys_mprotect
+126    i386    sigprocmask             sys_sigprocmask                 compat_sys_sigprocmask
+127    i386    create_module
+128    i386    init_module             sys_init_module
+129    i386    delete_module           sys_delete_module
+130    i386    get_kernel_syms
+131    i386    quotactl                sys_quotactl                    sys32_quotactl
+132    i386    getpgid                 sys_getpgid
+133    i386    fchdir                  sys_fchdir
+134    i386    bdflush                 sys_bdflush
+135    i386    sysfs                   sys_sysfs
+136    i386    personality             sys_personality
+137    i386    afs_syscall
+138    i386    setfsuid                sys_setfsuid16
+139    i386    setfsgid                sys_setfsgid16
+140    i386    _llseek                 sys_llseek
+141    i386    getdents                sys_getdents                    compat_sys_getdents
+142    i386    _newselect              sys_select                      compat_sys_select
+143    i386    flock                   sys_flock
+144    i386    msync                   sys_msync
+145    i386    readv                   sys_readv                       compat_sys_readv
+146    i386    writev                  sys_writev                      compat_sys_writev
+147    i386    getsid                  sys_getsid
+148    i386    fdatasync               sys_fdatasync
+149    i386    _sysctl                 sys_sysctl                      compat_sys_sysctl
+150    i386    mlock                   sys_mlock
+151    i386    munlock                 sys_munlock
+152    i386    mlockall                sys_mlockall
+153    i386    munlockall              sys_munlockall
+154    i386    sched_setparam          sys_sched_setparam
+155    i386    sched_getparam          sys_sched_getparam
+156    i386    sched_setscheduler      sys_sched_setscheduler
+157    i386    sched_getscheduler      sys_sched_getscheduler
+158    i386    sched_yield             sys_sched_yield
+159    i386    sched_get_priority_max  sys_sched_get_priority_max
+160    i386    sched_get_priority_min  sys_sched_get_priority_min
+161    i386    sched_rr_get_interval   sys_sched_rr_get_interval       compat_sys_sched_rr_get_interval
+162    i386    nanosleep               sys_nanosleep                   compat_sys_nanosleep
+163    i386    mremap                  sys_mremap
+164    i386    setresuid               sys_setresuid16
+165    i386    getresuid               sys_getresuid16
+166    i386    vm86                    sys_vm86                        sys_ni_syscall
+167    i386    query_module
+168    i386    poll                    sys_poll
+169    i386    nfsservctl
+170    i386    setresgid               sys_setresgid16
+171    i386    getresgid               sys_getresgid16
+172    i386    prctl                   sys_prctl
+173    i386    rt_sigreturn            sys_rt_sigreturn                stub32_rt_sigreturn
+174    i386    rt_sigaction            sys_rt_sigaction                compat_sys_rt_sigaction
+175    i386    rt_sigprocmask          sys_rt_sigprocmask
+176    i386    rt_sigpending           sys_rt_sigpending               compat_sys_rt_sigpending
+177    i386    rt_sigtimedwait         sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait
+178    i386    rt_sigqueueinfo         sys_rt_sigqueueinfo             compat_sys_rt_sigqueueinfo
+179    i386    rt_sigsuspend           sys_rt_sigsuspend
+180    i386    pread64                 sys_pread64                     sys32_pread
+181    i386    pwrite64                sys_pwrite64                    sys32_pwrite
+182    i386    chown                   sys_chown16
+183    i386    getcwd                  sys_getcwd
+184    i386    capget                  sys_capget
+185    i386    capset                  sys_capset
+186    i386    sigaltstack             sys_sigaltstack                 compat_sys_sigaltstack
+187    i386    sendfile                sys_sendfile                    compat_sys_sendfile
+188    i386    getpmsg
+189    i386    putpmsg
+190    i386    vfork                   sys_vfork                       stub32_vfork
+191    i386    ugetrlimit              sys_getrlimit                   compat_sys_getrlimit
+192    i386    mmap2                   sys_mmap_pgoff
+193    i386    truncate64              sys_truncate64                  sys32_truncate64
+194    i386    ftruncate64             sys_ftruncate64                 sys32_ftruncate64
+195    i386    stat64                  sys_stat64                      sys32_stat64
+196    i386    lstat64                 sys_lstat64                     sys32_lstat64
+197    i386    fstat64                 sys_fstat64                     sys32_fstat64
+198    i386    lchown32                sys_lchown
+199    i386    getuid32                sys_getuid
+200    i386    getgid32                sys_getgid
+201    i386    geteuid32               sys_geteuid
+202    i386    getegid32               sys_getegid
+203    i386    setreuid32              sys_setreuid
+204    i386    setregid32              sys_setregid
+205    i386    getgroups32             sys_getgroups
+206    i386    setgroups32             sys_setgroups
+207    i386    fchown32                sys_fchown
+208    i386    setresuid32             sys_setresuid
+209    i386    getresuid32             sys_getresuid
+210    i386    setresgid32             sys_setresgid
+211    i386    getresgid32             sys_getresgid
+212    i386    chown32                 sys_chown
+213    i386    setuid32                sys_setuid
+214    i386    setgid32                sys_setgid
+215    i386    setfsuid32              sys_setfsuid
+216    i386    setfsgid32              sys_setfsgid
+217    i386    pivot_root              sys_pivot_root
+218    i386    mincore                 sys_mincore
+219    i386    madvise                 sys_madvise
+220    i386    getdents64              sys_getdents64                  compat_sys_getdents64
+221    i386    fcntl64                 sys_fcntl64                     compat_sys_fcntl64
+# 222 is unused
+# 223 is unused
+224    i386    gettid                  sys_gettid
+225    i386    readahead               sys_readahead                   sys32_readahead
+226    i386    setxattr                sys_setxattr
+227    i386    lsetxattr               sys_lsetxattr
+228    i386    fsetxattr               sys_fsetxattr
+229    i386    getxattr                sys_getxattr
+230    i386    lgetxattr               sys_lgetxattr
+231    i386    fgetxattr               sys_fgetxattr
+232    i386    listxattr               sys_listxattr
+233    i386    llistxattr              sys_llistxattr
+234    i386    flistxattr              sys_flistxattr
+235    i386    removexattr             sys_removexattr
+236    i386    lremovexattr            sys_lremovexattr
+237    i386    fremovexattr            sys_fremovexattr
+238    i386    tkill                   sys_tkill
+239    i386    sendfile64              sys_sendfile64
+240    i386    futex                   sys_futex                       compat_sys_futex
+241    i386    sched_setaffinity       sys_sched_setaffinity           compat_sys_sched_setaffinity
+242    i386    sched_getaffinity       sys_sched_getaffinity           compat_sys_sched_getaffinity
+243    i386    set_thread_area         sys_set_thread_area
+244    i386    get_thread_area         sys_get_thread_area
+245    i386    io_setup                sys_io_setup                    compat_sys_io_setup
+246    i386    io_destroy              sys_io_destroy
+247    i386    io_getevents            sys_io_getevents                compat_sys_io_getevents
+248    i386    io_submit               sys_io_submit                   compat_sys_io_submit
+249    i386    io_cancel               sys_io_cancel
+250    i386    fadvise64               sys_fadvise64                   sys32_fadvise64
+# 251 is available for reuse (was briefly sys_set_zone_reclaim)
+252    i386    exit_group              sys_exit_group
+253    i386    lookup_dcookie          sys_lookup_dcookie              compat_sys_lookup_dcookie
+254    i386    epoll_create            sys_epoll_create
+255    i386    epoll_ctl               sys_epoll_ctl
+256    i386    epoll_wait              sys_epoll_wait
+257    i386    remap_file_pages        sys_remap_file_pages
+258    i386    set_tid_address         sys_set_tid_address
+259    i386    timer_create            sys_timer_create                compat_sys_timer_create
+260    i386    timer_settime           sys_timer_settime               compat_sys_timer_settime
+261    i386    timer_gettime           sys_timer_gettime               compat_sys_timer_gettime
+262    i386    timer_getoverrun        sys_timer_getoverrun
+263    i386    timer_delete            sys_timer_delete
+264    i386    clock_settime           sys_clock_settime               compat_sys_clock_settime
+265    i386    clock_gettime           sys_clock_gettime               compat_sys_clock_gettime
+266    i386    clock_getres            sys_clock_getres                compat_sys_clock_getres
+267    i386    clock_nanosleep         sys_clock_nanosleep             compat_sys_clock_nanosleep
+268    i386    statfs64                sys_statfs64                    compat_sys_statfs64
+269    i386    fstatfs64               sys_fstatfs64                   compat_sys_fstatfs64
+270    i386    tgkill                  sys_tgkill
+271    i386    utimes                  sys_utimes                      compat_sys_utimes
+272    i386    fadvise64_64            sys_fadvise64_64                sys32_fadvise64_64
+273    i386    vserver
+274    i386    mbind                   sys_mbind
+275    i386    get_mempolicy           sys_get_mempolicy               compat_sys_get_mempolicy
+276    i386    set_mempolicy           sys_set_mempolicy
+277    i386    mq_open                 sys_mq_open                     compat_sys_mq_open
+278    i386    mq_unlink               sys_mq_unlink
+279    i386    mq_timedsend            sys_mq_timedsend                compat_sys_mq_timedsend
+280    i386    mq_timedreceive         sys_mq_timedreceive             compat_sys_mq_timedreceive
+281    i386    mq_notify               sys_mq_notify                   compat_sys_mq_notify
+282    i386    mq_getsetattr           sys_mq_getsetattr               compat_sys_mq_getsetattr
+283    i386    kexec_load              sys_kexec_load                  compat_sys_kexec_load
+284    i386    waitid                  sys_waitid                      compat_sys_waitid
+# 285 sys_setaltroot
+286    i386    add_key                 sys_add_key
+287    i386    request_key             sys_request_key
+288    i386    keyctl                  sys_keyctl
+289    i386    ioprio_set              sys_ioprio_set
+290    i386    ioprio_get              sys_ioprio_get
+291    i386    inotify_init            sys_inotify_init
+292    i386    inotify_add_watch       sys_inotify_add_watch
+293    i386    inotify_rm_watch        sys_inotify_rm_watch
+294    i386    migrate_pages           sys_migrate_pages
+295    i386    openat                  sys_openat                      compat_sys_openat
+296    i386    mkdirat                 sys_mkdirat
+297    i386    mknodat                 sys_mknodat
+298    i386    fchownat                sys_fchownat
+299    i386    futimesat               sys_futimesat                   compat_sys_futimesat
+300    i386    fstatat64               sys_fstatat64                   sys32_fstatat
+301    i386    unlinkat                sys_unlinkat
+302    i386    renameat                sys_renameat
+303    i386    linkat                  sys_linkat
+304    i386    symlinkat               sys_symlinkat
+305    i386    readlinkat              sys_readlinkat
+306    i386    fchmodat                sys_fchmodat
+307    i386    faccessat               sys_faccessat
+308    i386    pselect6                sys_pselect6                    compat_sys_pselect6
+309    i386    ppoll                   sys_ppoll                       compat_sys_ppoll
+310    i386    unshare                 sys_unshare
+311    i386    set_robust_list         sys_set_robust_list             compat_sys_set_robust_list
+312    i386    get_robust_list         sys_get_robust_list             compat_sys_get_robust_list
+313    i386    splice                  sys_splice
+314    i386    sync_file_range         sys_sync_file_range             sys32_sync_file_range
+315    i386    tee                     sys_tee
+316    i386    vmsplice                sys_vmsplice                    compat_sys_vmsplice
+317    i386    move_pages              sys_move_pages                  compat_sys_move_pages
+318    i386    getcpu                  sys_getcpu
+319    i386    epoll_pwait             sys_epoll_pwait
+320    i386    utimensat               sys_utimensat                   compat_sys_utimensat
+321    i386    signalfd                sys_signalfd                    compat_sys_signalfd
+322    i386    timerfd_create          sys_timerfd_create
+323    i386    eventfd                 sys_eventfd
+324    i386    fallocate               sys_fallocate                   sys32_fallocate
+325    i386    timerfd_settime         sys_timerfd_settime             compat_sys_timerfd_settime
+326    i386    timerfd_gettime         sys_timerfd_gettime             compat_sys_timerfd_gettime
+327    i386    signalfd4               sys_signalfd4                   compat_sys_signalfd4
+328    i386    eventfd2                sys_eventfd2
+329    i386    epoll_create1           sys_epoll_create1
+330    i386    dup3                    sys_dup3
+331    i386    pipe2                   sys_pipe2
+332    i386    inotify_init1           sys_inotify_init1
+333    i386    preadv                  sys_preadv                      compat_sys_preadv
+334    i386    pwritev                 sys_pwritev                     compat_sys_pwritev
+335    i386    rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo           compat_sys_rt_tgsigqueueinfo
+336    i386    perf_event_open         sys_perf_event_open
+337    i386    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
+338    i386    fanotify_init           sys_fanotify_init
+339    i386    fanotify_mark           sys_fanotify_mark               compat_sys_fanotify_mark
+340    i386    prlimit64               sys_prlimit64
+341    i386    name_to_handle_at       sys_name_to_handle_at
+342    i386    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
+343    i386    clock_adjtime           sys_clock_adjtime               compat_sys_clock_adjtime
+344    i386    syncfs                  sys_syncfs
+345    i386    sendmmsg                sys_sendmmsg                    compat_sys_sendmmsg
+346    i386    setns                   sys_setns
+347    i386    process_vm_readv        sys_process_vm_readv            compat_sys_process_vm_readv
+348    i386    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
+349    i386    kcmp                    sys_kcmp
+350    i386    finit_module            sys_finit_module
+351    i386    sched_setattr           sys_sched_setattr
+352    i386    sched_getattr           sys_sched_getattr
+353    i386    renameat2               sys_renameat2
+354    i386    seccomp                 sys_seccomp
+355    i386    getrandom               sys_getrandom
+356    i386    memfd_create            sys_memfd_create
+357    i386    bpf                     sys_bpf
+358    i386    execveat                sys_execveat                    stub32_execveat
diff --git a/arch/x86/entry/syscalls/syscall_64.tbl b/arch/x86/entry/syscalls/syscall_64.tbl
new file mode 100644 (file)
index 0000000..9ef32d5
--- /dev/null
@@ -0,0 +1,370 @@
+#
+# 64-bit system call numbers and entry vectors
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The abi is "common", "64" or "x32" for this file.
+#
+0      common  read                    sys_read
+1      common  write                   sys_write
+2      common  open                    sys_open
+3      common  close                   sys_close
+4      common  stat                    sys_newstat
+5      common  fstat                   sys_newfstat
+6      common  lstat                   sys_newlstat
+7      common  poll                    sys_poll
+8      common  lseek                   sys_lseek
+9      common  mmap                    sys_mmap
+10     common  mprotect                sys_mprotect
+11     common  munmap                  sys_munmap
+12     common  brk                     sys_brk
+13     64      rt_sigaction            sys_rt_sigaction
+14     common  rt_sigprocmask          sys_rt_sigprocmask
+15     64      rt_sigreturn            stub_rt_sigreturn
+16     64      ioctl                   sys_ioctl
+17     common  pread64                 sys_pread64
+18     common  pwrite64                sys_pwrite64
+19     64      readv                   sys_readv
+20     64      writev                  sys_writev
+21     common  access                  sys_access
+22     common  pipe                    sys_pipe
+23     common  select                  sys_select
+24     common  sched_yield             sys_sched_yield
+25     common  mremap                  sys_mremap
+26     common  msync                   sys_msync
+27     common  mincore                 sys_mincore
+28     common  madvise                 sys_madvise
+29     common  shmget                  sys_shmget
+30     common  shmat                   sys_shmat
+31     common  shmctl                  sys_shmctl
+32     common  dup                     sys_dup
+33     common  dup2                    sys_dup2
+34     common  pause                   sys_pause
+35     common  nanosleep               sys_nanosleep
+36     common  getitimer               sys_getitimer
+37     common  alarm                   sys_alarm
+38     common  setitimer               sys_setitimer
+39     common  getpid                  sys_getpid
+40     common  sendfile                sys_sendfile64
+41     common  socket                  sys_socket
+42     common  connect                 sys_connect
+43     common  accept                  sys_accept
+44     common  sendto                  sys_sendto
+45     64      recvfrom                sys_recvfrom
+46     64      sendmsg                 sys_sendmsg
+47     64      recvmsg                 sys_recvmsg
+48     common  shutdown                sys_shutdown
+49     common  bind                    sys_bind
+50     common  listen                  sys_listen
+51     common  getsockname             sys_getsockname
+52     common  getpeername             sys_getpeername
+53     common  socketpair              sys_socketpair
+54     64      setsockopt              sys_setsockopt
+55     64      getsockopt              sys_getsockopt
+56     common  clone                   stub_clone
+57     common  fork                    stub_fork
+58     common  vfork                   stub_vfork
+59     64      execve                  stub_execve
+60     common  exit                    sys_exit
+61     common  wait4                   sys_wait4
+62     common  kill                    sys_kill
+63     common  uname                   sys_newuname
+64     common  semget                  sys_semget
+65     common  semop                   sys_semop
+66     common  semctl                  sys_semctl
+67     common  shmdt                   sys_shmdt
+68     common  msgget                  sys_msgget
+69     common  msgsnd                  sys_msgsnd
+70     common  msgrcv                  sys_msgrcv
+71     common  msgctl                  sys_msgctl
+72     common  fcntl                   sys_fcntl
+73     common  flock                   sys_flock
+74     common  fsync                   sys_fsync
+75     common  fdatasync               sys_fdatasync
+76     common  truncate                sys_truncate
+77     common  ftruncate               sys_ftruncate
+78     common  getdents                sys_getdents
+79     common  getcwd                  sys_getcwd
+80     common  chdir                   sys_chdir
+81     common  fchdir                  sys_fchdir
+82     common  rename                  sys_rename
+83     common  mkdir                   sys_mkdir
+84     common  rmdir                   sys_rmdir
+85     common  creat                   sys_creat
+86     common  link                    sys_link
+87     common  unlink                  sys_unlink
+88     common  symlink                 sys_symlink
+89     common  readlink                sys_readlink
+90     common  chmod                   sys_chmod
+91     common  fchmod                  sys_fchmod
+92     common  chown                   sys_chown
+93     common  fchown                  sys_fchown
+94     common  lchown                  sys_lchown
+95     common  umask                   sys_umask
+96     common  gettimeofday            sys_gettimeofday
+97     common  getrlimit               sys_getrlimit
+98     common  getrusage               sys_getrusage
+99     common  sysinfo                 sys_sysinfo
+100    common  times                   sys_times
+101    64      ptrace                  sys_ptrace
+102    common  getuid                  sys_getuid
+103    common  syslog                  sys_syslog
+104    common  getgid                  sys_getgid
+105    common  setuid                  sys_setuid
+106    common  setgid                  sys_setgid
+107    common  geteuid                 sys_geteuid
+108    common  getegid                 sys_getegid
+109    common  setpgid                 sys_setpgid
+110    common  getppid                 sys_getppid
+111    common  getpgrp                 sys_getpgrp
+112    common  setsid                  sys_setsid
+113    common  setreuid                sys_setreuid
+114    common  setregid                sys_setregid
+115    common  getgroups               sys_getgroups
+116    common  setgroups               sys_setgroups
+117    common  setresuid               sys_setresuid
+118    common  getresuid               sys_getresuid
+119    common  setresgid               sys_setresgid
+120    common  getresgid               sys_getresgid
+121    common  getpgid                 sys_getpgid
+122    common  setfsuid                sys_setfsuid
+123    common  setfsgid                sys_setfsgid
+124    common  getsid                  sys_getsid
+125    common  capget                  sys_capget
+126    common  capset                  sys_capset
+127    64      rt_sigpending           sys_rt_sigpending
+128    64      rt_sigtimedwait         sys_rt_sigtimedwait
+129    64      rt_sigqueueinfo         sys_rt_sigqueueinfo
+130    common  rt_sigsuspend           sys_rt_sigsuspend
+131    64      sigaltstack             sys_sigaltstack
+132    common  utime                   sys_utime
+133    common  mknod                   sys_mknod
+134    64      uselib
+135    common  personality             sys_personality
+136    common  ustat                   sys_ustat
+137    common  statfs                  sys_statfs
+138    common  fstatfs                 sys_fstatfs
+139    common  sysfs                   sys_sysfs
+140    common  getpriority             sys_getpriority
+141    common  setpriority             sys_setpriority
+142    common  sched_setparam          sys_sched_setparam
+143    common  sched_getparam          sys_sched_getparam
+144    common  sched_setscheduler      sys_sched_setscheduler
+145    common  sched_getscheduler      sys_sched_getscheduler
+146    common  sched_get_priority_max  sys_sched_get_priority_max
+147    common  sched_get_priority_min  sys_sched_get_priority_min
+148    common  sched_rr_get_interval   sys_sched_rr_get_interval
+149    common  mlock                   sys_mlock
+150    common  munlock                 sys_munlock
+151    common  mlockall                sys_mlockall
+152    common  munlockall              sys_munlockall
+153    common  vhangup                 sys_vhangup
+154    common  modify_ldt              sys_modify_ldt
+155    common  pivot_root              sys_pivot_root
+156    64      _sysctl                 sys_sysctl
+157    common  prctl                   sys_prctl
+158    common  arch_prctl              sys_arch_prctl
+159    common  adjtimex                sys_adjtimex
+160    common  setrlimit               sys_setrlimit
+161    common  chroot                  sys_chroot
+162    common  sync                    sys_sync
+163    common  acct                    sys_acct
+164    common  settimeofday            sys_settimeofday
+165    common  mount                   sys_mount
+166    common  umount2                 sys_umount
+167    common  swapon                  sys_swapon
+168    common  swapoff                 sys_swapoff
+169    common  reboot                  sys_reboot
+170    common  sethostname             sys_sethostname
+171    common  setdomainname           sys_setdomainname
+172    common  iopl                    sys_iopl
+173    common  ioperm                  sys_ioperm
+174    64      create_module
+175    common  init_module             sys_init_module
+176    common  delete_module           sys_delete_module
+177    64      get_kernel_syms
+178    64      query_module
+179    common  quotactl                sys_quotactl
+180    64      nfsservctl
+181    common  getpmsg
+182    common  putpmsg
+183    common  afs_syscall
+184    common  tuxcall
+185    common  security
+186    common  gettid                  sys_gettid
+187    common  readahead               sys_readahead
+188    common  setxattr                sys_setxattr
+189    common  lsetxattr               sys_lsetxattr
+190    common  fsetxattr               sys_fsetxattr
+191    common  getxattr                sys_getxattr
+192    common  lgetxattr               sys_lgetxattr
+193    common  fgetxattr               sys_fgetxattr
+194    common  listxattr               sys_listxattr
+195    common  llistxattr              sys_llistxattr
+196    common  flistxattr              sys_flistxattr
+197    common  removexattr             sys_removexattr
+198    common  lremovexattr            sys_lremovexattr
+199    common  fremovexattr            sys_fremovexattr
+200    common  tkill                   sys_tkill
+201    common  time                    sys_time
+202    common  futex                   sys_futex
+203    common  sched_setaffinity       sys_sched_setaffinity
+204    common  sched_getaffinity       sys_sched_getaffinity
+205    64      set_thread_area
+206    64      io_setup                sys_io_setup
+207    common  io_destroy              sys_io_destroy
+208    common  io_getevents            sys_io_getevents
+209    64      io_submit               sys_io_submit
+210    common  io_cancel               sys_io_cancel
+211    64      get_thread_area
+212    common  lookup_dcookie          sys_lookup_dcookie
+213    common  epoll_create            sys_epoll_create
+214    64      epoll_ctl_old
+215    64      epoll_wait_old
+216    common  remap_file_pages        sys_remap_file_pages
+217    common  getdents64              sys_getdents64
+218    common  set_tid_address         sys_set_tid_address
+219    common  restart_syscall         sys_restart_syscall
+220    common  semtimedop              sys_semtimedop
+221    common  fadvise64               sys_fadvise64
+222    64      timer_create            sys_timer_create
+223    common  timer_settime           sys_timer_settime
+224    common  timer_gettime           sys_timer_gettime
+225    common  timer_getoverrun        sys_timer_getoverrun
+226    common  timer_delete            sys_timer_delete
+227    common  clock_settime           sys_clock_settime
+228    common  clock_gettime           sys_clock_gettime
+229    common  clock_getres            sys_clock_getres
+230    common  clock_nanosleep         sys_clock_nanosleep
+231    common  exit_group              sys_exit_group
+232    common  epoll_wait              sys_epoll_wait
+233    common  epoll_ctl               sys_epoll_ctl
+234    common  tgkill                  sys_tgkill
+235    common  utimes                  sys_utimes
+236    64      vserver
+237    common  mbind                   sys_mbind
+238    common  set_mempolicy           sys_set_mempolicy
+239    common  get_mempolicy           sys_get_mempolicy
+240    common  mq_open                 sys_mq_open
+241    common  mq_unlink               sys_mq_unlink
+242    common  mq_timedsend            sys_mq_timedsend
+243    common  mq_timedreceive         sys_mq_timedreceive
+244    64      mq_notify               sys_mq_notify
+245    common  mq_getsetattr           sys_mq_getsetattr
+246    64      kexec_load              sys_kexec_load
+247    64      waitid                  sys_waitid
+248    common  add_key                 sys_add_key
+249    common  request_key             sys_request_key
+250    common  keyctl                  sys_keyctl
+251    common  ioprio_set              sys_ioprio_set
+252    common  ioprio_get              sys_ioprio_get
+253    common  inotify_init            sys_inotify_init
+254    common  inotify_add_watch       sys_inotify_add_watch
+255    common  inotify_rm_watch        sys_inotify_rm_watch
+256    common  migrate_pages           sys_migrate_pages
+257    common  openat                  sys_openat
+258    common  mkdirat                 sys_mkdirat
+259    common  mknodat                 sys_mknodat
+260    common  fchownat                sys_fchownat
+261    common  futimesat               sys_futimesat
+262    common  newfstatat              sys_newfstatat
+263    common  unlinkat                sys_unlinkat
+264    common  renameat                sys_renameat
+265    common  linkat                  sys_linkat
+266    common  symlinkat               sys_symlinkat
+267    common  readlinkat              sys_readlinkat
+268    common  fchmodat                sys_fchmodat
+269    common  faccessat               sys_faccessat
+270    common  pselect6                sys_pselect6
+271    common  ppoll                   sys_ppoll
+272    common  unshare                 sys_unshare
+273    64      set_robust_list         sys_set_robust_list
+274    64      get_robust_list         sys_get_robust_list
+275    common  splice                  sys_splice
+276    common  tee                     sys_tee
+277    common  sync_file_range         sys_sync_file_range
+278    64      vmsplice                sys_vmsplice
+279    64      move_pages              sys_move_pages
+280    common  utimensat               sys_utimensat
+281    common  epoll_pwait             sys_epoll_pwait
+282    common  signalfd                sys_signalfd
+283    common  timerfd_create          sys_timerfd_create
+284    common  eventfd                 sys_eventfd
+285    common  fallocate               sys_fallocate
+286    common  timerfd_settime         sys_timerfd_settime
+287    common  timerfd_gettime         sys_timerfd_gettime
+288    common  accept4                 sys_accept4
+289    common  signalfd4               sys_signalfd4
+290    common  eventfd2                sys_eventfd2
+291    common  epoll_create1           sys_epoll_create1
+292    common  dup3                    sys_dup3
+293    common  pipe2                   sys_pipe2
+294    common  inotify_init1           sys_inotify_init1
+295    64      preadv                  sys_preadv
+296    64      pwritev                 sys_pwritev
+297    64      rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo
+298    common  perf_event_open         sys_perf_event_open
+299    64      recvmmsg                sys_recvmmsg
+300    common  fanotify_init           sys_fanotify_init
+301    common  fanotify_mark           sys_fanotify_mark
+302    common  prlimit64               sys_prlimit64
+303    common  name_to_handle_at       sys_name_to_handle_at
+304    common  open_by_handle_at       sys_open_by_handle_at
+305    common  clock_adjtime           sys_clock_adjtime
+306    common  syncfs                  sys_syncfs
+307    64      sendmmsg                sys_sendmmsg
+308    common  setns                   sys_setns
+309    common  getcpu                  sys_getcpu
+310    64      process_vm_readv        sys_process_vm_readv
+311    64      process_vm_writev       sys_process_vm_writev
+312    common  kcmp                    sys_kcmp
+313    common  finit_module            sys_finit_module
+314    common  sched_setattr           sys_sched_setattr
+315    common  sched_getattr           sys_sched_getattr
+316    common  renameat2               sys_renameat2
+317    common  seccomp                 sys_seccomp
+318    common  getrandom               sys_getrandom
+319    common  memfd_create            sys_memfd_create
+320    common  kexec_file_load         sys_kexec_file_load
+321    common  bpf                     sys_bpf
+322    64      execveat                stub_execveat
+
+#
+# x32-specific system call numbers start at 512 to avoid cache impact
+# for native 64-bit operation.
+#
+512    x32     rt_sigaction            compat_sys_rt_sigaction
+513    x32     rt_sigreturn            stub_x32_rt_sigreturn
+514    x32     ioctl                   compat_sys_ioctl
+515    x32     readv                   compat_sys_readv
+516    x32     writev                  compat_sys_writev
+517    x32     recvfrom                compat_sys_recvfrom
+518    x32     sendmsg                 compat_sys_sendmsg
+519    x32     recvmsg                 compat_sys_recvmsg
+520    x32     execve                  stub_x32_execve
+521    x32     ptrace                  compat_sys_ptrace
+522    x32     rt_sigpending           compat_sys_rt_sigpending
+523    x32     rt_sigtimedwait         compat_sys_rt_sigtimedwait
+524    x32     rt_sigqueueinfo         compat_sys_rt_sigqueueinfo
+525    x32     sigaltstack             compat_sys_sigaltstack
+526    x32     timer_create            compat_sys_timer_create
+527    x32     mq_notify               compat_sys_mq_notify
+528    x32     kexec_load              compat_sys_kexec_load
+529    x32     waitid                  compat_sys_waitid
+530    x32     set_robust_list         compat_sys_set_robust_list
+531    x32     get_robust_list         compat_sys_get_robust_list
+532    x32     vmsplice                compat_sys_vmsplice
+533    x32     move_pages              compat_sys_move_pages
+534    x32     preadv                  compat_sys_preadv64
+535    x32     pwritev                 compat_sys_pwritev64
+536    x32     rt_tgsigqueueinfo       compat_sys_rt_tgsigqueueinfo
+537    x32     recvmmsg                compat_sys_recvmmsg
+538    x32     sendmmsg                compat_sys_sendmmsg
+539    x32     process_vm_readv        compat_sys_process_vm_readv
+540    x32     process_vm_writev       compat_sys_process_vm_writev
+541    x32     setsockopt              compat_sys_setsockopt
+542    x32     getsockopt              compat_sys_getsockopt
+543    x32     io_setup                compat_sys_io_setup
+544    x32     io_submit               compat_sys_io_submit
+545    x32     execveat                stub_x32_execveat
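
For illustration, the 512-based numbers above are combined with an ABI marker
bit when an x32 process actually makes a system call; a minimal sketch,
assuming the __X32_SYSCALL_BIT encoding (0x40000000) from the uapi headers:

    /* Sketch: forming an x32 syscall number (illustrative only). */
    #include <stdio.h>

    #define __X32_SYSCALL_BIT 0x40000000
    #define X32_NR_rt_sigaction (__X32_SYSCALL_BIT + 512)

    int main(void)
    {
            /* 0x40000200: bit 30 marks the x32 ABI, the low bits select
             * table entry 512 (rt_sigaction above). */
            printf("x32 rt_sigaction nr = %#x\n", X32_NR_rt_sigaction);
            return 0;
    }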
diff --git a/arch/x86/entry/syscalls/syscallhdr.sh b/arch/x86/entry/syscalls/syscallhdr.sh
new file mode 100644 (file)
index 0000000..31fd5f1
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/sh
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_ASM_X86_`basename "$out" | sed \
+    -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+    echo "#ifndef ${fileguard}"
+    echo "#define ${fileguard} 1"
+    echo ""
+
+    while read nr abi name entry ; do
+       if [ -z "$offset" ]; then
+           echo "#define __NR_${prefix}${name} $nr"
+       else
+           echo "#define __NR_${prefix}${name} ($offset + $nr)"
+        fi
+    done
+
+    echo ""
+    echo "#endif /* ${fileguard} */"
+) > "$out"
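
Run against the 64-bit table above, the script produces an ordinary guarded
header of __NR_ constants; roughly (guard and values derived from the table,
shown here as a sketch):

    #ifndef _ASM_X86_UNISTD_64_H
    #define _ASM_X86_UNISTD_64_H 1

    #define __NR_gettid 186
    #define __NR_getrandom 318

    #endif /* _ASM_X86_UNISTD_64_H */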
diff --git a/arch/x86/entry/syscalls/syscalltbl.sh b/arch/x86/entry/syscalls/syscalltbl.sh
new file mode 100644 (file)
index 0000000..0e7f8ec
--- /dev/null
@@ -0,0 +1,15 @@
+#!/bin/sh
+
+in="$1"
+out="$2"
+
+grep '^[0-9]' "$in" | sort -n | (
+    while read nr abi name entry compat; do
+       abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
+       if [ -n "$compat" ]; then
+           echo "__SYSCALL_${abi}($nr, $entry, $compat)"
+       elif [ -n "$entry" ]; then
+           echo "__SYSCALL_${abi}($nr, $entry, $entry)"
+       fi
+    done
+) > "$out"
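
Each emitted __SYSCALL_<ABI>(nr, entry, compat) line means nothing by itself;
the C file that includes the generated table defines the macro to suit its
purpose. A simplified sketch of the usual consumption pattern (modelled on
arch/x86's syscall_64.c; details elided):

    /* First expansion: declare every entry point. */
    #define __SYSCALL_64(nr, sym, compat) extern asmlinkage long sym(void);
    #include <asm/syscalls_64.h>
    #undef __SYSCALL_64

    /* Second expansion: build the dispatch table indexed by syscall number. */
    #define __SYSCALL_64(nr, sym, compat) [nr] = sym,
    const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
            [0 ... __NR_syscall_max] = &sys_ni_syscall,
    #include <asm/syscalls_64.h>
    };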
diff --git a/arch/x86/entry/thunk_32.S b/arch/x86/entry/thunk_32.S
new file mode 100644 (file)
index 0000000..e5a1711
--- /dev/null
@@ -0,0 +1,40 @@
+/*
+ * Trampoline to trace irqs off. (otherwise CALLER_ADDR1 might crash)
+ * Copyright 2008 by Steven Rostedt, Red Hat, Inc
+ *  (inspired by Andi Kleen's thunk_64.S)
+ * Subject to the GNU public license, v.2. No warranty of any kind.
+ */
+       #include <linux/linkage.h>
+       #include <asm/asm.h>
+
+       /* put return address in eax (arg1) */
+       .macro THUNK name, func, put_ret_addr_in_eax=0
+       .globl \name
+\name:
+       pushl %eax
+       pushl %ecx
+       pushl %edx
+
+       .if \put_ret_addr_in_eax
+       /* Place the return address (EIP) in arg1 */
+       movl 3*4(%esp), %eax
+       .endif
+
+       call \func
+       popl %edx
+       popl %ecx
+       popl %eax
+       ret
+       _ASM_NOKPROBE(\name)
+       .endm
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
+       THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
+#endif
+
+#ifdef CONFIG_PREEMPT
+       THUNK ___preempt_schedule, preempt_schedule
+       THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+#endif
+
diff --git a/arch/x86/entry/thunk_64.S b/arch/x86/entry/thunk_64.S
new file mode 100644 (file)
index 0000000..efb2b93
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * Save registers before calling assembly functions. This avoids
+ * disturbance of register allocation in some inline assembly constructs.
+ * Copyright 2001,2002 by Andi Kleen, SuSE Labs.
+ * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
+ * Subject to the GNU public license, v.2. No warranty of any kind.
+ */
+#include <linux/linkage.h>
+#include "calling.h"
+#include <asm/asm.h>
+
+       /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
+       .macro THUNK name, func, put_ret_addr_in_rdi=0
+       .globl \name
+\name:
+
+       /* this one pushes 9 slots; the slot above them holds the return %rip */
+       pushq %rdi
+       pushq %rsi
+       pushq %rdx
+       pushq %rcx
+       pushq %rax
+       pushq %r8
+       pushq %r9
+       pushq %r10
+       pushq %r11
+
+       .if \put_ret_addr_in_rdi
+       /* 9*8(%rsp) is return addr on stack */
+       movq 9*8(%rsp), %rdi
+       .endif
+
+       call \func
+       jmp  restore
+       _ASM_NOKPROBE(\name)
+       .endm
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+       THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
+       THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
+#endif
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+       THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
+#endif
+
+#ifdef CONFIG_PREEMPT
+       THUNK ___preempt_schedule, preempt_schedule
+       THUNK ___preempt_schedule_notrace, preempt_schedule_notrace
+#endif
+
+#if defined(CONFIG_TRACE_IRQFLAGS) \
+ || defined(CONFIG_DEBUG_LOCK_ALLOC) \
+ || defined(CONFIG_PREEMPT)
+restore:
+       popq %r11
+       popq %r10
+       popq %r9
+       popq %r8
+       popq %rax
+       popq %rcx
+       popq %rdx
+       popq %rsi
+       popq %rdi
+       ret
+       _ASM_NOKPROBE(restore)
+#endif
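
The payoff is at the call sites: inline asm can call a thunk without listing
every caller-clobbered register, because the thunk saves and restores them
itself. A hedged sketch, modelled on the preempt_schedule call in
arch/x86/include/asm/preempt.h:

    /* Only "memory" needs to be clobbered; the thunk preserves the
     * call-clobbered GP registers around preempt_schedule(). */
    #define __preempt_schedule() \
            asm volatile ("call ___preempt_schedule" : : : "memory")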
diff --git a/arch/x86/entry/vdso/.gitignore b/arch/x86/entry/vdso/.gitignore
new file mode 100644 (file)
index 0000000..aae8ffd
--- /dev/null
@@ -0,0 +1,7 @@
+vdso.lds
+vdsox32.lds
+vdso32-syscall-syms.lds
+vdso32-sysenter-syms.lds
+vdso32-int80-syms.lds
+vdso-image-*.c
+vdso2c
diff --git a/arch/x86/entry/vdso/Makefile b/arch/x86/entry/vdso/Makefile
new file mode 100644 (file)
index 0000000..e970320
--- /dev/null
@@ -0,0 +1,209 @@
+#
+# Building vDSO images for x86.
+#
+
+KBUILD_CFLAGS += $(DISABLE_LTO)
+KASAN_SANITIZE := n
+
+VDSO64-$(CONFIG_X86_64)                := y
+VDSOX32-$(CONFIG_X86_X32_ABI)  := y
+VDSO32-$(CONFIG_X86_32)                := y
+VDSO32-$(CONFIG_COMPAT)                := y
+
+# files to link into the vdso
+vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
+
+# files to link into kernel
+obj-y                          += vma.o
+
+# vDSO images to build
+vdso_img-$(VDSO64-y)           += 64
+vdso_img-$(VDSOX32-y)          += x32
+vdso_img-$(VDSO32-y)           += 32-int80
+vdso_img-$(CONFIG_COMPAT)      += 32-syscall
+vdso_img-$(VDSO32-y)           += 32-sysenter
+
+obj-$(VDSO32-y)                        += vdso32-setup.o
+
+vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
+
+$(obj)/vdso.o: $(obj)/vdso.so
+
+targets += vdso.lds $(vobjs-y)
+
+# Build the vDSO image C files and link them in.
+vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
+vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
+vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
+obj-y += $(vdso_img_objs)
+targets += $(vdso_img_cfiles)
+targets += $(vdso_img_sodbg)
+.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
+       $(vdso_img-y:%=$(obj)/vdso%.so)
+
+export CPPFLAGS_vdso.lds += -P -C
+
+VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
+                       -Wl,--no-undefined \
+                       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
+                       $(DISABLE_LTO)
+
+$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
+       $(call if_changed,vdso)
+
+HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi -I$(srctree)/arch/x86/include/uapi
+hostprogs-y                    += vdso2c
+
+quiet_cmd_vdso2c = VDSO2C  $@
+define cmd_vdso2c
+       $(obj)/vdso2c $< $(<:%.dbg=%) $@
+endef
+
+$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
+       $(call if_changed,vdso2c)
+
+#
+# Don't omit frame pointers for ease of userspace debugging, but do
+# optimize sibling calls.
+#
+CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
+       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
+       -fno-omit-frame-pointer -foptimize-sibling-calls \
+       -DDISABLE_BRANCH_PROFILING
+
+$(vobjs): KBUILD_CFLAGS += $(CFL)
+
+#
+# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
+#
+CFLAGS_REMOVE_vdso-note.o = -pg
+CFLAGS_REMOVE_vclock_gettime.o = -pg
+CFLAGS_REMOVE_vgetcpu.o = -pg
+CFLAGS_REMOVE_vvar.o = -pg
+
+#
+# X32 processes use the x32 vDSO to access 64-bit kernel data.
+#
+# Build the x32 vDSO image:
+# 1. Compile the x32 vDSO as 64-bit.
+# 2. Convert the object files to x32.
+# 3. Link the x32 vDSO image from the x32 objects; it still contains 64-bit
+#    code so that it can reach the 64-bit address space with 64-bit pointers.
+#
+
+CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
+VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
+                          -Wl,-soname=linux-vdso.so.1 \
+                          -Wl,-z,max-page-size=4096 \
+                          -Wl,-z,common-page-size=4096
+
+# 64-bit objects to re-brand as x32
+vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
+
+# x32-rebranded versions
+vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o)
+
+# same thing, but in the output directory
+vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
+
+# Convert 64bit object file to x32 for x32 vDSO.
+quiet_cmd_x32 = X32     $@
+      cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@
+
+$(obj)/%-x32.o: $(obj)/%.o FORCE
+       $(call if_changed,x32)
+
+targets += vdsox32.lds $(vobjx32s-y)
+
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg
+       $(call if_changed,objcopy)
+
+$(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
+       $(call if_changed,vdso)
+
+#
+# Build multiple 32-bit vDSO images to choose from at boot time.
+#
+vdso32.so-$(VDSO32-y)          += int80
+vdso32.so-$(CONFIG_COMPAT)     += syscall
+vdso32.so-$(VDSO32-y)          += sysenter
+
+vdso32-images                  = $(vdso32.so-y:%=vdso32-%.so)
+
+CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
+VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
+
+# This makes sure the $(obj) subdirectory exists even though vdso32/
+# is not a kbuild sub-make subdirectory.
+override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
+
+targets += vdso32/vdso32.lds
+targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
+targets += vdso32/vclock_gettime.o
+
+$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
+
+KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
+$(vdso32-images:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
+$(vdso32-images:%=$(obj)/%.dbg): asflags-$(CONFIG_X86_64) += -m32
+
+KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
+KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
+KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
+KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
+KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
+KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
+KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
+$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
+
+$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
+                                $(obj)/vdso32/vdso32.lds \
+                                $(obj)/vdso32/vclock_gettime.o \
+                                $(obj)/vdso32/note.o \
+                                $(obj)/vdso32/%.o
+       $(call if_changed,vdso)
+
+#
+# The DSO images are built using a special linker script.
+#
+quiet_cmd_vdso = VDSO    $@
+      cmd_vdso = $(CC) -nostdlib -o $@ \
+                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
+                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
+                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
+
+VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
+       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
+GCOV_PROFILE := n
+
+#
+# Install the unstripped copies of vdso*.so.  If our toolchain supports
+# build-id, install .build-id links as well.
+#
+quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
+define cmd_vdso_install
+       cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
+       if readelf -n $< |grep -q 'Build ID'; then \
+         buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
+         first=`echo $$buildid | cut -b-2`; \
+         last=`echo $$buildid | cut -b3-`; \
+         mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
+         ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
+       fi
+endef
+
+vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
+
+$(MODLIB)/vdso: FORCE
+       @mkdir -p $(MODLIB)/vdso
+
+$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
+       $(call cmd,vdso_install)
+
+PHONY += vdso_install $(vdso_img_insttargets)
+vdso_install: $(vdso_img_insttargets) FORCE
+
+clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64* vdso-image-*.c vdsox32.so*
diff --git a/arch/x86/entry/vdso/checkundef.sh b/arch/x86/entry/vdso/checkundef.sh
new file mode 100755 (executable)
index 0000000..7ee90a9
--- /dev/null
@@ -0,0 +1,10 @@
+#!/bin/sh
+nm="$1"
+file="$2"
+$nm "$file" | grep '^ *U' > /dev/null 2>&1
+if [ $? -eq 1 ]; then
+    exit 0
+else
+    echo "$file: undefined symbols found" >&2
+    exit 1
+fi
diff --git a/arch/x86/entry/vdso/vclock_gettime.c b/arch/x86/entry/vdso/vclock_gettime.c
new file mode 100644 (file)
index 0000000..9793322
--- /dev/null
@@ -0,0 +1,351 @@
+/*
+ * Copyright 2006 Andi Kleen, SUSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Fast user context implementation of clock_gettime, gettimeofday, and time.
+ *
+ * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
+ *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
+ *
+ * The code should have no internal unresolved relocations.
+ * Check with readelf after changing.
+ */
+
+#include <uapi/linux/time.h>
+#include <asm/vgtod.h>
+#include <asm/hpet.h>
+#include <asm/vvar.h>
+#include <asm/unistd.h>
+#include <asm/msr.h>
+#include <linux/math64.h>
+#include <linux/time.h>
+
+#define gtod (&VVAR(vsyscall_gtod_data))
+
+extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
+extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
+extern time_t __vdso_time(time_t *t);
+
+#ifdef CONFIG_HPET_TIMER
+extern u8 hpet_page
+       __attribute__((visibility("hidden")));
+
+static notrace cycle_t vread_hpet(void)
+{
+       return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
+}
+#endif
+
+#ifndef BUILD_VDSO32
+
+#include <linux/kernel.h>
+#include <asm/vsyscall.h>
+#include <asm/fixmap.h>
+#include <asm/pvclock.h>
+
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+       long ret;
+       asm("syscall" : "=a" (ret) :
+           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
+       return ret;
+}
+
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+       long ret;
+
+       asm("syscall" : "=a" (ret) :
+           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
+       return ret;
+}
+
+#ifdef CONFIG_PARAVIRT_CLOCK
+
+static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
+{
+       const struct pvclock_vsyscall_time_info *pvti_base;
+       int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
+       int offset = cpu % (PAGE_SIZE/PVTI_SIZE);
+
+       BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);
+
+       pvti_base = (struct pvclock_vsyscall_time_info *)
+                   __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);
+
+       return &pvti_base[offset];
+}
+
+static notrace cycle_t vread_pvclock(int *mode)
+{
+       const struct pvclock_vsyscall_time_info *pvti;
+       cycle_t ret;
+       u64 last;
+       u32 version;
+       u8 flags;
+       unsigned cpu, cpu1;
+
+
+       /*
+        * Note: the hypervisor must guarantee that:
+        * 1. the CPU ID number maps 1:1 to per-CPU pvclock time info;
+        * 2. the per-CPU pvclock time info is updated if the
+        *    underlying CPU changes;
+        * 3. the version is increased whenever the underlying CPU
+        *    changes.
+        */
+       do {
+               cpu = __getcpu() & VGETCPU_CPU_MASK;
+               /* TODO: We can put vcpu id into higher bits of pvti.version.
+                * This will save a couple of cycles by getting rid of
+                * __getcpu() calls (Gleb).
+                */
+
+               pvti = get_pvti(cpu);
+
+               version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
+
+               /*
+                * Test we're still on the cpu as well as the version.
+                * We could have been migrated just after the first
+                * vgetcpu but before fetching the version, so we
+                * wouldn't notice a version change.
+                */
+               cpu1 = __getcpu() & VGETCPU_CPU_MASK;
+       } while (unlikely(cpu != cpu1 ||
+                         (pvti->pvti.version & 1) ||
+                         pvti->pvti.version != version));
+
+       if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
+               *mode = VCLOCK_NONE;
+
+       /* refer to tsc.c read_tsc() comment for rationale */
+       last = gtod->cycle_last;
+
+       if (likely(ret >= last))
+               return ret;
+
+       return last;
+}
+#endif
+
+#else
+
+notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
+{
+       long ret;
+
+       asm(
+               "mov %%ebx, %%edx \n"
+               "mov %2, %%ebx \n"
+               "call __kernel_vsyscall \n"
+               "mov %%edx, %%ebx \n"
+               : "=a" (ret)
+               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
+               : "memory", "edx");
+       return ret;
+}
+
+notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
+{
+       long ret;
+
+       asm(
+               "mov %%ebx, %%edx \n"
+               "mov %2, %%ebx \n"
+               "call __kernel_vsyscall \n"
+               "mov %%edx, %%ebx \n"
+               : "=a" (ret)
+               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
+               : "memory", "edx");
+       return ret;
+}
+
+#ifdef CONFIG_PARAVIRT_CLOCK
+
+static notrace cycle_t vread_pvclock(int *mode)
+{
+       *mode = VCLOCK_NONE;
+       return 0;
+}
+#endif
+
+#endif
+
+notrace static cycle_t vread_tsc(void)
+{
+       cycle_t ret;
+       u64 last;
+
+       /*
+        * Empirically, a fence (of type that depends on the CPU)
+        * before rdtsc is enough to ensure that rdtsc is ordered
+        * with respect to loads.  The various CPU manuals are unclear
+        * as to whether rdtsc can be reordered with later loads,
+        * but no one has ever seen it happen.
+        */
+       rdtsc_barrier();
+       ret = (cycle_t)__native_read_tsc();
+
+       last = gtod->cycle_last;
+
+       if (likely(ret >= last))
+               return ret;
+
+       /*
+        * GCC likes to generate cmov here, but this branch is extremely
+        * predictable (it's just a function of time and the likely is
+        * very likely) and there's a data dependence, so force GCC
+        * to generate a branch instead.  I don't barrier() because
+        * we don't actually need a barrier, and if this function
+        * ever gets inlined it will generate worse code.
+        */
+       asm volatile ("");
+       return last;
+}
+
+notrace static inline u64 vgetsns(int *mode)
+{
+       u64 v;
+       cycles_t cycles;
+
+       if (gtod->vclock_mode == VCLOCK_TSC)
+               cycles = vread_tsc();
+#ifdef CONFIG_HPET_TIMER
+       else if (gtod->vclock_mode == VCLOCK_HPET)
+               cycles = vread_hpet();
+#endif
+#ifdef CONFIG_PARAVIRT_CLOCK
+       else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
+               cycles = vread_pvclock(mode);
+#endif
+       else
+               return 0;
+       v = (cycles - gtod->cycle_last) & gtod->mask;
+       return v * gtod->mult;
+}
+
+/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
+notrace static int __always_inline do_realtime(struct timespec *ts)
+{
+       unsigned long seq;
+       u64 ns;
+       int mode;
+
+       do {
+               seq = gtod_read_begin(gtod);
+               mode = gtod->vclock_mode;
+               ts->tv_sec = gtod->wall_time_sec;
+               ns = gtod->wall_time_snsec;
+               ns += vgetsns(&mode);
+               ns >>= gtod->shift;
+       } while (unlikely(gtod_read_retry(gtod, seq)));
+
+       ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+       ts->tv_nsec = ns;
+
+       return mode;
+}
+
+notrace static int __always_inline do_monotonic(struct timespec *ts)
+{
+       unsigned long seq;
+       u64 ns;
+       int mode;
+
+       do {
+               seq = gtod_read_begin(gtod);
+               mode = gtod->vclock_mode;
+               ts->tv_sec = gtod->monotonic_time_sec;
+               ns = gtod->monotonic_time_snsec;
+               ns += vgetsns(&mode);
+               ns >>= gtod->shift;
+       } while (unlikely(gtod_read_retry(gtod, seq)));
+
+       ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
+       ts->tv_nsec = ns;
+
+       return mode;
+}
+
+notrace static void do_realtime_coarse(struct timespec *ts)
+{
+       unsigned long seq;
+       do {
+               seq = gtod_read_begin(gtod);
+               ts->tv_sec = gtod->wall_time_coarse_sec;
+               ts->tv_nsec = gtod->wall_time_coarse_nsec;
+       } while (unlikely(gtod_read_retry(gtod, seq)));
+}
+
+notrace static void do_monotonic_coarse(struct timespec *ts)
+{
+       unsigned long seq;
+       do {
+               seq = gtod_read_begin(gtod);
+               ts->tv_sec = gtod->monotonic_time_coarse_sec;
+               ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
+       } while (unlikely(gtod_read_retry(gtod, seq)));
+}
+
+notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
+{
+       switch (clock) {
+       case CLOCK_REALTIME:
+               if (do_realtime(ts) == VCLOCK_NONE)
+                       goto fallback;
+               break;
+       case CLOCK_MONOTONIC:
+               if (do_monotonic(ts) == VCLOCK_NONE)
+                       goto fallback;
+               break;
+       case CLOCK_REALTIME_COARSE:
+               do_realtime_coarse(ts);
+               break;
+       case CLOCK_MONOTONIC_COARSE:
+               do_monotonic_coarse(ts);
+               break;
+       default:
+               goto fallback;
+       }
+
+       return 0;
+fallback:
+       return vdso_fallback_gettime(clock, ts);
+}
+int clock_gettime(clockid_t, struct timespec *)
+       __attribute__((weak, alias("__vdso_clock_gettime")));
+
+notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+       if (likely(tv != NULL)) {
+               if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
+                       return vdso_fallback_gtod(tv, tz);
+               tv->tv_usec /= 1000;
+       }
+       if (unlikely(tz != NULL)) {
+               tz->tz_minuteswest = gtod->tz_minuteswest;
+               tz->tz_dsttime = gtod->tz_dsttime;
+       }
+
+       return 0;
+}
+int gettimeofday(struct timeval *, struct timezone *)
+       __attribute__((weak, alias("__vdso_gettimeofday")));
+
+/*
+ * This will break when the xtime seconds get inaccurate, but that is
+ * unlikely.
+ */
+notrace time_t __vdso_time(time_t *t)
+{
+       /* This is atomic on x86 so we don't need any locks. */
+       time_t result = ACCESS_ONCE(gtod->wall_time_sec);
+
+       if (t)
+               *t = result;
+       return result;
+}
+int time(time_t *t)
+       __attribute__((weak, alias("__vdso_time")));
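
Since the weak aliases above (and the vDSO version script) export these
entry points under their plain libc names, an ordinary caller gets the
user-mode fast path automatically whenever vclock_mode is TSC, HPET, or
pvclock; only VCLOCK_NONE falls back to a real syscall. A minimal caller,
for illustration:

    /* With a usable clocksource this read is serviced entirely by
     * __vdso_clock_gettime, without entering the kernel. */
    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
            struct timespec ts;

            if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
                    return 1;
            printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
            return 0;
    }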
diff --git a/arch/x86/entry/vdso/vdso-layout.lds.S b/arch/x86/entry/vdso/vdso-layout.lds.S
new file mode 100644 (file)
index 0000000..de2c921
--- /dev/null
@@ -0,0 +1,118 @@
+#include <asm/vdso.h>
+
+/*
+ * Linker script for vDSO.  This is an ELF shared object prelinked to
+ * its virtual address, and with only one read-only segment.
+ * This script controls its layout.
+ */
+
+#if defined(BUILD_VDSO64)
+# define SHDR_SIZE 64
+#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
+# define SHDR_SIZE 40
+#else
+# error unknown VDSO target
+#endif
+
+#define NUM_FAKE_SHDRS 13
+
+SECTIONS
+{
+       /*
+        * User/kernel shared data is before the vDSO.  This may be a little
+        * uglier than putting it after the vDSO, but it avoids issues with
+        * non-allocatable things that dangle past the end of the PT_LOAD
+        * segment.
+        */
+
+       vvar_start = . - 2 * PAGE_SIZE;
+       vvar_page = vvar_start;
+
+       /* Place all vvars at the offsets in asm/vvar.h. */
+#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
+#define __VVAR_KERNEL_LDS
+#include <asm/vvar.h>
+#undef __VVAR_KERNEL_LDS
+#undef EMIT_VVAR
+
+       hpet_page = vvar_start + PAGE_SIZE;
+
+       . = SIZEOF_HEADERS;
+
+       .hash           : { *(.hash) }                  :text
+       .gnu.hash       : { *(.gnu.hash) }
+       .dynsym         : { *(.dynsym) }
+       .dynstr         : { *(.dynstr) }
+       .gnu.version    : { *(.gnu.version) }
+       .gnu.version_d  : { *(.gnu.version_d) }
+       .gnu.version_r  : { *(.gnu.version_r) }
+
+       .dynamic        : { *(.dynamic) }               :text   :dynamic
+
+       .rodata         : {
+               *(.rodata*)
+               *(.data*)
+               *(.sdata*)
+               *(.got.plt) *(.got)
+               *(.gnu.linkonce.d.*)
+               *(.bss*)
+               *(.dynbss*)
+               *(.gnu.linkonce.b.*)
+
+               /*
+                * Ideally this would live in a C file, but that won't
+                * work cleanly for x32 until we start building the x32
+                * C code using an x32 toolchain.
+                */
+               VDSO_FAKE_SECTION_TABLE_START = .;
+               . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
+               VDSO_FAKE_SECTION_TABLE_END = .;
+       }                                               :text
+
+       .fake_shstrtab  : { *(.fake_shstrtab) }         :text
+
+
+       .note           : { *(.note.*) }                :text   :note
+
+       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
+       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
+
+
+       /*
+        * Text is well-separated from actual data: there's plenty of
+        * stuff that isn't used at runtime in between.
+        */
+
+       .text           : { *(.text*) }                 :text   =0x90909090,
+
+       /*
+        * At the end so that eu-elflint stays happy when vdso2c strips
+        * these.  A better implementation would avoid allocating space
+        * for these.
+        */
+       .altinstructions        : { *(.altinstructions) }       :text
+       .altinstr_replacement   : { *(.altinstr_replacement) }  :text
+
+       /DISCARD/ : {
+               *(.discard)
+               *(.discard.*)
+               *(__bug_table)
+       }
+}
+
+/*
+ * Very old versions of ld do not recognize this name token; use the constant.
+ */
+#define PT_GNU_EH_FRAME        0x6474e550
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
+       note            PT_NOTE         FLAGS(4);               /* PF_R */
+       eh_frame_hdr    PT_GNU_EH_FRAME;
+}
diff --git a/arch/x86/entry/vdso/vdso-note.S b/arch/x86/entry/vdso/vdso-note.S
new file mode 100644 (file)
index 0000000..79a071e
--- /dev/null
@@ -0,0 +1,12 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/uts.h>
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+ELFNOTE_START(Linux, 0, "a")
+       .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/x86/entry/vdso/vdso.lds.S b/arch/x86/entry/vdso/vdso.lds.S
new file mode 100644 (file)
index 0000000..6807932
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Linker script for 64-bit vDSO.
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define BUILD_VDSO64
+
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+       LINUX_2.6 {
+       global:
+               clock_gettime;
+               __vdso_clock_gettime;
+               gettimeofday;
+               __vdso_gettimeofday;
+               getcpu;
+               __vdso_getcpu;
+               time;
+               __vdso_time;
+       local: *;
+       };
+}
diff --git a/arch/x86/entry/vdso/vdso2c.c b/arch/x86/entry/vdso/vdso2c.c
new file mode 100644 (file)
index 0000000..8627db2
--- /dev/null
@@ -0,0 +1,253 @@
+/*
+ * vdso2c - A vdso image preparation tool
+ * Copyright (c) 2014 Andy Lutomirski and others
+ * Licensed under the GPL v2
+ *
+ * vdso2c requires stripped and unstripped input.  It would be trivial
+ * to fully strip the input in here, but, for reasons described below,
+ * we need to write a section table.  Doing this is more or less
+ * equivalent to dropping all non-allocatable sections, but it's
+ * easier to let objcopy handle that instead of doing it ourselves.
+ * If we ever need to do something fancier than what objcopy provides,
+ * it would be straightforward to add here.
+ *
+ * We keep a section table for a few reasons:
+ *
+ * The Go runtime had a couple of bugs: it would read the section
+ * table to try to figure out how many dynamic symbols there were (it
+ * shouldn't have looked at the section table at all) and, if there
+ * were no SHT_SYNDYM section table entry, it would use an
+ * uninitialized value for the number of symbols.  An empty DYNSYM
+ * table would work, but I see no reason not to write a valid one (and
+ * keep full performance for old Go programs).  This hack is only
+ * needed on x86_64.
+ *
+ * The bug was introduced on 2012-08-31 by:
+ * https://code.google.com/p/go/source/detail?r=56ea40aac72b
+ * and was fixed on 2014-06-13 by:
+ * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
+ *
+ * Binutils has issues debugging the vDSO: it reads the section table to
+ * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
+ * would break build-id if we removed the section table.  Binutils
+ * also requires that shstrndx != 0.  See:
+ * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
+ *
+ * elfutils might not look for PT_NOTE if there is a section table at
+ * all.  I don't know whether this matters for any practical purpose.
+ *
+ * For simplicity, rather than hacking up a partial section table, we
+ * just write a mostly complete one.  We omit non-dynamic symbols,
+ * though, since they're rather large.
+ *
+ * Once binutils gets fixed, we might be able to drop this for all but
+ * the 64-bit vdso, since build-id only works in kernel RPMs, and
+ * systems that update to new enough kernel RPMs will likely update
+ * binutils in sync.  build-id has never worked for home-built kernel
+ * RPMs without manual symlinking, and I suspect that no one ever does
+ * that.
+ */
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <unistd.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <fcntl.h>
+#include <err.h>
+
+#include <sys/mman.h>
+#include <sys/types.h>
+
+#include <tools/le_byteshift.h>
+
+#include <linux/elf.h>
+#include <linux/types.h>
+
+const char *outfilename;
+
+/* Symbols that we need in vdso2c. */
+enum {
+       sym_vvar_start,
+       sym_vvar_page,
+       sym_hpet_page,
+       sym_VDSO_FAKE_SECTION_TABLE_START,
+       sym_VDSO_FAKE_SECTION_TABLE_END,
+};
+
+const int special_pages[] = {
+       sym_vvar_page,
+       sym_hpet_page,
+};
+
+struct vdso_sym {
+       const char *name;
+       bool export;
+};
+
+struct vdso_sym required_syms[] = {
+       [sym_vvar_start] = {"vvar_start", true},
+       [sym_vvar_page] = {"vvar_page", true},
+       [sym_hpet_page] = {"hpet_page", true},
+       [sym_VDSO_FAKE_SECTION_TABLE_START] = {
+               "VDSO_FAKE_SECTION_TABLE_START", false
+       },
+       [sym_VDSO_FAKE_SECTION_TABLE_END] = {
+               "VDSO_FAKE_SECTION_TABLE_END", false
+       },
+       {"VDSO32_NOTE_MASK", true},
+       {"VDSO32_SYSENTER_RETURN", true},
+       {"__kernel_vsyscall", true},
+       {"__kernel_sigreturn", true},
+       {"__kernel_rt_sigreturn", true},
+};
+
+__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
+static void fail(const char *format, ...)
+{
+       va_list ap;
+       va_start(ap, format);
+       fprintf(stderr, "Error: ");
+       vfprintf(stderr, format, ap);
+       if (outfilename)
+               unlink(outfilename);
+       va_end(ap);
+       exit(1);
+}
+
+/*
+ * Evil macros for little-endian reads and writes
+ */
+#define GLE(x, bits, ifnot)                                            \
+       __builtin_choose_expr(                                          \
+               (sizeof(*(x)) == bits/8),                               \
+               (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
+
+extern void bad_get_le(void);
+#define LAST_GLE(x)                                                    \
+       __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le())
+
+#define GET_LE(x)                                                      \
+       GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x))))
+
+#define PLE(x, val, bits, ifnot)                                       \
+       __builtin_choose_expr(                                          \
+               (sizeof(*(x)) == bits/8),                               \
+               put_unaligned_le##bits((val), (x)), ifnot)
+
+extern void bad_put_le(void);
+#define LAST_PLE(x, val)                                               \
+       __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le())
+
+#define PUT_LE(x, val)                                 \
+       PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
+
+
+#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
+
+#define BITSFUNC3(name, bits, suffix) name##bits##suffix
+#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
+#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )
+
+#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+#define ELF_BITS 64
+#include "vdso2c.h"
+#undef ELF_BITS
+
+#define ELF_BITS 32
+#include "vdso2c.h"
+#undef ELF_BITS
+
+static void go(void *raw_addr, size_t raw_len,
+              void *stripped_addr, size_t stripped_len,
+              FILE *outfile, const char *name)
+{
+       Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;
+
+       if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
+               go64(raw_addr, raw_len, stripped_addr, stripped_len,
+                    outfile, name);
+       } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
+               go32(raw_addr, raw_len, stripped_addr, stripped_len,
+                    outfile, name);
+       } else {
+               fail("unknown ELF class\n");
+       }
+}
+
+static void map_input(const char *name, void **addr, size_t *len, int prot)
+{
+       off_t tmp_len;
+
+       int fd = open(name, O_RDONLY);
+       if (fd == -1)
+               err(1, "%s", name);
+
+       tmp_len = lseek(fd, 0, SEEK_END);
+       if (tmp_len == (off_t)-1)
+               err(1, "lseek");
+       *len = (size_t)tmp_len;
+
+       *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0);
+       if (*addr == MAP_FAILED)
+               err(1, "mmap");
+
+       close(fd);
+}
+
+int main(int argc, char **argv)
+{
+       size_t raw_len, stripped_len;
+       void *raw_addr, *stripped_addr;
+       FILE *outfile;
+       char *name, *tmp;
+       int namelen;
+
+       if (argc != 4) {
+               printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
+               return 1;
+       }
+
+       /*
+        * Figure out the struct name.  If we're writing to a .so file,
+        * generate raw output instead.
+        */
+       name = strdup(argv[3]);
+       namelen = strlen(name);
+       if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
+               name = NULL;
+       } else {
+               tmp = strrchr(name, '/');
+               if (tmp)
+                       name = tmp + 1;
+               tmp = strchr(name, '.');
+               if (tmp)
+                       *tmp = '\0';
+               for (tmp = name; *tmp; tmp++)
+                       if (*tmp == '-')
+                               *tmp = '_';
+       }
+
+       map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
+       map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
+
+       outfilename = argv[3];
+       outfile = fopen(outfilename, "w");
+       if (!outfile)
+               err(1, "%s", outfilename);
+
+       go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);
+
+       munmap(raw_addr, raw_len);
+       munmap(stripped_addr, stripped_len);
+       fclose(outfile);
+
+       return 0;
+}
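
The GLE/PLE macros above let the ELF-walking code in vdso2c.h say
GET_LE(&field) for any field width: __builtin_choose_expr picks the
accessor from sizeof the operand at compile time. A sketch of the dispatch
(assumes the get_unaligned_le* helpers from tools/le_byteshift.h):

    /* sizeof(hdr->e_phoff) == 8, so this expands to get_unaligned_le64();
     * GET_LE(&hdr->e_shnum) would pick get_unaligned_le16() instead. */
    static uint64_t first_phdr_offset(const Elf64_Ehdr *hdr)
    {
            return GET_LE(&hdr->e_phoff);
    }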
diff --git a/arch/x86/entry/vdso/vdso2c.h b/arch/x86/entry/vdso/vdso2c.h
new file mode 100644 (file)
index 0000000..0224987
--- /dev/null
@@ -0,0 +1,175 @@
+/*
+ * This file is included twice from vdso2c.c.  It generates code for 32-bit
+ * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
+ * are built for 32-bit userspace.
+ */
+
+static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
+                        void *stripped_addr, size_t stripped_len,
+                        FILE *outfile, const char *name)
+{
+       int found_load = 0;
+       unsigned long load_size = -1;  /* Work around bogus warning */
+       unsigned long mapping_size;
+       ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
+       int i;
+       unsigned long j;
+       ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
+               *alt_sec = NULL;
+       ELF(Dyn) *dyn = 0, *dyn_end = 0;
+       const char *secstrings;
+       INT_BITS syms[NSYMS] = {};
+
+       ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
+
+       /* Walk the segment table. */
+       for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
+               if (GET_LE(&pt[i].p_type) == PT_LOAD) {
+                       if (found_load)
+                               fail("multiple PT_LOAD segs\n");
+
+                       if (GET_LE(&pt[i].p_offset) != 0 ||
+                           GET_LE(&pt[i].p_vaddr) != 0)
+                               fail("PT_LOAD in wrong place\n");
+
+                       if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
+                               fail("cannot handle memsz != filesz\n");
+
+                       load_size = GET_LE(&pt[i].p_memsz);
+                       found_load = 1;
+               } else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
+                       dyn = raw_addr + GET_LE(&pt[i].p_offset);
+                       dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
+                               GET_LE(&pt[i].p_memsz);
+               }
+       }
+       if (!found_load)
+               fail("no PT_LOAD seg\n");
+
+       if (stripped_len < load_size)
+               fail("stripped input is too short\n");
+
+       /* Walk the dynamic table */
+       for (i = 0; dyn + i < dyn_end &&
+                    GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
+               typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
+               if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
+                   tag == DT_RELENT || tag == DT_TEXTREL)
+                       fail("vdso image contains dynamic relocations\n");
+       }
+
+       /* Walk the section table */
+       secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
+               GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
+       secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
+       for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
+               ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
+                       GET_LE(&hdr->e_shentsize) * i;
+               if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
+                       symtab_hdr = sh;
+
+               if (!strcmp(secstrings + GET_LE(&sh->sh_name),
+                           ".altinstructions"))
+                       alt_sec = sh;
+       }
+
+       if (!symtab_hdr)
+               fail("no symbol table\n");
+
+       strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
+               GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);
+
+       /* Walk the symbol table */
+       for (i = 0;
+            i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
+            i++) {
+               int k;
+               ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
+                       GET_LE(&symtab_hdr->sh_entsize) * i;
+               const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
+                       GET_LE(&sym->st_name);
+
+               for (k = 0; k < NSYMS; k++) {
+                       if (!strcmp(name, required_syms[k].name)) {
+                               if (syms[k]) {
+                                       fail("duplicate symbol %s\n",
+                                            required_syms[k].name);
+                               }
+
+                               /*
+                                * Careful: we use negative addresses, but
+                                * st_value is unsigned, so we rely
+                                * on syms[k] being a signed type of the
+                                * correct width.
+                                */
+                               syms[k] = GET_LE(&sym->st_value);
+                       }
+               }
+       }
+
+       /* Validate mapping addresses. */
+       for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
+               INT_BITS symval = syms[special_pages[i]];
+
+               if (!symval)
+                       continue;  /* The mapping isn't used; ignore it. */
+
+               if (symval % 4096)
+                       fail("%s must be a multiple of 4096\n",
+                            required_syms[special_pages[i]].name);
+               if (symval + 4096 < syms[sym_vvar_start])
+                       fail("%s underruns vvar_start\n",
+                            required_syms[special_pages[i]].name);
+               if (symval + 4096 > 0)
+                       fail("%s is on the wrong side of the vdso text\n",
+                            required_syms[special_pages[i]].name);
+       }
+       if (syms[sym_vvar_start] % 4096)
+               fail("vvar_start must be a multiple of 4096\n");
+
+       if (!name) {
+               fwrite(stripped_addr, stripped_len, 1, outfile);
+               return;
+       }
+
+       mapping_size = (stripped_len + 4095) / 4096 * 4096;
+
+       fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
+       fprintf(outfile, "#include <linux/linkage.h>\n");
+       fprintf(outfile, "#include <asm/page_types.h>\n");
+       fprintf(outfile, "#include <asm/vdso.h>\n");
+       fprintf(outfile, "\n");
+       fprintf(outfile,
+               "static unsigned char raw_data[%lu] __page_aligned_data = {",
+               mapping_size);
+       for (j = 0; j < stripped_len; j++) {
+               if (j % 10 == 0)
+                       fprintf(outfile, "\n\t");
+               fprintf(outfile, "0x%02X, ",
+                       (int)((unsigned char *)stripped_addr)[j]);
+       }
+       fprintf(outfile, "\n};\n\n");
+
+       fprintf(outfile, "static struct page *pages[%lu];\n\n",
+               mapping_size / 4096);
+
+       fprintf(outfile, "const struct vdso_image %s = {\n", name);
+       fprintf(outfile, "\t.data = raw_data,\n");
+       fprintf(outfile, "\t.size = %lu,\n", mapping_size);
+       fprintf(outfile, "\t.text_mapping = {\n");
+       fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
+       fprintf(outfile, "\t\t.pages = pages,\n");
+       fprintf(outfile, "\t},\n");
+       if (alt_sec) {
+               fprintf(outfile, "\t.alt = %lu,\n",
+                       (unsigned long)GET_LE(&alt_sec->sh_offset));
+               fprintf(outfile, "\t.alt_len = %lu,\n",
+                       (unsigned long)GET_LE(&alt_sec->sh_size));
+       }
+       for (i = 0; i < NSYMS; i++) {
+               if (required_syms[i].export && syms[i])
+                       fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
+                               required_syms[i].name, (int64_t)syms[i]);
+       }
+       fprintf(outfile, "};\n");
+}
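
For orientation, the fprintf calls above emit a C file of roughly this shape
(names and sizes hypothetical; the real raw_data array holds every byte of
the stripped image):

    /* Hypothetical excerpt of a generated vdso-image-64.c: */
    static unsigned char raw_data[8192] __page_aligned_data = {
            0x7F, 0x45, 0x4C, 0x46, /* ELF magic; remaining bytes omitted */
    };

    static struct page *pages[2];

    const struct vdso_image vdso_image_64 = {
            .data = raw_data,
            .size = 8192,
            .text_mapping = {
                    .name = "[vdso]",
                    .pages = pages,
            },
            .sym_vvar_start = -8192,
    };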
diff --git a/arch/x86/entry/vdso/vdso32-setup.c b/arch/x86/entry/vdso/vdso32-setup.c
new file mode 100644 (file)
index 0000000..e904c27
--- /dev/null
@@ -0,0 +1,120 @@
+/*
+ * (C) Copyright 2002 Linus Torvalds
+ * Portions based on the vdso-randomization code from exec-shield:
+ * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
+ *
+ * This file contains the needed initializations to support sysenter.
+ */
+
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/mm_types.h>
+
+#include <asm/cpufeature.h>
+#include <asm/processor.h>
+#include <asm/vdso.h>
+
+#ifdef CONFIG_COMPAT_VDSO
+#define VDSO_DEFAULT   0
+#else
+#define VDSO_DEFAULT   1
+#endif
+
+/*
+ * Should the kernel map a VDSO page into processes and pass its
+ * address down to glibc upon exec()?
+ */
+unsigned int __read_mostly vdso32_enabled = VDSO_DEFAULT;
+
+static int __init vdso32_setup(char *s)
+{
+       vdso32_enabled = simple_strtoul(s, NULL, 0);
+
+       if (vdso32_enabled > 1)
+               pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
+
+       return 1;
+}
+
+/*
+ * For consistency, the argument vdso32=[01] affects the 32-bit vDSO
+ * behavior on both 64-bit and 32-bit kernels.
+ * On 32-bit kernels, vdso=[01] means the same thing.
+ */
+__setup("vdso32=", vdso32_setup);
+
+#ifdef CONFIG_X86_32
+__setup_param("vdso=", vdso_setup, vdso32_setup, 0);
+#endif
+
+#ifdef CONFIG_X86_64
+
+#define        vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
+#define        vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))
+
+#else  /* CONFIG_X86_32 */
+
+#define vdso32_sysenter()      (boot_cpu_has(X86_FEATURE_SEP))
+#define vdso32_syscall()       (0)
+
+#endif /* CONFIG_X86_64 */
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+const struct vdso_image *selected_vdso32;
+#endif
+
+int __init sysenter_setup(void)
+{
+#ifdef CONFIG_COMPAT
+       if (vdso32_syscall())
+               selected_vdso32 = &vdso_image_32_syscall;
+       else
+#endif
+       if (vdso32_sysenter())
+               selected_vdso32 = &vdso_image_32_sysenter;
+       else
+               selected_vdso32 = &vdso_image_32_int80;
+
+       init_vdso_image(selected_vdso32);
+
+       return 0;
+}
+
+#ifdef CONFIG_X86_64
+
+subsys_initcall(sysenter_setup);
+
+#ifdef CONFIG_SYSCTL
+/* Register vsyscall32 into the ABI table */
+#include <linux/sysctl.h>
+
+static struct ctl_table abi_table2[] = {
+       {
+               .procname       = "vsyscall32",
+               .data           = &vdso32_enabled,
+               .maxlen         = sizeof(int),
+               .mode           = 0644,
+               .proc_handler   = proc_dointvec
+       },
+       {}
+};
+
+static struct ctl_table abi_root_table2[] = {
+       {
+               .procname = "abi",
+               .mode = 0555,
+               .child = abi_table2
+       },
+       {}
+};
+
+static __init int ia32_binfmt_init(void)
+{
+       register_sysctl_table(abi_root_table2);
+       return 0;
+}
+__initcall(ia32_binfmt_init);
+#endif /* CONFIG_SYSCTL */
+
+#endif /* CONFIG_X86_64 */
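
Because the ctl_table above registers "vsyscall32" under an "abi" root table, the knob surfaces as /proc/sys/abi/vsyscall32, mirroring vdso32_enabled. A minimal userspace check, as a sketch with error handling trimmed:

    #include <stdio.h>

    int main(void)
    {
            FILE *f = fopen("/proc/sys/abi/vsyscall32", "r");
            int enabled = -1;

            if (f) {
                    fscanf(f, "%d", &enabled);
                    fclose(f);
            }
            /* load_vdso32() treats any value other than 1 as "disabled" */
            printf("32-bit vDSO: %s\n", enabled == 1 ? "enabled" : "disabled");
            return 0;
    }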
diff --git a/arch/x86/entry/vdso/vdso32/.gitignore b/arch/x86/entry/vdso/vdso32/.gitignore
new file mode 100644 (file)
index 0000000..e45fba9
--- /dev/null
@@ -0,0 +1 @@
+vdso32.lds
diff --git a/arch/x86/entry/vdso/vdso32/int80.S b/arch/x86/entry/vdso/vdso32/int80.S
new file mode 100644 (file)
index 0000000..b15b7c0
--- /dev/null
@@ -0,0 +1,56 @@
+/*
+ * Code for the vDSO.  This version uses the old int $0x80 method.
+ *
+ * First get the common code for the sigreturn entry points.
+ * This must come first.
+ */
+#include "sigreturn.S"
+
+       .text
+       .globl __kernel_vsyscall
+       .type __kernel_vsyscall,@function
+       ALIGN
+__kernel_vsyscall:
+.LSTART_vsyscall:
+       int $0x80
+       ret
+.LEND_vsyscall:
+       .size __kernel_vsyscall,.-.LSTART_vsyscall
+       .previous
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAMEDLSI:
+       .long .LENDCIEDLSI-.LSTARTCIEDLSI
+.LSTARTCIEDLSI:
+       .long 0                 /* CIE ID */
+       .byte 1                 /* Version number */
+       .string "zR"            /* NUL-terminated augmentation string */
+       .uleb128 1              /* Code alignment factor */
+       .sleb128 -4             /* Data alignment factor */
+       .byte 8                 /* Return address register column */
+       .uleb128 1              /* Augmentation value length */
+       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+       .byte 0x0c              /* DW_CFA_def_cfa */
+       .uleb128 4
+       .uleb128 4
+       .byte 0x88              /* DW_CFA_offset, column 0x8 */
+       .uleb128 1
+       .align 4
+.LENDCIEDLSI:
+       .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
+.LSTARTFDEDLSI:
+       .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
+       .long .LSTART_vsyscall-.        /* PC-relative start address */
+       .long .LEND_vsyscall-.LSTART_vsyscall
+       .uleb128 0
+       .align 4
+.LENDFDEDLSI:
+       .previous
+
+       /*
+        * Pad out the segment to match the size of the sysenter.S version.
+        */
+VDSO32_vsyscall_eh_frame_size = 0x40
+       .section .data,"aw",@progbits
+       .space VDSO32_vsyscall_eh_frame_size-(.LENDFDEDLSI-.LSTARTFRAMEDLSI), 0
+       .previous
diff --git a/arch/x86/entry/vdso/vdso32/note.S b/arch/x86/entry/vdso/vdso32/note.S
new file mode 100644 (file)
index 0000000..c83f257
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/version.h>
+#include <linux/elfnote.h>
+
+/* Ideally this would use UTS_NAME, but using a quoted string here
+   doesn't work. Remember to change this when changing the
+   kernel's name. */
+ELFNOTE_START(Linux, 0, "a")
+       .long LINUX_VERSION_CODE
+ELFNOTE_END
+
+#ifdef CONFIG_XEN
+/*
+ * Add a special note telling glibc's dynamic linker a fake hardware
+ * flavor that it will use to choose the search path for libraries in the
+ * same way it uses real hardware capabilities like "mmx".
+ * We supply "nosegneg" as the fake capability, to indicate that we
+ * do not like negative offsets in instructions using segment overrides,
+ * since we implement those inefficiently.  This makes it possible to
+ * install libraries optimized to avoid those access patterns in someplace
+ * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/file
+ * corresponding to the bits here is needed to make ldconfig work right.
+ * It should contain:
+ *     hwcap 1 nosegneg
+ * to match the mapping of bit to name that we give here.
+ *
+ * At runtime, the fake hardware feature will be considered to be present
+ * if its bit is set in the mask word.  So, we start with the mask 0, and
+ * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
+ */
+
+#include "../../xen/vdso.h"    /* Defines VDSO_NOTE_NONEGSEG_BIT.  */
+
+ELFNOTE_START(GNU, 2, "a")
+       .long 1                 /* ncaps */
+VDSO32_NOTE_MASK:              /* Symbol used by arch/x86/xen/setup.c */
+       .long 0                 /* mask */
+       .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */
+ELFNOTE_END
+#endif
diff --git a/arch/x86/entry/vdso/vdso32/sigreturn.S b/arch/x86/entry/vdso/vdso32/sigreturn.S
new file mode 100644 (file)
index 0000000..d7ec4e2
--- /dev/null
@@ -0,0 +1,145 @@
+/*
+ * Common code for the sigreturn entry points in vDSO images.
+ * So far this code is the same for both int80 and sysenter versions.
+ * This file is #include'd by int80.S et al to define them first thing.
+ * The kernel assumes that the addresses of these routines are constant
+ * for all vDSO implementations.
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd_32.h>
+#include <asm/asm-offsets.h>
+
+#ifndef SYSCALL_ENTER_KERNEL
+#define        SYSCALL_ENTER_KERNEL    int $0x80
+#endif
+
+       .text
+       .globl __kernel_sigreturn
+       .type __kernel_sigreturn,@function
+       nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
+       ALIGN
+__kernel_sigreturn:
+.LSTART_sigreturn:
+       popl %eax               /* XXX does this mean it needs unwind info? */
+       movl $__NR_sigreturn, %eax
+       SYSCALL_ENTER_KERNEL
+.LEND_sigreturn:
+       nop
+       .size __kernel_sigreturn,.-.LSTART_sigreturn
+
+       .globl __kernel_rt_sigreturn
+       .type __kernel_rt_sigreturn,@function
+       ALIGN
+__kernel_rt_sigreturn:
+.LSTART_rt_sigreturn:
+       movl $__NR_rt_sigreturn, %eax
+       SYSCALL_ENTER_KERNEL
+.LEND_rt_sigreturn:
+       nop
+       .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
+       .previous
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAMEDLSI1:
+       .long .LENDCIEDLSI1-.LSTARTCIEDLSI1
+.LSTARTCIEDLSI1:
+       .long 0                 /* CIE ID */
+       .byte 1                 /* Version number */
+       .string "zRS"           /* NUL-terminated augmentation string */
+       .uleb128 1              /* Code alignment factor */
+       .sleb128 -4             /* Data alignment factor */
+       .byte 8                 /* Return address register column */
+       .uleb128 1              /* Augmentation value length */
+       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+       .byte 0                 /* DW_CFA_nop */
+       .align 4
+.LENDCIEDLSI1:
+       .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */
+.LSTARTFDEDLSI1:
+       .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */
+       /* HACK: The dwarf2 unwind routines will subtract 1 from the
+          return address to get an address in the middle of the
+          presumed call instruction.  Since we didn't get here via
+          a call, we need to include the nop before the real start
+          to make up for it.  */
+       .long .LSTART_sigreturn-1-.     /* PC-relative start address */
+       .long .LEND_sigreturn-.LSTART_sigreturn+1
+       .uleb128 0                      /* Augmentation */
+       /* What follows are the instructions for the table generation.
+          We record the locations of each register saved.  This is
+          complicated by the fact that the "CFA" is always assumed to
+          be the value of the stack pointer in the caller.  This means
+          that we must define the CFA of this body of code to be the
+          saved value of the stack pointer in the sigcontext.  Which
+          also means that there is no fixed relation to the other
+          saved registers, which means that we must use DW_CFA_expression
+          to compute their addresses.  It also means that when we
+          adjust the stack with the popl, we have to do it all over again.  */
+
+#define do_cfa_expr(offset)                                            \
+       .byte 0x0f;                     /* DW_CFA_def_cfa_expression */ \
+       .uleb128 1f-0f;                 /*   length */                  \
+0:     .byte 0x74;                     /*     DW_OP_breg4 */           \
+       .sleb128 offset;                /*      offset */               \
+       .byte 0x06;                     /*     DW_OP_deref */           \
+1:
+
+#define do_expr(regno, offset)                                         \
+       .byte 0x10;                     /* DW_CFA_expression */         \
+       .uleb128 regno;                 /*   regno */                   \
+       .uleb128 1f-0f;                 /*   length */                  \
+0:     .byte 0x74;                     /*     DW_OP_breg4 */           \
+       .sleb128 offset;                /*       offset */              \
+1:
+
+       do_cfa_expr(IA32_SIGCONTEXT_sp+4)
+       do_expr(0, IA32_SIGCONTEXT_ax+4)
+       do_expr(1, IA32_SIGCONTEXT_cx+4)
+       do_expr(2, IA32_SIGCONTEXT_dx+4)
+       do_expr(3, IA32_SIGCONTEXT_bx+4)
+       do_expr(5, IA32_SIGCONTEXT_bp+4)
+       do_expr(6, IA32_SIGCONTEXT_si+4)
+       do_expr(7, IA32_SIGCONTEXT_di+4)
+       do_expr(8, IA32_SIGCONTEXT_ip+4)
+
+       .byte 0x42      /* DW_CFA_advance_loc 2 -- nop; popl eax. */
+
+       do_cfa_expr(IA32_SIGCONTEXT_sp)
+       do_expr(0, IA32_SIGCONTEXT_ax)
+       do_expr(1, IA32_SIGCONTEXT_cx)
+       do_expr(2, IA32_SIGCONTEXT_dx)
+       do_expr(3, IA32_SIGCONTEXT_bx)
+       do_expr(5, IA32_SIGCONTEXT_bp)
+       do_expr(6, IA32_SIGCONTEXT_si)
+       do_expr(7, IA32_SIGCONTEXT_di)
+       do_expr(8, IA32_SIGCONTEXT_ip)
+
+       .align 4
+.LENDFDEDLSI1:
+
+       .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */
+.LSTARTFDEDLSI2:
+       .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */
+       /* HACK: See above wrt unwind library assumptions.  */
+       .long .LSTART_rt_sigreturn-1-.  /* PC-relative start address */
+       .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
+       .uleb128 0                      /* Augmentation */
+       /* What follows are the instructions for the table generation.
+          We record the locations of each register saved.  This is
+          slightly less complicated than the above, since we don't
+          modify the stack pointer in the process.  */
+
+       do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp)
+       do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax)
+       do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx)
+       do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx)
+       do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx)
+       do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp)
+       do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si)
+       do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di)
+       do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip)
+
+       .align 4
+.LENDFDEDLSI2:
+       .previous
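
Conceptually, the expressions emitted by do_cfa_expr() above tell the unwinder to compute the CFA at runtime from the sigcontext: push %esp plus an offset (DW_OP_breg4), then load the word found there (DW_OP_deref). do_expr() omits the final deref because DW_CFA_expression denotes the address where a register is saved rather than its value. A rough C rendering of the do_cfa_expr() evaluation, as a simplified sketch (a real consumer interprets the full DWARF opcode stream):

    #include <stdint.h>

    /* Evaluate "DW_OP_breg4 <offset>; DW_OP_deref". */
    static uint32_t eval_breg4_deref(uint32_t esp, int32_t offset)
    {
            uint32_t addr = esp + offset;                   /* DW_OP_breg4 */
            return *(const uint32_t *)(uintptr_t)addr;      /* DW_OP_deref */
    }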
diff --git a/arch/x86/entry/vdso/vdso32/syscall.S b/arch/x86/entry/vdso/vdso32/syscall.S
new file mode 100644 (file)
index 0000000..6b286bb
--- /dev/null
@@ -0,0 +1,75 @@
+/*
+ * Code for the vDSO.  This version uses the syscall instruction.
+ *
+ * First get the common code for the sigreturn entry points.
+ * This must come first.
+ */
+#define SYSCALL_ENTER_KERNEL   syscall
+#include "sigreturn.S"
+
+#include <asm/segment.h>
+
+       .text
+       .globl __kernel_vsyscall
+       .type __kernel_vsyscall,@function
+       ALIGN
+__kernel_vsyscall:
+.LSTART_vsyscall:
+       push    %ebp
+.Lpush_ebp:
+       movl    %ecx, %ebp
+       syscall
+       movl    %ebp, %ecx
+       popl    %ebp
+.Lpop_ebp:
+       ret
+.LEND_vsyscall:
+       .size __kernel_vsyscall,.-.LSTART_vsyscall
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAME:
+       .long .LENDCIE-.LSTARTCIE
+.LSTARTCIE:
+       .long 0                 /* CIE ID */
+       .byte 1                 /* Version number */
+       .string "zR"            /* NUL-terminated augmentation string */
+       .uleb128 1              /* Code alignment factor */
+       .sleb128 -4             /* Data alignment factor */
+       .byte 8                 /* Return address register column */
+       .uleb128 1              /* Augmentation value length */
+       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+       .byte 0x0c              /* DW_CFA_def_cfa */
+       .uleb128 4
+       .uleb128 4
+       .byte 0x88              /* DW_CFA_offset, column 0x8 */
+       .uleb128 1
+       .align 4
+.LENDCIE:
+
+       .long .LENDFDE1-.LSTARTFDE1     /* Length FDE */
+.LSTARTFDE1:
+       .long .LSTARTFDE1-.LSTARTFRAME  /* CIE pointer */
+       .long .LSTART_vsyscall-.        /* PC-relative start address */
+       .long .LEND_vsyscall-.LSTART_vsyscall
+       .uleb128 0                      /* Augmentation length */
+       /* What follows are the instructions for the table generation.
+          We have to record all changes of the stack pointer.  */
+       .byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .uleb128 8
+       .byte 0x85, 0x02        /* DW_CFA_offset %ebp -8 */
+       .byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
+       .byte 0xc5              /* DW_CFA_restore %ebp */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .uleb128 4
+       .align 4
+.LENDFDE1:
+       .previous
+
+       /*
+        * Pad out the segment to match the size of the sysenter.S version.
+        */
+VDSO32_vsyscall_eh_frame_size = 0x40
+       .section .data,"aw",@progbits
+       .space VDSO32_vsyscall_eh_frame_size-(.LENDFDE1-.LSTARTFRAME), 0
+       .previous
diff --git a/arch/x86/entry/vdso/vdso32/sysenter.S b/arch/x86/entry/vdso/vdso32/sysenter.S
new file mode 100644 (file)
index 0000000..e354bce
--- /dev/null
@@ -0,0 +1,116 @@
+/*
+ * Code for the vDSO.  This version uses the sysenter instruction.
+ *
+ * First get the common code for the sigreturn entry points.
+ * This must come first.
+ */
+#include "sigreturn.S"
+
+/*
+ * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
+ * %ecx itself for arg2. The pushing is because the sysexit instruction
+ * (found in entry.S) requires that we clobber %ecx with the desired %esp.
+ * User code might expect that %ecx is unclobbered though, as it would be
+ * for returning via the iret instruction, so we must push and pop.
+ *
+ * The caller puts arg3 in %edx, which the sysexit instruction requires
+ * for %eip. Thus, exactly as for arg2, we must push and pop.
+ *
+ * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
+ * instruction clobbers %esp, the user's %esp won't even survive entry
+ * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
+ * arg6 from the stack.
+ *
+ * You cannot use this vsyscall for the clone() syscall because the
+ * three words on the parent stack do not get copied to the child.
+ */
+       .text
+       .globl __kernel_vsyscall
+       .type __kernel_vsyscall,@function
+       ALIGN
+__kernel_vsyscall:
+.LSTART_vsyscall:
+       push %ecx
+.Lpush_ecx:
+       push %edx
+.Lpush_edx:
+       push %ebp
+.Lenter_kernel:
+       movl %esp,%ebp
+       sysenter
+
+       /* 7: align return point with nops to make disassembly easier */
+       .space 7,0x90
+
+       /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
+       int $0x80
+       /* 16: System call normal return point is here! */
+VDSO32_SYSENTER_RETURN:        /* Symbol used by sysenter.c via vdso32-syms.h */
+       pop %ebp
+.Lpop_ebp:
+       pop %edx
+.Lpop_edx:
+       pop %ecx
+.Lpop_ecx:
+       ret
+.LEND_vsyscall:
+       .size __kernel_vsyscall,.-.LSTART_vsyscall
+       .previous
+
+       .section .eh_frame,"a",@progbits
+.LSTARTFRAMEDLSI:
+       .long .LENDCIEDLSI-.LSTARTCIEDLSI
+.LSTARTCIEDLSI:
+       .long 0                 /* CIE ID */
+       .byte 1                 /* Version number */
+       .string "zR"            /* NUL-terminated augmentation string */
+       .uleb128 1              /* Code alignment factor */
+       .sleb128 -4             /* Data alignment factor */
+       .byte 8                 /* Return address register column */
+       .uleb128 1              /* Augmentation value length */
+       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
+       .byte 0x0c              /* DW_CFA_def_cfa */
+       .uleb128 4
+       .uleb128 4
+       .byte 0x88              /* DW_CFA_offset, column 0x8 */
+       .uleb128 1
+       .align 4
+.LENDCIEDLSI:
+       .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
+.LSTARTFDEDLSI:
+       .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
+       .long .LSTART_vsyscall-.        /* PC-relative start address */
+       .long .LEND_vsyscall-.LSTART_vsyscall
+       .uleb128 0
+       /* What follows are the instructions for the table generation.
+          We have to record all changes of the stack pointer.  */
+       .byte 0x40 + (.Lpush_ecx-.LSTART_vsyscall) /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .byte 0x08              /* RA at offset 8 now */
+       .byte 0x40 + (.Lpush_edx-.Lpush_ecx) /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .byte 0x0c              /* RA at offset 12 now */
+       .byte 0x40 + (.Lenter_kernel-.Lpush_edx) /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .byte 0x10              /* RA at offset 16 now */
+       .byte 0x85, 0x04        /* DW_CFA_offset %ebp -16 */
+       /* Finally the epilogue.  */
+       .byte 0x40 + (.Lpop_ebp-.Lenter_kernel) /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .byte 0x0c              /* RA at offset 12 now */
+       .byte 0xc5              /* DW_CFA_restore %ebp */
+       .byte 0x40 + (.Lpop_edx-.Lpop_ebp) /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .byte 0x08              /* RA at offset 8 now */
+       .byte 0x40 + (.Lpop_ecx-.Lpop_edx) /* DW_CFA_advance_loc */
+       .byte 0x0e              /* DW_CFA_def_cfa_offset */
+       .byte 0x04              /* RA at offset 4 now */
+       .align 4
+.LENDFDEDLSI:
+       .previous
+
+       /*
+        * Emit a symbol with the size of this .eh_frame data,
+        * to verify it matches the other versions.
+        */
+VDSO32_vsyscall_eh_frame_size = (.LENDFDEDLSI-.LSTARTFRAMEDLSI)
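
Userspace does not hardcode the address of __kernel_vsyscall; the kernel advertises it through the AT_SYSINFO auxiliary-vector entry, and glibc dispatches system calls through it. A sketch of locating the entry point from a 32-bit process, assuming glibc's getauxval():

    #include <sys/auxv.h>
    #include <stdio.h>

    int main(void)
    {
            /* Only meaningful in a 32-bit process; 0 if no vDSO is mapped. */
            void *vsyscall = (void *)getauxval(AT_SYSINFO);

            printf("__kernel_vsyscall at %p\n", vsyscall);
            return 0;
    }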
diff --git a/arch/x86/entry/vdso/vdso32/vclock_gettime.c b/arch/x86/entry/vdso/vdso32/vclock_gettime.c
new file mode 100644 (file)
index 0000000..175cc72
--- /dev/null
@@ -0,0 +1,30 @@
+#define BUILD_VDSO32
+
+#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
+#undef CONFIG_OPTIMIZE_INLINING
+#endif
+
+#undef CONFIG_X86_PPRO_FENCE
+
+#ifdef CONFIG_X86_64
+
+/*
+ * In the case of a 32-bit vDSO for a 64-bit kernel, fake a 32-bit
+ * kernel configuration.
+ */
+#undef CONFIG_64BIT
+#undef CONFIG_X86_64
+#undef CONFIG_ILLEGAL_POINTER_VALUE
+#undef CONFIG_SPARSEMEM_VMEMMAP
+#undef CONFIG_NR_CPUS
+
+#define CONFIG_X86_32 1
+#define CONFIG_PAGE_OFFSET 0
+#define CONFIG_ILLEGAL_POINTER_VALUE 0
+#define CONFIG_NR_CPUS 1
+
+#define BUILD_VDSO32_64
+
+#endif
+
+#include "../vclock_gettime.c"
diff --git a/arch/x86/entry/vdso/vdso32/vdso-fakesections.c b/arch/x86/entry/vdso/vdso32/vdso-fakesections.c
new file mode 100644 (file)
index 0000000..541468e
--- /dev/null
@@ -0,0 +1 @@
+#include "../vdso-fakesections.c"
diff --git a/arch/x86/entry/vdso/vdso32/vdso32.lds.S b/arch/x86/entry/vdso/vdso32/vdso32.lds.S
new file mode 100644 (file)
index 0000000..31056cf
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * Linker script for 32-bit vDSO.
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#include <asm/page.h>
+
+#define BUILD_VDSO32
+
+#include "../vdso-layout.lds.S"
+
+/* The ELF entry point can be used to set the AT_SYSINFO value.  */
+ENTRY(__kernel_vsyscall);
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION
+{
+       LINUX_2.6 {
+       global:
+               __vdso_clock_gettime;
+               __vdso_gettimeofday;
+               __vdso_time;
+       };
+
+       LINUX_2.5 {
+       global:
+               __kernel_vsyscall;
+               __kernel_sigreturn;
+               __kernel_rt_sigreturn;
+       local: *;
+       };
+}
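
The version script controls only what the linker exports; a consumer still has to find the vDSO image and resolve these versioned names itself. The mapping base is published via AT_SYSINFO_EHDR, after which the ELF dynamic symbol table can be walked (the tree carries a complete reference implementation in Documentation's parse_vdso.c; this sketch stops at locating the header):

    #include <sys/auxv.h>
    #include <link.h>
    #include <stdio.h>

    int main(void)
    {
            /* ElfW() expands to Elf32/Elf64 types to match the process. */
            const ElfW(Ehdr) *vdso = (void *)getauxval(AT_SYSINFO_EHDR);

            if (vdso)
                    printf("vDSO ELF header at %p, %u program headers\n",
                           (const void *)vdso, vdso->e_phnum);
            return 0;
    }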
diff --git a/arch/x86/entry/vdso/vdsox32.lds.S b/arch/x86/entry/vdso/vdsox32.lds.S
new file mode 100644 (file)
index 0000000..697c11e
--- /dev/null
@@ -0,0 +1,25 @@
+/*
+ * Linker script for x32 vDSO.
+ * We #include the file to define the layout details.
+ *
+ * This file defines the version script giving the user-exported symbols in
+ * the DSO.
+ */
+
+#define BUILD_VDSOX32
+
+#include "vdso-layout.lds.S"
+
+/*
+ * This controls what userland symbols we export from the vDSO.
+ */
+VERSION {
+       LINUX_2.6 {
+       global:
+               __vdso_clock_gettime;
+               __vdso_gettimeofday;
+               __vdso_getcpu;
+               __vdso_time;
+       local: *;
+       };
+}
diff --git a/arch/x86/entry/vdso/vgetcpu.c b/arch/x86/entry/vdso/vgetcpu.c
new file mode 100644 (file)
index 0000000..8ec3d1f
--- /dev/null
@@ -0,0 +1,28 @@
+/*
+ * Copyright 2006 Andi Kleen, SUSE Labs.
+ * Subject to the GNU Public License, v.2
+ *
+ * Fast user context implementation of getcpu()
+ */
+
+#include <linux/kernel.h>
+#include <linux/getcpu.h>
+#include <linux/time.h>
+#include <asm/vgtod.h>
+
+notrace long
+__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
+{
+       unsigned int p;
+
+       p = __getcpu();
+
+       if (cpu)
+               *cpu = p & VGETCPU_CPU_MASK;
+       if (node)
+               *node = p >> 12;
+       return 0;
+}
+
+long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
+       __attribute__((weak, alias("__vdso_getcpu")));
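
The packing this decodes is established on the kernel side: vgetcpu_cpu_init() in vma.c below stores (node << 12) | cpu in TSC_AUX (and in the per-CPU segment limit), so __getcpu() hands back one word carrying both fields. A tiny decode sketch, assuming VGETCPU_CPU_MASK covers the low 12 bits:

    #include <stdio.h>

    int main(void)
    {
            unsigned int p = (2u << 12) | 5u;  /* node 2, cpu 5 -- made up */

            printf("cpu=%u node=%u\n", p & 0xfffu, p >> 12);
            return 0;
    }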
diff --git a/arch/x86/entry/vdso/vma.c b/arch/x86/entry/vdso/vma.c
new file mode 100644 (file)
index 0000000..1c9f750
--- /dev/null
@@ -0,0 +1,300 @@
+/*
+ * Copyright 2007 Andi Kleen, SUSE Labs.
+ * Subject to the GPL, v.2
+ *
+ * This contains most of the x86 vDSO kernel-side code.
+ */
+#include <linux/mm.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/random.h>
+#include <linux/elf.h>
+#include <linux/cpu.h>
+#include <asm/vgtod.h>
+#include <asm/proto.h>
+#include <asm/vdso.h>
+#include <asm/vvar.h>
+#include <asm/page.h>
+#include <asm/hpet.h>
+#include <asm/desc.h>
+
+#if defined(CONFIG_X86_64)
+unsigned int __read_mostly vdso64_enabled = 1;
+#endif
+
+void __init init_vdso_image(const struct vdso_image *image)
+{
+       int i;
+       int npages = (image->size) / PAGE_SIZE;
+
+       BUG_ON(image->size % PAGE_SIZE != 0);
+       for (i = 0; i < npages; i++)
+               image->text_mapping.pages[i] =
+                       virt_to_page(image->data + i*PAGE_SIZE);
+
+       apply_alternatives((struct alt_instr *)(image->data + image->alt),
+                          (struct alt_instr *)(image->data + image->alt +
+                                               image->alt_len));
+}
+
+struct linux_binprm;
+
+/*
+ * Put the vdso above the (randomized) stack with another randomized
+ * offset.  This way there is no hole in the middle of address space.
+ * To save memory, make sure it is still in the same PTE as the stack
+ * top.  This doesn't give that many random bits.
+ *
+ * Note that this algorithm is imperfect: the distribution of the vdso
+ * start address within a PMD is biased toward the end.
+ *
+ * Only used for the 64-bit and x32 vdsos.
+ */
+static unsigned long vdso_addr(unsigned long start, unsigned len)
+{
+#ifdef CONFIG_X86_32
+       return 0;
+#else
+       unsigned long addr, end;
+       unsigned offset;
+
+       /*
+        * Round up the start address.  It can start out unaligned as a result
+        * of stack start randomization.
+        */
+       start = PAGE_ALIGN(start);
+
+       /* Round the lowest possible end address up to a PMD boundary. */
+       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
+       if (end >= TASK_SIZE_MAX)
+               end = TASK_SIZE_MAX;
+       end -= len;
+
+       if (end > start) {
+               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
+               addr = start + (offset << PAGE_SHIFT);
+       } else {
+               addr = start;
+       }
+
+       /*
+        * Forcibly align the final address in case we have a hardware
+        * issue that requires alignment for performance reasons.
+        */
+       addr = align_vdso_addr(addr);
+
+       return addr;
+#endif
+}
+
+static int map_vdso(const struct vdso_image *image, bool calculate_addr)
+{
+       struct mm_struct *mm = current->mm;
+       struct vm_area_struct *vma;
+       unsigned long addr, text_start;
+       int ret = 0;
+       static struct page *no_pages[] = {NULL};
+       static struct vm_special_mapping vvar_mapping = {
+               .name = "[vvar]",
+               .pages = no_pages,
+       };
+
+       if (calculate_addr) {
+               addr = vdso_addr(current->mm->start_stack,
+                                image->size - image->sym_vvar_start);
+       } else {
+               addr = 0;
+       }
+
+       down_write(&mm->mmap_sem);
+
+       addr = get_unmapped_area(NULL, addr,
+                                image->size - image->sym_vvar_start, 0, 0);
+       if (IS_ERR_VALUE(addr)) {
+               ret = addr;
+               goto up_fail;
+       }
+
+       text_start = addr - image->sym_vvar_start;
+       current->mm->context.vdso = (void __user *)text_start;
+
+       /*
+        * MAYWRITE to allow gdb to COW and set breakpoints
+        */
+       vma = _install_special_mapping(mm,
+                                      text_start,
+                                      image->size,
+                                      VM_READ|VM_EXEC|
+                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+                                      &image->text_mapping);
+
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto up_fail;
+       }
+
+       vma = _install_special_mapping(mm,
+                                      addr,
+                                      -image->sym_vvar_start,
+                                      VM_READ|VM_MAYREAD,
+                                      &vvar_mapping);
+
+       if (IS_ERR(vma)) {
+               ret = PTR_ERR(vma);
+               goto up_fail;
+       }
+
+       if (image->sym_vvar_page)
+               ret = remap_pfn_range(vma,
+                                     text_start + image->sym_vvar_page,
+                                     __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
+                                     PAGE_SIZE,
+                                     PAGE_READONLY);
+
+       if (ret)
+               goto up_fail;
+
+#ifdef CONFIG_HPET_TIMER
+       if (hpet_address && image->sym_hpet_page) {
+               ret = io_remap_pfn_range(vma,
+                       text_start + image->sym_hpet_page,
+                       hpet_address >> PAGE_SHIFT,
+                       PAGE_SIZE,
+                       pgprot_noncached(PAGE_READONLY));
+
+               if (ret)
+                       goto up_fail;
+       }
+#endif
+
+up_fail:
+       if (ret)
+               current->mm->context.vdso = NULL;
+
+       up_write(&mm->mmap_sem);
+       return ret;
+}
+
+#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
+static int load_vdso32(void)
+{
+       int ret;
+
+       if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
+               return 0;
+
+       ret = map_vdso(selected_vdso32, false);
+       if (ret)
+               return ret;
+
+       if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
+               current_thread_info()->sysenter_return =
+                       current->mm->context.vdso +
+                       selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
+
+       return 0;
+}
+#endif
+
+#ifdef CONFIG_X86_64
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+       if (!vdso64_enabled)
+               return 0;
+
+       return map_vdso(&vdso_image_64, true);
+}
+
+#ifdef CONFIG_COMPAT
+int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+                                      int uses_interp)
+{
+#ifdef CONFIG_X86_X32_ABI
+       if (test_thread_flag(TIF_X32)) {
+               if (!vdso64_enabled)
+                       return 0;
+
+               return map_vdso(&vdso_image_x32, true);
+       }
+#endif
+
+       return load_vdso32();
+}
+#endif
+#else
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+       return load_vdso32();
+}
+#endif
+
+#ifdef CONFIG_X86_64
+static __init int vdso_setup(char *s)
+{
+       vdso64_enabled = simple_strtoul(s, NULL, 0);
+       return 0;
+}
+__setup("vdso=", vdso_setup);
+#endif
+
+#ifdef CONFIG_X86_64
+static void vgetcpu_cpu_init(void *arg)
+{
+       int cpu = smp_processor_id();
+       struct desc_struct d = { };
+       unsigned long node = 0;
+#ifdef CONFIG_NUMA
+       node = cpu_to_node(cpu);
+#endif
+       if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
+               write_rdtscp_aux((node << 12) | cpu);
+
+       /*
+        * Store cpu number in limit so that it can be loaded
+        * quickly in user space in vgetcpu. (12 bits for the CPU
+        * and 8 bits for the node)
+        */
+       d.limit0 = cpu | ((node & 0xf) << 12);
+       d.limit = node >> 4;
+       d.type = 5;             /* RO data, expand down, accessed */
+       d.dpl = 3;              /* Visible to user code */
+       d.s = 1;                /* Not a system segment */
+       d.p = 1;                /* Present */
+       d.d = 1;                /* 32-bit */
+
+       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
+}
+
+static int
+vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
+{
+       long cpu = (long)arg;
+
+       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
+               smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
+
+       return NOTIFY_DONE;
+}
+
+static int __init init_vdso(void)
+{
+       init_vdso_image(&vdso_image_64);
+
+#ifdef CONFIG_X86_X32_ABI
+       init_vdso_image(&vdso_image_x32);
+#endif
+
+       cpu_notifier_register_begin();
+
+       on_each_cpu(vgetcpu_cpu_init, NULL, 1);
+       /* notifier priority > KVM */
+       __hotcpu_notifier(vgetcpu_cpu_notifier, 30);
+
+       cpu_notifier_register_done();
+
+       return 0;
+}
+subsys_initcall(init_vdso);
+#endif /* CONFIG_X86_64 */
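
The address-selection logic of vdso_addr() is compact enough to restate standalone. A sketch with the kernel constants replaced by local stand-ins; rand() fills in for get_random_int(), and the trailing align_vdso_addr() fixup is omitted:

    #include <stdlib.h>

    #define PAGE_SHIFT      12
    #define PAGE_SIZE       (1UL << PAGE_SHIFT)
    #define PMD_SIZE        (1UL << 21)
    #define PMD_MASK        (~(PMD_SIZE - 1))
    #define TASK_SIZE_MAX   0x00007ffffffff000UL    /* x86-64 value */

    static unsigned long vdso_addr_sketch(unsigned long start, unsigned long len)
    {
            unsigned long end, offset;

            start = (start + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1); /* PAGE_ALIGN */
            end = (start + len + PMD_SIZE - 1) & PMD_MASK;
            if (end >= TASK_SIZE_MAX)
                    end = TASK_SIZE_MAX;
            end -= len;

            if (end <= start)
                    return start;

            offset = rand() % (((end - start) >> PAGE_SHIFT) + 1);
            return start + (offset << PAGE_SHIFT);
    }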
diff --git a/arch/x86/entry/vsyscall/Makefile b/arch/x86/entry/vsyscall/Makefile
new file mode 100644 (file)
index 0000000..a9f4856
--- /dev/null
@@ -0,0 +1,7 @@
+#
+# Makefile for the x86 low level vsyscall code
+#
+obj-y                                  := vsyscall_gtod.o
+
+obj-$(CONFIG_X86_VSYSCALL_EMULATION)   += vsyscall_64.o vsyscall_emu_64.o
+
diff --git a/arch/x86/entry/vsyscall/vsyscall_64.c b/arch/x86/entry/vsyscall/vsyscall_64.c
new file mode 100644 (file)
index 0000000..2dcc6ff
--- /dev/null
@@ -0,0 +1,335 @@
+/*
+ * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
+ *
+ * Based on the original implementation which is:
+ *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ *  Parts of the original code have been moved to arch/x86/vdso/vma.c
+ *
+ * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
+ * Userspace can request certain kernel services by calling fixed
+ * addresses.  This concept is problematic:
+ *
+ * - It interferes with ASLR.
+ * - It's awkward to write code that lives in kernel addresses but is
+ *   callable by userspace at fixed addresses.
+ * - The whole concept is impossible for 32-bit compat userspace.
+ * - UML cannot easily virtualize a vsyscall.
+ *
+ * As of mid-2014, I believe that there is no new userspace code that
+ * will use a vsyscall if the vDSO is present.  I hope that there will
+ * soon be no new userspace code that will ever use a vsyscall.
+ *
+ * The code in this file emulates vsyscalls when notified of a page
+ * fault to a vsyscall address.
+ */
+
+#include <linux/kernel.h>
+#include <linux/timer.h>
+#include <linux/syscalls.h>
+#include <linux/ratelimit.h>
+
+#include <asm/vsyscall.h>
+#include <asm/unistd.h>
+#include <asm/fixmap.h>
+#include <asm/traps.h>
+
+#define CREATE_TRACE_POINTS
+#include "vsyscall_trace.h"
+
+static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
+
+static int __init vsyscall_setup(char *str)
+{
+       if (str) {
+               if (!strcmp("emulate", str))
+                       vsyscall_mode = EMULATE;
+               else if (!strcmp("native", str))
+                       vsyscall_mode = NATIVE;
+               else if (!strcmp("none", str))
+                       vsyscall_mode = NONE;
+               else
+                       return -EINVAL;
+
+               return 0;
+       }
+
+       return -EINVAL;
+}
+early_param("vsyscall", vsyscall_setup);
+
+static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
+                             const char *message)
+{
+       if (!show_unhandled_signals)
+               return;
+
+       printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
+                          level, current->comm, task_pid_nr(current),
+                          message, regs->ip, regs->cs,
+                          regs->sp, regs->ax, regs->si, regs->di);
+}
+
+static int addr_to_vsyscall_nr(unsigned long addr)
+{
+       int nr;
+
+       if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
+               return -EINVAL;
+
+       nr = (addr & 0xC00UL) >> 10;
+       if (nr >= 3)
+               return -EINVAL;
+
+       return nr;
+}
+
+static bool write_ok_or_segv(unsigned long ptr, size_t size)
+{
+       /*
+        * XXX: if access_ok, get_user, and put_user handled
+        * sig_on_uaccess_error, this could go away.
+        */
+
+       if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
+               siginfo_t info;
+               struct thread_struct *thread = &current->thread;
+
+               thread->error_code      = 6;  /* user fault, no page, write */
+               thread->cr2             = ptr;
+               thread->trap_nr         = X86_TRAP_PF;
+
+               memset(&info, 0, sizeof(info));
+               info.si_signo           = SIGSEGV;
+               info.si_errno           = 0;
+               info.si_code            = SEGV_MAPERR;
+               info.si_addr            = (void __user *)ptr;
+
+               force_sig_info(SIGSEGV, &info, current);
+               return false;
+       } else {
+               return true;
+       }
+}
+
+bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
+{
+       struct task_struct *tsk;
+       unsigned long caller;
+       int vsyscall_nr, syscall_nr, tmp;
+       int prev_sig_on_uaccess_error;
+       long ret;
+
+       /*
+        * No point in checking CS -- the only way to get here is a user mode
+        * trap to a high address, which means that we're in 64-bit user code.
+        */
+
+       WARN_ON_ONCE(address != regs->ip);
+
+       if (vsyscall_mode == NONE) {
+               warn_bad_vsyscall(KERN_INFO, regs,
+                                 "vsyscall attempted with vsyscall=none");
+               return false;
+       }
+
+       vsyscall_nr = addr_to_vsyscall_nr(address);
+
+       trace_emulate_vsyscall(vsyscall_nr);
+
+       if (vsyscall_nr < 0) {
+               warn_bad_vsyscall(KERN_WARNING, regs,
+                                 "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
+               goto sigsegv;
+       }
+
+       if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
+               warn_bad_vsyscall(KERN_WARNING, regs,
+                                 "vsyscall with bad stack (exploit attempt?)");
+               goto sigsegv;
+       }
+
+       tsk = current;
+
+       /*
+        * Check for access_ok violations and find the syscall nr.
+        *
+        * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
+        * 64-bit, so we don't need to special-case it here.  For all the
+        * vsyscalls, NULL means "don't write anything" not "write it at
+        * address 0".
+        */
+       switch (vsyscall_nr) {
+       case 0:
+               if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
+                   !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_gettimeofday;
+               break;
+
+       case 1:
+               if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_time;
+               break;
+
+       case 2:
+               if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
+                   !write_ok_or_segv(regs->si, sizeof(unsigned))) {
+                       ret = -EFAULT;
+                       goto check_fault;
+               }
+
+               syscall_nr = __NR_getcpu;
+               break;
+       }
+
+       /*
+        * Handle seccomp.  regs->ip must be the original value.
+        * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
+        *
+        * We could optimize the seccomp disabled case, but performance
+        * here doesn't matter.
+        */
+       regs->orig_ax = syscall_nr;
+       regs->ax = -ENOSYS;
+       tmp = secure_computing();
+       if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
+               warn_bad_vsyscall(KERN_DEBUG, regs,
+                                 "seccomp tried to change syscall nr or ip");
+               do_exit(SIGSYS);
+       }
+       regs->orig_ax = -1;
+       if (tmp)
+               goto do_ret;  /* skip requested */
+
+       /*
+        * With a real vsyscall, page faults cause SIGSEGV.  We want to
+        * preserve that behavior to make writing exploits harder.
+        */
+       prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
+       current_thread_info()->sig_on_uaccess_error = 1;
+
+       ret = -EFAULT;
+       switch (vsyscall_nr) {
+       case 0:
+               ret = sys_gettimeofday(
+                       (struct timeval __user *)regs->di,
+                       (struct timezone __user *)regs->si);
+               break;
+
+       case 1:
+               ret = sys_time((time_t __user *)regs->di);
+               break;
+
+       case 2:
+               ret = sys_getcpu((unsigned __user *)regs->di,
+                                (unsigned __user *)regs->si,
+                                NULL);
+               break;
+       }
+
+       current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
+
+check_fault:
+       if (ret == -EFAULT) {
+               /* Bad news -- userspace fed a bad pointer to a vsyscall. */
+               warn_bad_vsyscall(KERN_INFO, regs,
+                                 "vsyscall fault (exploit attempt?)");
+
+               /*
+                * If we failed to generate a signal for any reason,
+                * generate one here.  (This should be impossible.)
+                */
+               if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
+                                !sigismember(&tsk->pending.signal, SIGSEGV)))
+                       goto sigsegv;
+
+               return true;  /* Don't emulate the ret. */
+       }
+
+       regs->ax = ret;
+
+do_ret:
+       /* Emulate a ret instruction. */
+       regs->ip = caller;
+       regs->sp += 8;
+       return true;
+
+sigsegv:
+       force_sig(SIGSEGV, current);
+       return true;
+}
+
+/*
+ * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
+ * covers the 64-bit vsyscall page now.  32-bit has a real VMA and does
+ * not need special handling anymore.
+ */
+static const char *gate_vma_name(struct vm_area_struct *vma)
+{
+       return "[vsyscall]";
+}
+static struct vm_operations_struct gate_vma_ops = {
+       .name = gate_vma_name,
+};
+static struct vm_area_struct gate_vma = {
+       .vm_start       = VSYSCALL_ADDR,
+       .vm_end         = VSYSCALL_ADDR + PAGE_SIZE,
+       .vm_page_prot   = PAGE_READONLY_EXEC,
+       .vm_flags       = VM_READ | VM_EXEC,
+       .vm_ops         = &gate_vma_ops,
+};
+
+struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
+{
+#ifdef CONFIG_IA32_EMULATION
+       if (!mm || mm->context.ia32_compat)
+               return NULL;
+#endif
+       if (vsyscall_mode == NONE)
+               return NULL;
+       return &gate_vma;
+}
+
+int in_gate_area(struct mm_struct *mm, unsigned long addr)
+{
+       struct vm_area_struct *vma = get_gate_vma(mm);
+
+       if (!vma)
+               return 0;
+
+       return (addr >= vma->vm_start) && (addr < vma->vm_end);
+}
+
+/*
+ * Use this when you have no reliable mm, typically from interrupt
+ * context. It is less reliable than using a task's mm and may give
+ * false positives.
+ */
+int in_gate_area_no_mm(unsigned long addr)
+{
+       return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
+}
+
+void __init map_vsyscall(void)
+{
+       extern char __vsyscall_page;
+       unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
+
+       if (vsyscall_mode != NONE)
+               __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
+                            vsyscall_mode == NATIVE
+                            ? PAGE_KERNEL_VSYSCALL
+                            : PAGE_KERNEL_VVAR);
+
+       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
+                    (unsigned long)VSYSCALL_ADDR);
+}
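
The layout this emulation depends on is fixed: the vsyscall page sits at VSYSCALL_ADDR (0xffffffffff600000 on x86-64) and, as vsyscall_emu_64.S below shows, each stub is aligned to 1024 bytes, so addr_to_vsyscall_nr() recovers the call number from address bits 10-11. Restated as a standalone sketch:

    #include <stdio.h>

    #define VSYSCALL_ADDR   0xffffffffff600000UL

    static int addr_to_nr(unsigned long addr)
    {
            int nr;

            if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
                    return -1;
            nr = (addr & 0xC00UL) >> 10;
            return nr >= 3 ? -1 : nr;  /* 0=gettimeofday 1=time 2=getcpu */
    }

    int main(void)
    {
            printf("%d\n", addr_to_nr(VSYSCALL_ADDR + 0x400)); /* prints 1 */
            return 0;
    }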
diff --git a/arch/x86/entry/vsyscall/vsyscall_emu_64.S b/arch/x86/entry/vsyscall/vsyscall_emu_64.S
new file mode 100644 (file)
index 0000000..c9596a9
--- /dev/null
@@ -0,0 +1,37 @@
+/*
+ * vsyscall_emu_64.S: Vsyscall emulation page
+ *
+ * Copyright (c) 2011 Andy Lutomirski
+ *
+ * Subject to the GNU General Public License, version 2
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/irq_vectors.h>
+#include <asm/page_types.h>
+#include <asm/unistd_64.h>
+
+__PAGE_ALIGNED_DATA
+       .globl __vsyscall_page
+       .balign PAGE_SIZE, 0xcc
+       .type __vsyscall_page, @object
+__vsyscall_page:
+
+       mov $__NR_gettimeofday, %rax
+       syscall
+       ret
+
+       .balign 1024, 0xcc
+       mov $__NR_time, %rax
+       syscall
+       ret
+
+       .balign 1024, 0xcc
+       mov $__NR_getcpu, %rax
+       syscall
+       ret
+
+       .balign 4096, 0xcc
+
+       .size __vsyscall_page, 4096
diff --git a/arch/x86/entry/vsyscall/vsyscall_gtod.c b/arch/x86/entry/vsyscall/vsyscall_gtod.c
new file mode 100644 (file)
index 0000000..51e3304
--- /dev/null
@@ -0,0 +1,70 @@
+/*
+ *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
+ *  Copyright 2003 Andi Kleen, SuSE Labs.
+ *
+ *  Modified for x86 32 bit architecture by
+ *  Stefani Seibold <stefani@seibold.net>
+ *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
+ *
+ *  Thanks to hpa@transmeta.com for some useful hints.
+ *  Special thanks to Ingo Molnar for his early experience with
+ *  a different vsyscall implementation for Linux/IA32 and for the name.
+ *
+ */
+
+#include <linux/timekeeper_internal.h>
+#include <asm/vgtod.h>
+#include <asm/vvar.h>
+
+DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
+
+void update_vsyscall_tz(void)
+{
+       vsyscall_gtod_data.tz_minuteswest = sys_tz.tz_minuteswest;
+       vsyscall_gtod_data.tz_dsttime = sys_tz.tz_dsttime;
+}
+
+void update_vsyscall(struct timekeeper *tk)
+{
+       struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
+
+       gtod_write_begin(vdata);
+
+       /* copy vsyscall data */
+       vdata->vclock_mode      = tk->tkr_mono.clock->archdata.vclock_mode;
+       vdata->cycle_last       = tk->tkr_mono.cycle_last;
+       vdata->mask             = tk->tkr_mono.mask;
+       vdata->mult             = tk->tkr_mono.mult;
+       vdata->shift            = tk->tkr_mono.shift;
+
+       vdata->wall_time_sec            = tk->xtime_sec;
+       vdata->wall_time_snsec          = tk->tkr_mono.xtime_nsec;
+
+       vdata->monotonic_time_sec       = tk->xtime_sec
+                                       + tk->wall_to_monotonic.tv_sec;
+       vdata->monotonic_time_snsec     = tk->tkr_mono.xtime_nsec
+                                       + ((u64)tk->wall_to_monotonic.tv_nsec
+                                               << tk->tkr_mono.shift);
+       while (vdata->monotonic_time_snsec >=
+                                       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
+               vdata->monotonic_time_snsec -=
+                                       ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
+               vdata->monotonic_time_sec++;
+       }
+
+       vdata->wall_time_coarse_sec     = tk->xtime_sec;
+       vdata->wall_time_coarse_nsec    = (long)(tk->tkr_mono.xtime_nsec >>
+                                                tk->tkr_mono.shift);
+
+       vdata->monotonic_time_coarse_sec =
+               vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
+       vdata->monotonic_time_coarse_nsec =
+               vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
+
+       while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
+               vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
+               vdata->monotonic_time_coarse_sec++;
+       }
+
+       gtod_write_end(vdata);
+}
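
The snsec fields written above hold nanoseconds pre-shifted left by tk->tkr_mono.shift, which lets the vDSO reader add the scaled cycle delta and shift back down exactly once. A simplified reader sketch; the seqcount retry loop and vclock-mode dispatch of the real vclock_gettime.c are omitted:

    #include <stdint.h>

    struct gtod_sketch {
            uint64_t cycle_last;
            uint64_t mask;
            uint32_t mult;
            uint32_t shift;
            uint64_t wall_time_sec;
            uint64_t wall_time_snsec;       /* nanoseconds << shift */
    };

    static uint64_t read_wall_ns(const struct gtod_sketch *g, uint64_t cycles)
    {
            uint64_t delta = (cycles - g->cycle_last) & g->mask;
            uint64_t snsec = g->wall_time_snsec + delta * g->mult;

            return g->wall_time_sec * 1000000000ULL + (snsec >> g->shift);
    }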
diff --git a/arch/x86/entry/vsyscall/vsyscall_trace.h b/arch/x86/entry/vsyscall/vsyscall_trace.h
new file mode 100644 (file)
index 0000000..9dd7359
--- /dev/null
@@ -0,0 +1,29 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM vsyscall
+
+#if !defined(__VSYSCALL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define __VSYSCALL_TRACE_H
+
+#include <linux/tracepoint.h>
+
+TRACE_EVENT(emulate_vsyscall,
+
+           TP_PROTO(int nr),
+
+           TP_ARGS(nr),
+
+           TP_STRUCT__entry(__field(int, nr)),
+
+           TP_fast_assign(
+                          __entry->nr = nr;
+                          ),
+
+           TP_printk("nr = %d", __entry->nr)
+);
+
+#endif
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH ../../arch/x86/entry/vsyscall/
+#define TRACE_INCLUDE_FILE vsyscall_trace
+#include <trace/define_trace.h>
diff --git a/arch/x86/ia32/Makefile b/arch/x86/ia32/Makefile
index bb635c6418692f4fea1d02fce3b1c170f1a38b0f..cd4339bae066032a69c02cfb1917f210a7f963e8 100644 (file)
@@ -2,7 +2,7 @@
 # Makefile for the ia32 kernel emulation subsystem.
 #
 
-obj-$(CONFIG_IA32_EMULATION) := ia32entry.o sys_ia32.o ia32_signal.o
+obj-$(CONFIG_IA32_EMULATION) := sys_ia32.o ia32_signal.o
 
 obj-$(CONFIG_IA32_AOUT) += ia32_aout.o
 
diff --git a/arch/x86/ia32/ia32_signal.c b/arch/x86/ia32/ia32_signal.c
index c81d35e6c7f1d91c22734793c006c0f5f33c0c10..ae3a29ae875b5508d62b4d91c79db3f1dc26581d 100644 (file)
@@ -21,8 +21,8 @@
 #include <linux/binfmts.h>
 #include <asm/ucontext.h>
 #include <asm/uaccess.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
 #include <asm/ptrace.h>
 #include <asm/ia32_unistd.h>
 #include <asm/user32.h>
@@ -198,7 +198,7 @@ static int ia32_restore_sigcontext(struct pt_regs *regs,
                buf = compat_ptr(tmp);
        } get_user_catch(err);
 
-       err |= restore_xstate_sig(buf, 1);
+       err |= fpu__restore_sig(buf, 1);
 
        force_iret();
 
@@ -308,6 +308,7 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                                 size_t frame_size,
                                 void __user **fpstate)
 {
+       struct fpu *fpu = &current->thread.fpu;
        unsigned long sp;
 
        /* Default to using normal stack */
@@ -322,12 +323,12 @@ static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
                 ksig->ka.sa.sa_restorer)
                sp = (unsigned long) ksig->ka.sa.sa_restorer;
 
-       if (used_math()) {
+       if (fpu->fpstate_active) {
                unsigned long fx_aligned, math_size;
 
-               sp = alloc_mathframe(sp, 1, &fx_aligned, &math_size);
+               sp = fpu__alloc_mathframe(sp, 1, &fx_aligned, &math_size);
                *fpstate = (struct _fpstate_ia32 __user *) sp;
-               if (save_xstate_sig(*fpstate, (void __user *)fx_aligned,
+               if (copy_fpstate_to_sigframe(*fpstate, (void __user *)fx_aligned,
                                    math_size) < 0)
                        return (void __user *) -1L;
        }
diff --git a/arch/x86/ia32/ia32entry.S b/arch/x86/ia32/ia32entry.S
deleted file mode 100644 (file)
index 72bf268..0000000
+++ /dev/null
@@ -1,611 +0,0 @@
-/*
- * Compatibility mode system call entry point for x86-64. 
- *             
- * Copyright 2000-2002 Andi Kleen, SuSE Labs.
- */             
-
-#include <asm/dwarf2.h>
-#include <asm/calling.h>
-#include <asm/asm-offsets.h>
-#include <asm/current.h>
-#include <asm/errno.h>
-#include <asm/ia32_unistd.h>   
-#include <asm/thread_info.h>   
-#include <asm/segment.h>
-#include <asm/irqflags.h>
-#include <asm/asm.h>
-#include <asm/smap.h>
-#include <linux/linkage.h>
-#include <linux/err.h>
-
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_LE           0x40000000
-
-#ifndef CONFIG_AUDITSYSCALL
-#define sysexit_audit ia32_ret_from_sys_call
-#define sysretl_audit ia32_ret_from_sys_call
-#endif
-
-       .section .entry.text, "ax"
-
-       /* clobbers %rax */
-       .macro  CLEAR_RREGS _r9=rax
-       xorl    %eax,%eax
-       movq    %rax,R11(%rsp)
-       movq    %rax,R10(%rsp)
-       movq    %\_r9,R9(%rsp)
-       movq    %rax,R8(%rsp)
-       .endm
-
-       /*
-        * Reload arg registers from stack in case ptrace changed them.
-        * We don't reload %eax because syscall_trace_enter() returned
-        * the %rax value we should see.  Instead, we just truncate that
-        * value to 32 bits again as we did on entry from user mode.
-        * If it's a new value set by user_regset during entry tracing,
-        * this matches the normal truncation of the user-mode value.
-        * If it's -1 to make us punt the syscall, then (u32)-1 is still
-        * an appropriately invalid value.
-        */
-       .macro LOAD_ARGS32 _r9=0
-       .if \_r9
-       movl R9(%rsp),%r9d
-       .endif
-       movl RCX(%rsp),%ecx
-       movl RDX(%rsp),%edx
-       movl RSI(%rsp),%esi
-       movl RDI(%rsp),%edi
-       movl %eax,%eax                  /* zero extension */
-       .endm
-       
-       .macro CFI_STARTPROC32 simple
-       CFI_STARTPROC   \simple
-       CFI_UNDEFINED   r8
-       CFI_UNDEFINED   r9
-       CFI_UNDEFINED   r10
-       CFI_UNDEFINED   r11
-       CFI_UNDEFINED   r12
-       CFI_UNDEFINED   r13
-       CFI_UNDEFINED   r14
-       CFI_UNDEFINED   r15
-       .endm
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret32)
-       swapgs
-       sysretl
-ENDPROC(native_usergs_sysret32)
-
-ENTRY(native_irq_enable_sysexit)
-       swapgs
-       sti
-       sysexit
-ENDPROC(native_irq_enable_sysexit)
-#endif
-
-/*
- * 32bit SYSENTER instruction entry.
- *
- * SYSENTER loads ss, rsp, cs, and rip from previously programmed MSRs.
- * IF and VM in rflags are cleared (IOW: interrupts are off).
- * SYSENTER does not save anything on the stack,
- * and does not save old rip (!!!) and rflags.
- *
- * Arguments:
- * eax  system call number
- * ebx  arg1
- * ecx  arg2
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * ebp  user stack
- * 0(%ebp) arg6
- *
- * This is purely a fast path. For anything complicated we use the int 0x80
- * path below. We set up a complete hardware stack frame to share code
- * with the int 0x80 path.
- */
-ENTRY(ia32_sysenter_target)
-       CFI_STARTPROC32 simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,0
-       CFI_REGISTER    rsp,rbp
-
-       /*
-        * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
-       SWAPGS_UNSAFE_STACK
-       movq    PER_CPU_VAR(cpu_tss + TSS_sp0), %rsp
-       ENABLE_INTERRUPTS(CLBR_NONE)
-
-       /* Zero-extending 32-bit regs, do not remove */
-       movl    %ebp, %ebp
-       movl    %eax, %eax
-
-       movl    ASM_THREAD_INFO(TI_sysenter_return, %rsp, 0), %r10d
-       CFI_REGISTER rip,r10
-
-       /* Construct struct pt_regs on stack */
-       pushq_cfi       $__USER32_DS            /* pt_regs->ss */
-       pushq_cfi       %rbp                    /* pt_regs->sp */
-       CFI_REL_OFFSET  rsp,0
-       pushfq_cfi                              /* pt_regs->flags */
-       pushq_cfi       $__USER32_CS            /* pt_regs->cs */
-       pushq_cfi       %r10 /* pt_regs->ip = thread_info->sysenter_return */
-       CFI_REL_OFFSET  rip,0
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rcx                     /* pt_regs->cx */
-       pushq_cfi_reg   rax                     /* pt_regs->ax */
-       cld
-       sub     $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-       CFI_ADJUST_CFA_OFFSET 10*8
-
-       /*
-        * no need to do an access_ok check here because rbp has been
-        * 32bit zero extended
-        */
-       ASM_STAC
-1:     movl    (%rbp),%ebp
-       _ASM_EXTABLE(1b,ia32_badarg)
-       ASM_CLAC
-
-       /*
-        * Sysenter doesn't filter flags, so we need to clear NT
-        * ourselves.  To save a few cycles, we can check whether
-        * NT was set instead of doing an unconditional popfq.
-        */
-       testl $X86_EFLAGS_NT,EFLAGS(%rsp)
-       jnz sysenter_fix_flags
-sysenter_flags_fixed:
-
-       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       CFI_REMEMBER_STATE
-       jnz  sysenter_tracesys
-       cmpq    $(IA32_NR_syscalls-1),%rax
-       ja      ia32_badsys
-sysenter_do_call:
-       /* 32bit syscall -> 64bit C ABI argument conversion */
-       movl    %edi,%r8d       /* arg5 */
-       movl    %ebp,%r9d       /* arg6 */
-       xchg    %ecx,%esi       /* rsi:arg2, rcx:arg4 */
-       movl    %ebx,%edi       /* arg1 */
-       movl    %edx,%edx       /* arg3 (zero extension) */
-sysenter_dispatch:
-       call    *ia32_sys_call_table(,%rax,8)
-       movq    %rax,RAX(%rsp)
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       testl   $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz     sysexit_audit
-sysexit_from_sys_call:
-       /*
-        * NB: SYSEXIT is not obviously safe for 64-bit kernels -- an
-        * NMI between STI and SYSEXIT has poorly specified behavior,
-        * and an NMI followed by an IRQ with usergs is fatal.  So
-        * we just pretend we're using SYSEXIT but we really use
-        * SYSRETL instead.
-        *
-        * This code path is still called 'sysexit' because it pairs
-        * with 'sysenter' and it uses the SYSENTER calling convention.
-        */
-       andl    $~TS_COMPAT,ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       movl    RIP(%rsp),%ecx          /* User %eip */
-       CFI_REGISTER rip,rcx
-       RESTORE_RSI_RDI
-       xorl    %edx,%edx               /* avoid info leaks */
-       xorq    %r8,%r8
-       xorq    %r9,%r9
-       xorq    %r10,%r10
-       movl    EFLAGS(%rsp),%r11d      /* User eflags */
-       /*CFI_RESTORE rflags*/
-       TRACE_IRQS_ON
-
-       /*
-        * SYSRETL works even on Intel CPUs.  Use it in preference to SYSEXIT,
-        * since it avoids a dicey window with interrupts enabled.
-        */
-       movl    RSP(%rsp),%esp
-
-       /*
-        * USERGS_SYSRET32 does:
-        *  gsbase = user's gs base
-        *  eip = ecx
-        *  rflags = r11
-        *  cs = __USER32_CS
-        *  ss = __USER_DS
-        *
-        * The prologue set RIP(%rsp) to VDSO32_SYSENTER_RETURN, which does:
-        *
-        *  pop %ebp
-        *  pop %edx
-        *  pop %ecx
-        *
-        * Therefore, we invoke SYSRETL with EDX and R8-R10 zeroed to
-        * avoid info leaks.  R11 ends up with VDSO32_SYSENTER_RETURN's
-        * address (already known to user code), and R12-R15 are
-        * callee-saved and therefore don't contain any interesting
-        * kernel data.
-        */
-       USERGS_SYSRET32
-
-       CFI_RESTORE_STATE
-
-#ifdef CONFIG_AUDITSYSCALL
-       .macro auditsys_entry_common
-       movl %esi,%r8d                  /* 5th arg: 4th syscall arg */
-       movl %ecx,%r9d                  /*swap with edx*/
-       movl %edx,%ecx                  /* 4th arg: 3rd syscall arg */
-       movl %r9d,%edx                  /* 3rd arg: 2nd syscall arg */
-       movl %ebx,%esi                  /* 2nd arg: 1st syscall arg */
-       movl %eax,%edi                  /* 1st arg: syscall number */
-       call __audit_syscall_entry
-       movl RAX(%rsp),%eax     /* reload syscall number */
-       cmpq $(IA32_NR_syscalls-1),%rax
-       ja ia32_badsys
-       movl %ebx,%edi                  /* reload 1st syscall arg */
-       movl RCX(%rsp),%esi     /* reload 2nd syscall arg */
-       movl RDX(%rsp),%edx     /* reload 3rd syscall arg */
-       movl RSI(%rsp),%ecx     /* reload 4th syscall arg */
-       movl RDI(%rsp),%r8d     /* reload 5th syscall arg */
-       .endm
-
-       .macro auditsys_exit exit
-       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz ia32_ret_from_sys_call
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       movl %eax,%esi          /* second arg, syscall return value */
-       cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
-       jbe 1f
-       movslq %eax, %rsi       /* if error sign extend to 64 bits */
-1:     setbe %al               /* 1 if error, 0 if not */
-       movzbl %al,%edi         /* zero-extend that into %edi */
-       call __audit_syscall_exit
-       movq RAX(%rsp),%rax     /* reload syscall return value */
-       movl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT),%edi
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       testl %edi, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jz \exit
-       CLEAR_RREGS
-       jmp int_with_check
-       .endm
-
-sysenter_auditsys:
-       auditsys_entry_common
-       movl %ebp,%r9d                  /* reload 6th syscall arg */
-       jmp sysenter_dispatch
-
-sysexit_audit:
-       auditsys_exit sysexit_from_sys_call
-#endif
-
-sysenter_fix_flags:
-       pushq_cfi $(X86_EFLAGS_IF|X86_EFLAGS_FIXED)
-       popfq_cfi
-       jmp sysenter_flags_fixed
-
-sysenter_tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-       testl   $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jz      sysenter_auditsys
-#endif
-       SAVE_EXTRA_REGS
-       CLEAR_RREGS
-       movq    $-ENOSYS,RAX(%rsp)/* ptrace can change this for a bad syscall */
-       movq    %rsp,%rdi        /* &pt_regs -> arg1 */
-       call    syscall_trace_enter
-       LOAD_ARGS32  /* reload args from stack in case ptrace changed it */
-       RESTORE_EXTRA_REGS
-       cmpq    $(IA32_NR_syscalls-1),%rax
-       ja      int_ret_from_sys_call /* sysenter_tracesys has set RAX(%rsp) */
-       jmp     sysenter_do_call
-       CFI_ENDPROC
-ENDPROC(ia32_sysenter_target)
-
-/*
- * 32bit SYSCALL instruction entry.
- *
- * 32bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
- * then loads new ss, cs, and rip from previously programmed MSRs.
- * rflags gets masked by a value from another MSR (so CLD and CLAC
- * are not needed). SYSCALL does not save anything on the stack
- * and does not change rsp.
- *
- * Note: rflags saving+masking-with-MSR happens only in Long mode
- * (in legacy 32bit mode, IF, RF and VM bits are cleared and that's it).
- * Don't get confused: rflags saving+masking depends on Long Mode Active bit
- * (EFER.LMA=1), NOT on bitness of userspace where SYSCALL executes
- * or target CS descriptor's L bit (SYSCALL does not read segment descriptors).
- *
- * Arguments:
- * eax  system call number
- * ecx  return address
- * ebx  arg1
- * ebp  arg2   (note: not saved in the stack frame, should not be touched)
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * esp  user stack
- * 0(%esp) arg6
- *
- * This is purely a fast path. For anything complicated we use the int 0x80
- * path below. We set up a complete hardware stack frame to share code
- * with the int 0x80 path.
- */
-ENTRY(ia32_cstar_target)
-       CFI_STARTPROC32 simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,0
-       CFI_REGISTER    rip,rcx
-       /*CFI_REGISTER  rflags,r11*/
-
-       /*
-        * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
-       SWAPGS_UNSAFE_STACK
-       movl    %esp,%r8d
-       CFI_REGISTER    rsp,r8
-       movq    PER_CPU_VAR(kernel_stack),%rsp
-       ENABLE_INTERRUPTS(CLBR_NONE)
-
-       /* Zero-extending 32-bit regs, do not remove */
-       movl    %eax,%eax
-
-       /* Construct struct pt_regs on stack */
-       pushq_cfi       $__USER32_DS            /* pt_regs->ss */
-       pushq_cfi       %r8                     /* pt_regs->sp */
-       CFI_REL_OFFSET rsp,0
-       pushq_cfi       %r11                    /* pt_regs->flags */
-       pushq_cfi       $__USER32_CS            /* pt_regs->cs */
-       pushq_cfi       %rcx                    /* pt_regs->ip */
-       CFI_REL_OFFSET rip,0
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rbp                     /* pt_regs->cx */
-       movl    %ebp,%ecx
-       pushq_cfi_reg   rax                     /* pt_regs->ax */
-       sub     $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-       CFI_ADJUST_CFA_OFFSET 10*8
-
-       /*
-        * no need to do an access_ok check here because r8 has been
-        * 32bit zero extended
-        */
-       ASM_STAC
-1:     movl    (%r8),%r9d
-       _ASM_EXTABLE(1b,ia32_badarg)
-       ASM_CLAC
-       orl     $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       testl   $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       CFI_REMEMBER_STATE
-       jnz   cstar_tracesys
-       cmpq $IA32_NR_syscalls-1,%rax
-       ja  ia32_badsys
-cstar_do_call:
-       /* 32bit syscall -> 64bit C ABI argument conversion */
-       movl    %edi,%r8d       /* arg5 */
-       /* r9 already loaded */ /* arg6 */
-       xchg    %ecx,%esi       /* rsi:arg2, rcx:arg4 */
-       movl    %ebx,%edi       /* arg1 */
-       movl    %edx,%edx       /* arg3 (zero extension) */
-cstar_dispatch:
-       call *ia32_sys_call_table(,%rax,8)
-       movq %rax,RAX(%rsp)
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz sysretl_audit
-sysretl_from_sys_call:
-       andl $~TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       RESTORE_RSI_RDI_RDX
-       movl RIP(%rsp),%ecx
-       CFI_REGISTER rip,rcx
-       movl EFLAGS(%rsp),%r11d
-       /*CFI_REGISTER rflags,r11*/
-       xorq    %r10,%r10
-       xorq    %r9,%r9
-       xorq    %r8,%r8
-       TRACE_IRQS_ON
-       movl RSP(%rsp),%esp
-       CFI_RESTORE rsp
-       /*
-        * 64bit->32bit SYSRET restores eip from ecx,
-        * eflags from r11 (but RF and VM bits are forced to 0),
-        * cs and ss are loaded from MSRs.
-        * (Note: 32bit->32bit SYSRET is different: since r11
-        * does not exist, it merely sets eflags.IF=1).
-        *
-        * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
-        * descriptor is not reinitialized.  This means that we must
-        * avoid SYSRET with SS == NULL, which could happen if we schedule,
-        * exit the kernel, and re-enter using an interrupt vector.  (All
-        * interrupt entries on x86_64 set SS to NULL.)  We prevent that
-        * from happening by reloading SS in __switch_to.
-        */
-       USERGS_SYSRET32
-
-#ifdef CONFIG_AUDITSYSCALL
-cstar_auditsys:
-       CFI_RESTORE_STATE
-       movl %r9d,R9(%rsp)      /* register to be clobbered by call */
-       auditsys_entry_common
-       movl R9(%rsp),%r9d      /* reload 6th syscall arg */
-       jmp cstar_dispatch
-
-sysretl_audit:
-       auditsys_exit sysretl_from_sys_call
-#endif
-
-cstar_tracesys:
-#ifdef CONFIG_AUDITSYSCALL
-       testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT), ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jz cstar_auditsys
-#endif
-       xchgl %r9d,%ebp
-       SAVE_EXTRA_REGS
-       CLEAR_RREGS r9
-       movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
-       movq %rsp,%rdi        /* &pt_regs -> arg1 */
-       call syscall_trace_enter
-       LOAD_ARGS32 1   /* reload args from stack in case ptrace changed it */
-       RESTORE_EXTRA_REGS
-       xchgl %ebp,%r9d
-       cmpq $(IA32_NR_syscalls-1),%rax
-       ja int_ret_from_sys_call /* cstar_tracesys has set RAX(%rsp) */
-       jmp cstar_do_call
-END(ia32_cstar_target)
-                               
-ia32_badarg:
-       ASM_CLAC
-       movq $-EFAULT,%rax
-       jmp ia32_sysret
-       CFI_ENDPROC
-
-/*
- * Emulated IA32 system calls via int 0x80.
- *
- * Arguments:
- * eax  system call number
- * ebx  arg1
- * ecx  arg2
- * edx  arg3
- * esi  arg4
- * edi  arg5
- * ebp  arg6   (note: not saved in the stack frame, should not be touched)
- *
- * Notes:
- * Uses the same stack frame as the x86-64 version.
- * All registers except eax must be saved (but ptrace may violate that).
- * Arguments are zero extended. For system calls that want sign extension and
- * take long arguments, a wrapper is needed. Most calls can just be called
- * directly.
- * Assumes it is only called from user space and entered with interrupts off.
- */
-
-ENTRY(ia32_syscall)
-       CFI_STARTPROC32 simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,5*8
-       /*CFI_REL_OFFSET        ss,4*8 */
-       CFI_REL_OFFSET  rsp,3*8
-       /*CFI_REL_OFFSET        rflags,2*8 */
-       /*CFI_REL_OFFSET        cs,1*8 */
-       CFI_REL_OFFSET  rip,0*8
-
-       /*
-        * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
-       PARAVIRT_ADJUST_EXCEPTION_FRAME
-       SWAPGS
-       ENABLE_INTERRUPTS(CLBR_NONE)
-
-       /* Zero-extending 32-bit regs, do not remove */
-       movl    %eax,%eax
-
-       /* Construct struct pt_regs on stack (iret frame is already on stack) */
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rcx                     /* pt_regs->cx */
-       pushq_cfi_reg   rax                     /* pt_regs->ax */
-       cld
-       sub     $(10*8),%rsp /* pt_regs->r8-11,bp,bx,r12-15 not saved */
-       CFI_ADJUST_CFA_OFFSET 10*8
-
-       orl $TS_COMPAT, ASM_THREAD_INFO(TI_status, %rsp, SIZEOF_PTREGS)
-       testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz ia32_tracesys
-       cmpq $(IA32_NR_syscalls-1),%rax
-       ja ia32_badsys
-ia32_do_call:
-       /* 32bit syscall -> 64bit C ABI argument conversion */
-       movl %edi,%r8d  /* arg5 */
-       movl %ebp,%r9d  /* arg6 */
-       xchg %ecx,%esi  /* rsi:arg2, rcx:arg4 */
-       movl %ebx,%edi  /* arg1 */
-       movl %edx,%edx  /* arg3 (zero extension) */
-       call *ia32_sys_call_table(,%rax,8) # xxx: rip relative
-ia32_sysret:
-       movq %rax,RAX(%rsp)
-ia32_ret_from_sys_call:
-       CLEAR_RREGS
-       jmp int_ret_from_sys_call
-
-ia32_tracesys:
-       SAVE_EXTRA_REGS
-       CLEAR_RREGS
-       movq $-ENOSYS,RAX(%rsp) /* ptrace can change this for a bad syscall */
-       movq %rsp,%rdi        /* &pt_regs -> arg1 */
-       call syscall_trace_enter
-       LOAD_ARGS32     /* reload args from stack in case ptrace changed it */
-       RESTORE_EXTRA_REGS
-       cmpq $(IA32_NR_syscalls-1),%rax
-       ja  int_ret_from_sys_call       /* ia32_tracesys has set RAX(%rsp) */
-       jmp ia32_do_call
-END(ia32_syscall)
-
-ia32_badsys:
-       movq $0,ORIG_RAX(%rsp)
-       movq $-ENOSYS,%rax
-       jmp ia32_sysret
-
-       CFI_ENDPROC
-       
-       .macro PTREGSCALL label, func
-       ALIGN
-GLOBAL(\label)
-       leaq \func(%rip),%rax
-       jmp  ia32_ptregs_common 
-       .endm
-
-       CFI_STARTPROC32
-
-       PTREGSCALL stub32_rt_sigreturn, sys32_rt_sigreturn
-       PTREGSCALL stub32_sigreturn, sys32_sigreturn
-       PTREGSCALL stub32_fork, sys_fork
-       PTREGSCALL stub32_vfork, sys_vfork
-
-       ALIGN
-GLOBAL(stub32_clone)
-       leaq sys_clone(%rip),%rax
-       mov     %r8, %rcx
-       jmp  ia32_ptregs_common 
-
-       ALIGN
-ia32_ptregs_common:
-       CFI_ENDPROC
-       CFI_STARTPROC32 simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,SIZEOF_PTREGS
-       CFI_REL_OFFSET  rax,RAX
-       CFI_REL_OFFSET  rcx,RCX
-       CFI_REL_OFFSET  rdx,RDX
-       CFI_REL_OFFSET  rsi,RSI
-       CFI_REL_OFFSET  rdi,RDI
-       CFI_REL_OFFSET  rip,RIP
-/*     CFI_REL_OFFSET  cs,CS*/
-/*     CFI_REL_OFFSET  rflags,EFLAGS*/
-       CFI_REL_OFFSET  rsp,RSP
-/*     CFI_REL_OFFSET  ss,SS*/
-       SAVE_EXTRA_REGS 8
-       call *%rax
-       RESTORE_EXTRA_REGS 8
-       ret
-       CFI_ENDPROC
-END(ia32_ptregs_common)
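
The register convention spelled out in the deleted entry points above is exactly what 32-bit user space emits. As a hedged sketch (not part of this patch; a 32-bit build is assumed, and 4 is __NR_write on i386), the int 0x80 path above is entered like this:

	static long sys_write_int80(int fd, const void *buf, unsigned int len)
	{
		long ret;

		/* eax = syscall number, ebx/ecx/edx = arg1..arg3, result in eax */
		asm volatile("int $0x80"
			     : "=a" (ret)
			     : "0" (4 /* __NR_write */), "b" (fd), "c" (buf), "d" (len)
			     : "memory");
		return ret;
	}
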
index bdf02eeee76519582b0fe9c35b631852b1b417d9..e7636bac7372d41d4b8077f4d7df7d81de32ee32 100644 (file)
        .endm
 #endif
 
+/*
+ * Issue one struct alt_instr descriptor entry (need to put it into
+ * the section .altinstructions, see below). This entry contains
+ * enough information for the alternatives patching code to patch an
+ * instruction. See apply_alternatives().
+ */
 .macro altinstruction_entry orig alt feature orig_len alt_len pad_len
        .long \orig - .
        .long \alt - .
        .byte \pad_len
 .endm
 
+/*
+ * Define an alternative between two instructions. If @feature is
+ * present, early code in apply_alternatives() replaces @oldinstr with
+ * @newinstr. The ".skip" directive takes care of proper instruction
+ * padding in case @newinstr is longer than @oldinstr.
+ */
 .macro ALTERNATIVE oldinstr, newinstr, feature
 140:
        \oldinstr
  */
 #define alt_max_short(a, b)    ((a) ^ (((a) ^ (b)) & -(-((a) < (b)))))
 
+
+/*
+ * Same as the ALTERNATIVE macro above, but for two alternatives. If the CPU
+ * has @feature1, it replaces @oldinstr with @newinstr1. If the CPU has
+ * @feature2, it replaces @oldinstr with @newinstr2.
+ */
 .macro ALTERNATIVE_2 oldinstr, newinstr1, feature1, newinstr2, feature2
 140:
        \oldinstr
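
For orientation, the companion C header exposes the same mechanism as the alternative() macro. A hedged sketch of its use (X86_FEATURE_EXAMPLE is a placeholder feature bit, and patching the fence away is purely illustrative):

	static inline void example_fence(void)
	{
		/*
		 * Default to MFENCE; on CPUs advertising the hypothetical
		 * X86_FEATURE_EXAMPLE bit, apply_alternatives() NOP-pads
		 * the barrier away at boot.
		 */
		alternative("mfence", "", X86_FEATURE_EXAMPLE);
	}
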
index ba32af062f61d69164a792630e3257c8cdc6deb5..7bfc85bbb8ffc0578011ceac2c08548bd140ade3 100644 (file)
@@ -52,6 +52,12 @@ struct alt_instr {
        u8  padlen;             /* length of build-time padding */
 } __packed;
 
+/*
+ * Debug flag that can be tested to see whether alternative
+ * instructions were patched in already:
+ */
+extern int alternatives_patched;
+
 extern void alternative_instructions(void);
 extern void apply_alternatives(struct alt_instr *start, struct alt_instr *end);
 
index aaac3b2fb746d3e61019f9e0804d7bf2913f8de2..1a5da2e63aeeebc062bd0f5c08b36e4bda32707d 100644 (file)
@@ -98,11 +98,22 @@ static inline u16 amd_get_node_id(struct pci_dev *pdev)
        return 0;
 }
 
+static inline bool amd_gart_present(void)
+{
+       /* GART is present on Fam0fh/10h, and on Fam15h only up to model 0fh */
+       if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
+           (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
+               return true;
+
+       return false;
+}
+
 #else
 
 #define amd_nb_num(x)          0
 #define amd_nb_has_feature(x)  false
 #define node_to_amd_nb(x)      NULL
+#define amd_gart_present(x)    false
 
 #endif
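
A hedged sketch of how a caller would use the new helper (the init function is hypothetical); the stub above lets the same code compile away when AMD northbridge support is not built in:

	static int __init example_gart_init(void)
	{
		if (!amd_gart_present())
			return -ENODEV;	/* no GART: never touch its registers */
		/* ... safe to program the GART/aperture registers from here ... */
		return 0;
	}
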
 
index 976b86a325e55cedfd28c029455ddecb2c06b6be..c8393634ca0c50ff4f4acb490e3e5c57ffc5ef19 100644 (file)
@@ -644,6 +644,12 @@ static inline void entering_ack_irq(void)
        entering_irq();
 }
 
+static inline void ipi_entering_ack_irq(void)
+{
+       ack_APIC_irq();
+       irq_enter();
+}
+
 static inline void exiting_irq(void)
 {
        irq_exit();
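
A hedged sketch of the handler shape the new helper is meant for, paired with exiting_irq() above (the handler name is a placeholder):

	__visible void smp_example_ipi(struct pt_regs *regs)
	{
		ipi_entering_ack_irq();		/* ack the APIC, then irq_enter() */
		/* ... handle the IPI ... */
		exiting_irq();			/* irq_exit() */
	}
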
index 7730c1c5c83aa7aaf6859170f812ad1f410c1a0b..189679aba703537393b87d699a4e11cbfe94e23c 100644 (file)
        _ASM_ALIGN ;                                            \
        _ASM_PTR (entry);                                       \
        .popsection
+
+.macro ALIGN_DESTINATION
+       /* check for bad alignment of destination */
+       movl %edi,%ecx
+       andl $7,%ecx
+       jz 102f                         /* already aligned */
+       subl $8,%ecx
+       negl %ecx
+       subl %ecx,%edx
+100:   movb (%rsi),%al
+101:   movb %al,(%rdi)
+       incq %rsi
+       incq %rdi
+       decl %ecx
+       jnz 100b
+102:
+       .section .fixup,"ax"
+103:   addl %ecx,%edx                  /* ecx is zerorest also */
+       jmp copy_user_handle_tail
+       .previous
+
+       _ASM_EXTABLE(100b,103b)
+       _ASM_EXTABLE(101b,103b)
+       .endm
+
 #else
 # define _ASM_EXTABLE(from,to)                                 \
        " .pushsection \"__ex_table\",\"a\"\n"                  \
index 5e5cd123fdfbc2b0fe90cabc5d27948d3ded267a..e9168955c42f4ee8b18e726e28ecacf39b75a7f2 100644 (file)
@@ -22,7 +22,7 @@
  *
  * Atomically reads the value of @v.
  */
-static inline int atomic_read(const atomic_t *v)
+static __always_inline int atomic_read(const atomic_t *v)
 {
        return ACCESS_ONCE((v)->counter);
 }
@@ -34,7 +34,7 @@ static inline int atomic_read(const atomic_t *v)
  *
  * Atomically sets the value of @v to @i.
  */
-static inline void atomic_set(atomic_t *v, int i)
+static __always_inline void atomic_set(atomic_t *v, int i)
 {
        v->counter = i;
 }
@@ -46,7 +46,7 @@ static inline void atomic_set(atomic_t *v, int i)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic_add(int i, atomic_t *v)
+static __always_inline void atomic_add(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "addl %1,%0"
                     : "+m" (v->counter)
@@ -60,7 +60,7 @@ static inline void atomic_add(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v.
  */
-static inline void atomic_sub(int i, atomic_t *v)
+static __always_inline void atomic_sub(int i, atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "subl %1,%0"
                     : "+m" (v->counter)
@@ -76,7 +76,7 @@ static inline void atomic_sub(int i, atomic_t *v)
  * true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic_sub_and_test(int i, atomic_t *v)
+static __always_inline int atomic_sub_and_test(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "subl", v->counter, "er", i, "%0", "e");
 }
@@ -87,7 +87,7 @@ static inline int atomic_sub_and_test(int i, atomic_t *v)
  *
  * Atomically increments @v by 1.
  */
-static inline void atomic_inc(atomic_t *v)
+static __always_inline void atomic_inc(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "incl %0"
                     : "+m" (v->counter));
@@ -99,7 +99,7 @@ static inline void atomic_inc(atomic_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static inline void atomic_dec(atomic_t *v)
+static __always_inline void atomic_dec(atomic_t *v)
 {
        asm volatile(LOCK_PREFIX "decl %0"
                     : "+m" (v->counter));
@@ -113,7 +113,7 @@ static inline void atomic_dec(atomic_t *v)
  * returns true if the result is 0, or false for all other
  * cases.
  */
-static inline int atomic_dec_and_test(atomic_t *v)
+static __always_inline int atomic_dec_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
 }
@@ -126,7 +126,7 @@ static inline int atomic_dec_and_test(atomic_t *v)
  * and returns true if the result is zero, or false for all
  * other cases.
  */
-static inline int atomic_inc_and_test(atomic_t *v)
+static __always_inline int atomic_inc_and_test(atomic_t *v)
 {
        GEN_UNARY_RMWcc(LOCK_PREFIX "incl", v->counter, "%0", "e");
 }
@@ -140,7 +140,7 @@ static inline int atomic_inc_and_test(atomic_t *v)
  * if the result is negative, or false when
  * result is greater than or equal to zero.
  */
-static inline int atomic_add_negative(int i, atomic_t *v)
+static __always_inline int atomic_add_negative(int i, atomic_t *v)
 {
        GEN_BINARY_RMWcc(LOCK_PREFIX "addl", v->counter, "er", i, "%0", "s");
 }
@@ -152,7 +152,7 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline int atomic_add_return(int i, atomic_t *v)
+static __always_inline int atomic_add_return(int i, atomic_t *v)
 {
        return i + xadd(&v->counter, i);
 }
@@ -164,7 +164,7 @@ static inline int atomic_add_return(int i, atomic_t *v)
  *
  * Atomically subtracts @i from @v and returns @v - @i
  */
-static inline int atomic_sub_return(int i, atomic_t *v)
+static __always_inline int atomic_sub_return(int i, atomic_t *v)
 {
        return atomic_add_return(-i, v);
 }
@@ -172,7 +172,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)
 #define atomic_inc_return(v)  (atomic_add_return(1, v))
 #define atomic_dec_return(v)  (atomic_sub_return(1, v))
 
-static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
+static __always_inline int atomic_cmpxchg(atomic_t *v, int old, int new)
 {
        return cmpxchg(&v->counter, old, new);
 }
@@ -191,7 +191,7 @@ static inline int atomic_xchg(atomic_t *v, int new)
  * Atomically adds @a to @v, so long as @v was not already @u.
  * Returns the old value of @v.
  */
-static inline int __atomic_add_unless(atomic_t *v, int a, int u)
+static __always_inline int __atomic_add_unless(atomic_t *v, int a, int u)
 {
        int c, old;
        c = atomic_read(v);
@@ -213,7 +213,7 @@ static inline int __atomic_add_unless(atomic_t *v, int a, int u)
  * Atomically adds 1 to @v
  * Returns the new value of @v
  */
-static inline short int atomic_inc_short(short int *v)
+static __always_inline short int atomic_inc_short(short int *v)
 {
        asm(LOCK_PREFIX "addw $1, %0" : "+m" (*v));
        return *v;
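
The __always_inline change does not alter how these are used. A minimal refcount-style sketch with placeholder names:

	static atomic_t example_refcnt = ATOMIC_INIT(1);

	static void example_get(void)
	{
		atomic_inc(&example_refcnt);
	}

	static void example_put(void)
	{
		if (atomic_dec_and_test(&example_refcnt))
			pr_debug("last reference dropped\n");	/* free the object here */
	}
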
index f8d273e18516dedf885bbafb16224c189913e14f..b965f9e03f2a04b2291fc0b62d537b6d9416bdaf 100644 (file)
@@ -40,7 +40,7 @@ static inline void atomic64_set(atomic64_t *v, long i)
  *
  * Atomically adds @i to @v.
  */
-static inline void atomic64_add(long i, atomic64_t *v)
+static __always_inline void atomic64_add(long i, atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "addq %1,%0"
                     : "=m" (v->counter)
@@ -81,7 +81,7 @@ static inline int atomic64_sub_and_test(long i, atomic64_t *v)
  *
  * Atomically increments @v by 1.
  */
-static inline void atomic64_inc(atomic64_t *v)
+static __always_inline void atomic64_inc(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "incq %0"
                     : "=m" (v->counter)
@@ -94,7 +94,7 @@ static inline void atomic64_inc(atomic64_t *v)
  *
  * Atomically decrements @v by 1.
  */
-static inline void atomic64_dec(atomic64_t *v)
+static __always_inline void atomic64_dec(atomic64_t *v)
 {
        asm volatile(LOCK_PREFIX "decq %0"
                     : "=m" (v->counter)
@@ -148,7 +148,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  *
  * Atomically adds @i to @v and returns @i + @v
  */
-static inline long atomic64_add_return(long i, atomic64_t *v)
+static __always_inline long atomic64_add_return(long i, atomic64_t *v)
 {
        return i + xadd(&v->counter, i);
 }
index 959e45b81fe29192b0f1c97a65e028e7314f603d..e51a8f803f55e30df69a6a02a20a86f9ed7ebdad 100644 (file)
 #define smp_mb()       mb()
 #define smp_rmb()      dma_rmb()
 #define smp_wmb()      barrier()
-#define set_mb(var, value) do { (void)xchg(&var, value); } while (0)
+#define smp_store_mb(var, value) do { (void)xchg(&var, value); } while (0)
 #else /* !SMP */
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 #endif /* SMP */
 
 #define read_barrier_depends()         do { } while (0)
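
The rename keeps the semantics: a store followed by a full memory barrier. A hedged store-buffering sketch (x, y, r0 and r1 are placeholder shared/local variables, all initially zero):

	/* CPU 0 */
	smp_store_mb(x, 1);	/* store to x, then a full barrier */
	r0 = READ_ONCE(y);

	/* CPU 1 */
	smp_store_mb(y, 1);	/* store to y, then a full barrier */
	r1 = READ_ONCE(x);

	/* the barriers forbid the r0 == 0 && r1 == 0 outcome */
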
index 47c8e32f621a0efd0ee3d9dd3cfb8c8c4f440292..b6f7457d12e41f338832ad2fbf8e0ad83c55fa5e 100644 (file)
@@ -8,7 +8,7 @@
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
  * address range. The attributes include:
- * Cacheability  : UnCached, WriteCombining, WriteBack
+ * Cacheability  : UnCached, WriteCombining, WriteThrough, WriteBack
 * Executability : eXecutable, NoteXecutable
  * Read/Write    : ReadOnly, ReadWrite
  * Presence      : NotPresent
 
 int _set_memory_uc(unsigned long addr, int numpages);
 int _set_memory_wc(unsigned long addr, int numpages);
+int _set_memory_wt(unsigned long addr, int numpages);
 int _set_memory_wb(unsigned long addr, int numpages);
 int set_memory_uc(unsigned long addr, int numpages);
 int set_memory_wc(unsigned long addr, int numpages);
+int set_memory_wt(unsigned long addr, int numpages);
 int set_memory_wb(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
@@ -48,10 +50,12 @@ int set_memory_4k(unsigned long addr, int numpages);
 
 int set_memory_array_uc(unsigned long *addr, int addrinarray);
 int set_memory_array_wc(unsigned long *addr, int addrinarray);
+int set_memory_array_wt(unsigned long *addr, int addrinarray);
 int set_memory_array_wb(unsigned long *addr, int addrinarray);
 
 int set_pages_array_uc(struct page **pages, int addrinarray);
 int set_pages_array_wc(struct page **pages, int addrinarray);
+int set_pages_array_wt(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
 /*
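
A hedged sketch of the new write-through interface, mirroring the existing set_memory_wc()/set_memory_wb() pattern (addr and nrpages are placeholders):

	int rc = set_memory_wt(addr, nrpages);	/* range becomes write-through */
	if (rc)
		return rc;
	/* ... CPU writes now go through to memory ... */
	set_memory_wb(addr, nrpages);		/* restore default write-back */
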
diff --git a/arch/x86/include/asm/calling.h b/arch/x86/include/asm/calling.h
deleted file mode 100644 (file)
index 1c8b50e..0000000
+++ /dev/null
@@ -1,247 +0,0 @@
-/*
-
- x86 function call convention, 64-bit:
- -------------------------------------
-  arguments           |  callee-saved      | extra caller-saved | return
- [callee-clobbered]   |                    | [callee-clobbered] |
- ---------------------------------------------------------------------------
- rdi rsi rdx rcx r8-9 | rbx rbp [*] r12-15 | r10-11             | rax, rdx [**]
-
- ( rsp is obviously invariant across normal function calls. (gcc can 'merge'
-   functions when it sees tail-call optimization possibilities) rflags is
-   clobbered. Leftover arguments are passed over the stack frame.)
-
- [*]  In the frame-pointers case rbp is fixed to the stack frame.
-
- [**] for struct return values wider than 64 bits the return convention is a
-      bit more complex: up to 128 bits width we return small structures
-      straight in rax, rdx. For structures larger than that (3 words or
-      larger) the caller puts a pointer to an on-stack return struct
-      [allocated in the caller's stack frame] into the first argument - i.e.
-      into rdi. All other arguments shift up by one in this case.
-      Fortunately this case is rare in the kernel.
-
-For 32-bit we have the following conventions - kernel is built with
--mregparm=3 and -freg-struct-return:
-
- x86 function calling convention, 32-bit:
- ----------------------------------------
-  arguments         | callee-saved        | extra caller-saved | return
- [callee-clobbered] |                     | [callee-clobbered] |
- -------------------------------------------------------------------------
- eax edx ecx        | ebx edi esi ebp [*] | <none>             | eax, edx [**]
-
- ( here too esp is obviously invariant across normal function calls. eflags
-   is clobbered. Leftover arguments are passed over the stack frame. )
-
- [*]  In the frame-pointers case ebp is fixed to the stack frame.
-
- [**] We build with -freg-struct-return, which on 32-bit means similar
-      semantics as on 64-bit: edx can be used for a second return value
-      (i.e. covering integer and structure sizes up to 64 bits) - after that
-      it gets more complex and more expensive: 3-word or larger struct returns
-      get done in the caller's frame and the pointer to the return struct goes
-      into regparm0, i.e. eax - the other arguments shift up and the
-      function's register parameters degenerate to regparm=2 in essence.
-
-*/
-
-#include <asm/dwarf2.h>
-
-#ifdef CONFIG_X86_64
-
-/*
- * 64-bit system call stack frame layout defines and helpers,
- * for assembly code:
- */
-
-/* The layout forms the "struct pt_regs" on the stack: */
-/*
- * C ABI says these regs are callee-preserved. They aren't saved on kernel entry
- * unless syscall needs a complete, fully filled "struct pt_regs".
- */
-#define R15            0*8
-#define R14            1*8
-#define R13            2*8
-#define R12            3*8
-#define RBP            4*8
-#define RBX            5*8
-/* These regs are callee-clobbered. Always saved on kernel entry. */
-#define R11            6*8
-#define R10            7*8
-#define R9             8*8
-#define R8             9*8
-#define RAX            10*8
-#define RCX            11*8
-#define RDX            12*8
-#define RSI            13*8
-#define RDI            14*8
-/*
- * On syscall entry, this is syscall#. On CPU exception, this is error code.
- * On hw interrupt, it's IRQ number:
- */
-#define ORIG_RAX       15*8
-/* Return frame for iretq */
-#define RIP            16*8
-#define CS             17*8
-#define EFLAGS         18*8
-#define RSP            19*8
-#define SS             20*8
-
-#define SIZEOF_PTREGS  21*8
-
-       .macro ALLOC_PT_GPREGS_ON_STACK addskip=0
-       subq    $15*8+\addskip, %rsp
-       CFI_ADJUST_CFA_OFFSET 15*8+\addskip
-       .endm
-
-       .macro SAVE_C_REGS_HELPER offset=0 rax=1 rcx=1 r8910=1 r11=1
-       .if \r11
-       movq_cfi r11, 6*8+\offset
-       .endif
-       .if \r8910
-       movq_cfi r10, 7*8+\offset
-       movq_cfi r9,  8*8+\offset
-       movq_cfi r8,  9*8+\offset
-       .endif
-       .if \rax
-       movq_cfi rax, 10*8+\offset
-       .endif
-       .if \rcx
-       movq_cfi rcx, 11*8+\offset
-       .endif
-       movq_cfi rdx, 12*8+\offset
-       movq_cfi rsi, 13*8+\offset
-       movq_cfi rdi, 14*8+\offset
-       .endm
-       .macro SAVE_C_REGS offset=0
-       SAVE_C_REGS_HELPER \offset, 1, 1, 1, 1
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_RAX_RCX offset=0
-       SAVE_C_REGS_HELPER \offset, 0, 0, 1, 1
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_R891011
-       SAVE_C_REGS_HELPER 0, 1, 1, 0, 0
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_RCX_R891011
-       SAVE_C_REGS_HELPER 0, 1, 0, 0, 0
-       .endm
-       .macro SAVE_C_REGS_EXCEPT_RAX_RCX_R11
-       SAVE_C_REGS_HELPER 0, 0, 0, 1, 0
-       .endm
-
-       .macro SAVE_EXTRA_REGS offset=0
-       movq_cfi r15, 0*8+\offset
-       movq_cfi r14, 1*8+\offset
-       movq_cfi r13, 2*8+\offset
-       movq_cfi r12, 3*8+\offset
-       movq_cfi rbp, 4*8+\offset
-       movq_cfi rbx, 5*8+\offset
-       .endm
-       .macro SAVE_EXTRA_REGS_RBP offset=0
-       movq_cfi rbp, 4*8+\offset
-       .endm
-
-       .macro RESTORE_EXTRA_REGS offset=0
-       movq_cfi_restore 0*8+\offset, r15
-       movq_cfi_restore 1*8+\offset, r14
-       movq_cfi_restore 2*8+\offset, r13
-       movq_cfi_restore 3*8+\offset, r12
-       movq_cfi_restore 4*8+\offset, rbp
-       movq_cfi_restore 5*8+\offset, rbx
-       .endm
-
-       .macro ZERO_EXTRA_REGS
-       xorl    %r15d, %r15d
-       xorl    %r14d, %r14d
-       xorl    %r13d, %r13d
-       xorl    %r12d, %r12d
-       xorl    %ebp, %ebp
-       xorl    %ebx, %ebx
-       .endm
-
-       .macro RESTORE_C_REGS_HELPER rstor_rax=1, rstor_rcx=1, rstor_r11=1, rstor_r8910=1, rstor_rdx=1
-       .if \rstor_r11
-       movq_cfi_restore 6*8, r11
-       .endif
-       .if \rstor_r8910
-       movq_cfi_restore 7*8, r10
-       movq_cfi_restore 8*8, r9
-       movq_cfi_restore 9*8, r8
-       .endif
-       .if \rstor_rax
-       movq_cfi_restore 10*8, rax
-       .endif
-       .if \rstor_rcx
-       movq_cfi_restore 11*8, rcx
-       .endif
-       .if \rstor_rdx
-       movq_cfi_restore 12*8, rdx
-       .endif
-       movq_cfi_restore 13*8, rsi
-       movq_cfi_restore 14*8, rdi
-       .endm
-       .macro RESTORE_C_REGS
-       RESTORE_C_REGS_HELPER 1,1,1,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_RAX
-       RESTORE_C_REGS_HELPER 0,1,1,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_RCX
-       RESTORE_C_REGS_HELPER 1,0,1,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_R11
-       RESTORE_C_REGS_HELPER 1,1,0,1,1
-       .endm
-       .macro RESTORE_C_REGS_EXCEPT_RCX_R11
-       RESTORE_C_REGS_HELPER 1,0,0,1,1
-       .endm
-       .macro RESTORE_RSI_RDI
-       RESTORE_C_REGS_HELPER 0,0,0,0,0
-       .endm
-       .macro RESTORE_RSI_RDI_RDX
-       RESTORE_C_REGS_HELPER 0,0,0,0,1
-       .endm
-
-       .macro REMOVE_PT_GPREGS_FROM_STACK addskip=0
-       addq $15*8+\addskip, %rsp
-       CFI_ADJUST_CFA_OFFSET -(15*8+\addskip)
-       .endm
-
-       .macro icebp
-       .byte 0xf1
-       .endm
-
-#else /* CONFIG_X86_64 */
-
-/*
- * For 32bit only simplified versions of SAVE_ALL/RESTORE_ALL. These
- * are different from the entry_32.S versions in not changing the segment
- * registers. So only suitable for in kernel use, not when transitioning
- * from or to user space. The resulting stack frame is not a standard
- * pt_regs frame. The main use case is calling C code from assembler
- * when all the registers need to be preserved.
- */
-
-       .macro SAVE_ALL
-       pushl_cfi_reg eax
-       pushl_cfi_reg ebp
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg edx
-       pushl_cfi_reg ecx
-       pushl_cfi_reg ebx
-       .endm
-
-       .macro RESTORE_ALL
-       popl_cfi_reg ebx
-       popl_cfi_reg ecx
-       popl_cfi_reg edx
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi_reg ebp
-       popl_cfi_reg eax
-       .endm
-
-#endif /* CONFIG_X86_64 */
-
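
The 64-bit convention documented at the top of this deleted header, in concrete form; a hedged illustration, not from the patch:

	/* a..f arrive in rdi, rsi, rdx, rcx, r8, r9; the result returns in rax */
	long example6(long a, long b, long c, long d, long e, long f)
	{
		return a + b + c + d + e + f;
	}
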
index 99c105d78b7e123eec41d9bb5b2c308e5205d514..ad19841eddfe142fec6b2b27dc329059d8fb7296 100644 (file)
@@ -4,8 +4,6 @@
 #include <linux/compiler.h>
 #include <asm/alternative.h> /* Provides LOCK_PREFIX */
 
-#define __HAVE_ARCH_CMPXCHG 1
-
 /*
  * Non-existent functions to indicate usage errors at link time
  * (or compile-time if the compiler implements __compiletime_error()).
index 1eef55596e82cade6ecc0e910488a6fcc9e0b74a..03bb1065c3352826843a305b15399751a5c860ae 100644 (file)
@@ -7,7 +7,7 @@
 
 #include <linux/kernel.h>
 #include <linux/crypto.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <crypto/b128ops.h>
 
 typedef void (*common_glue_func_t)(void *ctx, u8 *dst, const u8 *src);
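
<asm/fpu/api.h> is the new home of the kernel_fpu_begin()/kernel_fpu_end() bracket that FPU-using glue code like this relies on. The usage shape, as a hedged sketch:

	kernel_fpu_begin();	/* claim the FPU; no sleeping until the matching end */
	/* ... SSE/AVX rounds of the cipher ... */
	kernel_fpu_end();
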
index 808dae63eeea6f73eb312d4f5b7f77cf4e869e6c..1f5b7287d1ad8df92f789003018fec3913b03e1c 100644 (file)
@@ -127,50 +127,14 @@ static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
 
 #define dma_alloc_coherent(d,s,h,f)    dma_alloc_attrs(d,s,h,f,NULL)
 
-static inline void *
+void *
 dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
-               gfp_t gfp, struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-       void *memory;
-
-       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
-
-       if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
-               return memory;
-
-       if (!dev)
-               dev = &x86_dma_fallback_dev;
-
-       if (!is_device_dma_capable(dev))
-               return NULL;
-
-       if (!ops->alloc)
-               return NULL;
-
-       memory = ops->alloc(dev, size, dma_handle,
-                           dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
-       debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
-
-       return memory;
-}
+               gfp_t gfp, struct dma_attrs *attrs);
 
 #define dma_free_coherent(d,s,c,h) dma_free_attrs(d,s,c,h,NULL)
 
-static inline void dma_free_attrs(struct device *dev, size_t size,
-                                 void *vaddr, dma_addr_t bus,
-                                 struct dma_attrs *attrs)
-{
-       struct dma_map_ops *ops = get_dma_ops(dev);
-
-       WARN_ON(irqs_disabled());       /* for portability */
-
-       if (dma_release_from_coherent(dev, get_order(size), vaddr))
-               return;
-
-       debug_dma_free_coherent(dev, size, vaddr, bus);
-       if (ops->free)
-               ops->free(dev, size, vaddr, bus, attrs);
-}
+void dma_free_attrs(struct device *dev, size_t size,
+                   void *vaddr, dma_addr_t bus,
+                   struct dma_attrs *attrs);
 
 #endif
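
Moving dma_alloc_attrs()/dma_free_attrs() out of line leaves callers unchanged. A hedged usage sketch (dev stands in for a real struct device):

	dma_addr_t handle;
	void *cpu_addr = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);

	if (!cpu_addr)
		return -ENOMEM;
	/* ... the device DMAs via handle, the CPU uses cpu_addr ... */
	dma_free_coherent(dev, SZ_4K, cpu_addr, handle);
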
diff --git a/arch/x86/include/asm/dwarf2.h b/arch/x86/include/asm/dwarf2.h
deleted file mode 100644 (file)
index de1cdaf..0000000
+++ /dev/null
@@ -1,170 +0,0 @@
-#ifndef _ASM_X86_DWARF2_H
-#define _ASM_X86_DWARF2_H
-
-#ifndef __ASSEMBLY__
-#warning "asm/dwarf2.h should be only included in pure assembly files"
-#endif
-
-/*
- * Macros for dwarf2 CFI unwind table entries.
- * See "as.info" for details on these pseudo ops. Unfortunately
- * they are only supported in very new binutils, so define them
- * away for older versions.
- */
-
-#ifdef CONFIG_AS_CFI
-
-#define CFI_STARTPROC          .cfi_startproc
-#define CFI_ENDPROC            .cfi_endproc
-#define CFI_DEF_CFA            .cfi_def_cfa
-#define CFI_DEF_CFA_REGISTER   .cfi_def_cfa_register
-#define CFI_DEF_CFA_OFFSET     .cfi_def_cfa_offset
-#define CFI_ADJUST_CFA_OFFSET  .cfi_adjust_cfa_offset
-#define CFI_OFFSET             .cfi_offset
-#define CFI_REL_OFFSET         .cfi_rel_offset
-#define CFI_REGISTER           .cfi_register
-#define CFI_RESTORE            .cfi_restore
-#define CFI_REMEMBER_STATE     .cfi_remember_state
-#define CFI_RESTORE_STATE      .cfi_restore_state
-#define CFI_UNDEFINED          .cfi_undefined
-#define CFI_ESCAPE             .cfi_escape
-
-#ifdef CONFIG_AS_CFI_SIGNAL_FRAME
-#define CFI_SIGNAL_FRAME       .cfi_signal_frame
-#else
-#define CFI_SIGNAL_FRAME
-#endif
-
-#if defined(CONFIG_AS_CFI_SECTIONS) && defined(__ASSEMBLY__)
-       /*
-        * Emit CFI data in .debug_frame sections, not .eh_frame sections.
-        * The latter we currently just discard since we don't do DWARF
-        * unwinding at runtime.  So only the offline DWARF information is
-        * useful to anyone.  Note we should not use this directive if this
-        * file is used in the vDSO assembly, or if vmlinux.lds.S gets
-        * changed so it doesn't discard .eh_frame.
-        */
-       .cfi_sections .debug_frame
-#endif
-
-#else
-
-/*
- * Due to the structure of pre-existing code, don't use the assembler line
- * comment character # to ignore the arguments. Instead, use a dummy macro.
- */
-.macro cfi_ignore a=0, b=0, c=0, d=0
-.endm
-
-#define CFI_STARTPROC          cfi_ignore
-#define CFI_ENDPROC            cfi_ignore
-#define CFI_DEF_CFA            cfi_ignore
-#define CFI_DEF_CFA_REGISTER   cfi_ignore
-#define CFI_DEF_CFA_OFFSET     cfi_ignore
-#define CFI_ADJUST_CFA_OFFSET  cfi_ignore
-#define CFI_OFFSET             cfi_ignore
-#define CFI_REL_OFFSET         cfi_ignore
-#define CFI_REGISTER           cfi_ignore
-#define CFI_RESTORE            cfi_ignore
-#define CFI_REMEMBER_STATE     cfi_ignore
-#define CFI_RESTORE_STATE      cfi_ignore
-#define CFI_UNDEFINED          cfi_ignore
-#define CFI_ESCAPE             cfi_ignore
-#define CFI_SIGNAL_FRAME       cfi_ignore
-
-#endif
-
-/*
- * An attempt to make CFI annotations more or less
- * correct and shorter. It is implied that you know
- * what you're doing if you use them.
- */
-#ifdef __ASSEMBLY__
-#ifdef CONFIG_X86_64
-       .macro pushq_cfi reg
-       pushq \reg
-       CFI_ADJUST_CFA_OFFSET 8
-       .endm
-
-       .macro pushq_cfi_reg reg
-       pushq %\reg
-       CFI_ADJUST_CFA_OFFSET 8
-       CFI_REL_OFFSET \reg, 0
-       .endm
-
-       .macro popq_cfi reg
-       popq \reg
-       CFI_ADJUST_CFA_OFFSET -8
-       .endm
-
-       .macro popq_cfi_reg reg
-       popq %\reg
-       CFI_ADJUST_CFA_OFFSET -8
-       CFI_RESTORE \reg
-       .endm
-
-       .macro pushfq_cfi
-       pushfq
-       CFI_ADJUST_CFA_OFFSET 8
-       .endm
-
-       .macro popfq_cfi
-       popfq
-       CFI_ADJUST_CFA_OFFSET -8
-       .endm
-
-       .macro movq_cfi reg offset=0
-       movq %\reg, \offset(%rsp)
-       CFI_REL_OFFSET \reg, \offset
-       .endm
-
-       .macro movq_cfi_restore offset reg
-       movq \offset(%rsp), %\reg
-       CFI_RESTORE \reg
-       .endm
-#else /*!CONFIG_X86_64*/
-       .macro pushl_cfi reg
-       pushl \reg
-       CFI_ADJUST_CFA_OFFSET 4
-       .endm
-
-       .macro pushl_cfi_reg reg
-       pushl %\reg
-       CFI_ADJUST_CFA_OFFSET 4
-       CFI_REL_OFFSET \reg, 0
-       .endm
-
-       .macro popl_cfi reg
-       popl \reg
-       CFI_ADJUST_CFA_OFFSET -4
-       .endm
-
-       .macro popl_cfi_reg reg
-       popl %\reg
-       CFI_ADJUST_CFA_OFFSET -4
-       CFI_RESTORE \reg
-       .endm
-
-       .macro pushfl_cfi
-       pushfl
-       CFI_ADJUST_CFA_OFFSET 4
-       .endm
-
-       .macro popfl_cfi
-       popfl
-       CFI_ADJUST_CFA_OFFSET -4
-       .endm
-
-       .macro movl_cfi reg offset=0
-       movl %\reg, \offset(%esp)
-       CFI_REL_OFFSET \reg, \offset
-       .endm
-
-       .macro movl_cfi_restore offset reg
-       movl \offset(%esp), %\reg
-       CFI_RESTORE \reg
-       .endm
-#endif /*!CONFIG_X86_64*/
-#endif /*__ASSEMBLY__*/
-
-#endif /* _ASM_X86_DWARF2_H */
index 3738b138b843d46467c75a910d916cc79ebad25f..155162ea0e00292b619cc2a02ff8b2f31fafd02b 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef _ASM_X86_EFI_H
 #define _ASM_X86_EFI_H
 
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/pgtable.h>
 
 /*
index dc5fa661465f9a3fda9788c2f6caf9adc185a741..df002992d8fd3dffa9d9d0913e823ae52f9e2256 100644 (file)
@@ -23,6 +23,8 @@ BUILD_INTERRUPT(x86_platform_ipi, X86_PLATFORM_IPI_VECTOR)
 #ifdef CONFIG_HAVE_KVM
 BUILD_INTERRUPT3(kvm_posted_intr_ipi, POSTED_INTR_VECTOR,
                 smp_kvm_posted_intr_ipi)
+BUILD_INTERRUPT3(kvm_posted_intr_wakeup_ipi, POSTED_INTR_WAKEUP_VECTOR,
+                smp_kvm_posted_intr_wakeup_ipi)
 #endif
 
 /*
@@ -50,4 +52,7 @@ BUILD_INTERRUPT(thermal_interrupt,THERMAL_APIC_VECTOR)
 BUILD_INTERRUPT(threshold_interrupt,THRESHOLD_APIC_VECTOR)
 #endif
 
+#ifdef CONFIG_X86_MCE_AMD
+BUILD_INTERRUPT(deferred_error_interrupt, DEFERRED_ERROR_VECTOR)
+#endif
 #endif
diff --git a/arch/x86/include/asm/fpu-internal.h b/arch/x86/include/asm/fpu-internal.h
deleted file mode 100644 (file)
index da5e967..0000000
+++ /dev/null
@@ -1,626 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * General FPU state handling cleanups
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- * x86-64 work by Andi Kleen 2002
- */
-
-#ifndef _FPU_INTERNAL_H
-#define _FPU_INTERNAL_H
-
-#include <linux/kernel_stat.h>
-#include <linux/regset.h>
-#include <linux/compat.h>
-#include <linux/slab.h>
-#include <asm/asm.h>
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/sigcontext.h>
-#include <asm/user.h>
-#include <asm/uaccess.h>
-#include <asm/xsave.h>
-#include <asm/smap.h>
-
-#ifdef CONFIG_X86_64
-# include <asm/sigcontext32.h>
-# include <asm/user32.h>
-struct ksignal;
-int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
-                       compat_sigset_t *set, struct pt_regs *regs);
-int ia32_setup_frame(int sig, struct ksignal *ksig,
-                    compat_sigset_t *set, struct pt_regs *regs);
-#else
-# define user_i387_ia32_struct user_i387_struct
-# define user32_fxsr_struct    user_fxsr_struct
-# define ia32_setup_frame      __setup_frame
-# define ia32_setup_rt_frame   __setup_rt_frame
-#endif
-
-extern unsigned int mxcsr_feature_mask;
-extern void fpu_init(void);
-extern void eager_fpu_init(void);
-
-DECLARE_PER_CPU(struct task_struct *, fpu_owner_task);
-
-extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
-                             struct task_struct *tsk);
-extern void convert_to_fxsr(struct task_struct *tsk,
-                           const struct user_i387_ia32_struct *env);
-
-extern user_regset_active_fn fpregs_active, xfpregs_active;
-extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
-                               xstateregs_get;
-extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
-                                xstateregs_set;
-
-/*
- * xstateregs_active == fpregs_active. Please refer to the comment
- * at the definition of fpregs_active.
- */
-#define xstateregs_active      fpregs_active
-
-#ifdef CONFIG_MATH_EMULATION
-extern void finit_soft_fpu(struct i387_soft_struct *soft);
-#else
-static inline void finit_soft_fpu(struct i387_soft_struct *soft) {}
-#endif
-
-/*
- * Must be run with preemption disabled: this clears the fpu_owner_task,
- * on this CPU.
- *
- * This will disable any lazy FPU state restore of the current FPU state,
- * but if the current thread owns the FPU, it will still be saved by.
- */
-static inline void __cpu_disable_lazy_restore(unsigned int cpu)
-{
-       per_cpu(fpu_owner_task, cpu) = NULL;
-}
-
-/*
- * Used to indicate that the FPU state in memory is newer than the FPU
- * state in registers, and the FPU state should be reloaded next time the
- * task is run. Only safe on the current task, or non-running tasks.
- */
-static inline void task_disable_lazy_fpu_restore(struct task_struct *tsk)
-{
-       tsk->thread.fpu.last_cpu = ~0;
-}
-
-static inline int fpu_lazy_restore(struct task_struct *new, unsigned int cpu)
-{
-       return new == this_cpu_read_stable(fpu_owner_task) &&
-               cpu == new->thread.fpu.last_cpu;
-}
-
-static inline int is_ia32_compat_frame(void)
-{
-       return config_enabled(CONFIG_IA32_EMULATION) &&
-              test_thread_flag(TIF_IA32);
-}
-
-static inline int is_ia32_frame(void)
-{
-       return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
-}
-
-static inline int is_x32_frame(void)
-{
-       return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
-}
-
-#define X87_FSW_ES (1 << 7)    /* Exception Summary */
-
-static __always_inline __pure bool use_eager_fpu(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
-}
-
-static __always_inline __pure bool use_xsaveopt(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
-}
-
-static __always_inline __pure bool use_xsave(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_XSAVE);
-}
-
-static __always_inline __pure bool use_fxsr(void)
-{
-       return static_cpu_has_safe(X86_FEATURE_FXSR);
-}
-
-static inline void fx_finit(struct i387_fxsave_struct *fx)
-{
-       fx->cwd = 0x37f;
-       fx->mxcsr = MXCSR_DEFAULT;
-}
-
-extern void __sanitize_i387_state(struct task_struct *);
-
-static inline void sanitize_i387_state(struct task_struct *tsk)
-{
-       if (!use_xsaveopt())
-               return;
-       __sanitize_i387_state(tsk);
-}
-
-#define user_insn(insn, output, input...)                              \
-({                                                                     \
-       int err;                                                        \
-       asm volatile(ASM_STAC "\n"                                      \
-                    "1:" #insn "\n\t"                                  \
-                    "2: " ASM_CLAC "\n"                                \
-                    ".section .fixup,\"ax\"\n"                         \
-                    "3:  movl $-1,%[err]\n"                            \
-                    "    jmp  2b\n"                                    \
-                    ".previous\n"                                      \
-                    _ASM_EXTABLE(1b, 3b)                               \
-                    : [err] "=r" (err), output                         \
-                    : "0"(0), input);                                  \
-       err;                                                            \
-})
-
-#define check_insn(insn, output, input...)                             \
-({                                                                     \
-       int err;                                                        \
-       asm volatile("1:" #insn "\n\t"                                  \
-                    "2:\n"                                             \
-                    ".section .fixup,\"ax\"\n"                         \
-                    "3:  movl $-1,%[err]\n"                            \
-                    "    jmp  2b\n"                                    \
-                    ".previous\n"                                      \
-                    _ASM_EXTABLE(1b, 3b)                               \
-                    : [err] "=r" (err), output                         \
-                    : "0"(0), input);                                  \
-       err;                                                            \
-})
-
-static inline int fsave_user(struct i387_fsave_struct __user *fx)
-{
-       return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
-}
-
-static inline int fxsave_user(struct i387_fxsave_struct __user *fx)
-{
-       if (config_enabled(CONFIG_X86_32))
-               return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
-
-       /* See comment in fpu_fxsave() below. */
-       return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
-}
-
-static inline int fxrstor_checking(struct i387_fxsave_struct *fx)
-{
-       if (config_enabled(CONFIG_X86_32))
-               return check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               return check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-       /* See comment in fpu_fxsave() below. */
-       return check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
-                         "m" (*fx));
-}
-
-static inline int fxrstor_user(struct i387_fxsave_struct __user *fx)
-{
-       if (config_enabled(CONFIG_X86_32))
-               return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
-
-       /* See comment in fpu_fxsave() below. */
-       return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
-                         "m" (*fx));
-}
-
-static inline int frstor_checking(struct i387_fsave_struct *fx)
-{
-       return check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-}
-
-static inline int frstor_user(struct i387_fsave_struct __user *fx)
-{
-       return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
-}
-
-static inline void fpu_fxsave(struct fpu *fpu)
-{
-       if (config_enabled(CONFIG_X86_32))
-               asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state->fxsave));
-       else if (config_enabled(CONFIG_AS_FXSAVEQ))
-               asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state->fxsave));
-       else {
-               /* Using "rex64; fxsave %0" is broken because, if the memory
-                * operand uses any extended registers for addressing, a second
-                * REX prefix will be generated (to the assembler, rex64
-                * followed by semicolon is a separate instruction), and hence
-                * the 64-bitness is lost.
-                *
-                * Using "fxsaveq %0" would be the ideal choice, but is only
-                * supported starting with gas 2.16.
-                *
-                * Using, as a workaround, the properly prefixed form below
-                * isn't accepted by any binutils version so far released,
-                * complaining that the same type of prefix is used twice if
-                * an extended register is needed for addressing (fix submitted
-                * to mainline 2005-11-21).
-                *
-                *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state->fxsave));
-                *
-                * This, however, we can work around by forcing the compiler to
-                * select an addressing mode that doesn't require extended
-                * registers.
-                */
-               asm volatile( "rex64/fxsave (%[fx])"
-                            : "=m" (fpu->state->fxsave)
-                            : [fx] "R" (&fpu->state->fxsave));
-       }
-}
-
-/*
- * These must be called with preempt disabled. Returns
- * 'true' if the FPU state is still intact.
- */
-static inline int fpu_save_init(struct fpu *fpu)
-{
-       if (use_xsave()) {
-               fpu_xsave(fpu);
-
-               /*
-                * xsave header may indicate the init state of the FP.
-                */
-               if (!(fpu->state->xsave.xsave_hdr.xstate_bv & XSTATE_FP))
-                       return 1;
-       } else if (use_fxsr()) {
-               fpu_fxsave(fpu);
-       } else {
-               asm volatile("fnsave %[fx]; fwait"
-                            : [fx] "=m" (fpu->state->fsave));
-               return 0;
-       }
-
-       /*
-        * If exceptions are pending, we need to clear them so
-        * that we don't randomly get exceptions later.
-        *
-        * FIXME! Is this perhaps only true for the old-style
-        * irq13 case? Maybe we could leave the x87 state
-        * intact otherwise?
-        */
-       if (unlikely(fpu->state->fxsave.swd & X87_FSW_ES)) {
-               asm volatile("fnclex");
-               return 0;
-       }
-       return 1;
-}
-
-static inline int __save_init_fpu(struct task_struct *tsk)
-{
-       return fpu_save_init(&tsk->thread.fpu);
-}
-
-static inline int fpu_restore_checking(struct fpu *fpu)
-{
-       if (use_xsave())
-               return fpu_xrstor_checking(&fpu->state->xsave);
-       else if (use_fxsr())
-               return fxrstor_checking(&fpu->state->fxsave);
-       else
-               return frstor_checking(&fpu->state->fsave);
-}
-
-static inline int restore_fpu_checking(struct task_struct *tsk)
-{
-       /*
-        * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
-        * pending. Clear the x87 state here by setting it to fixed values.
-        * "m" is a random variable that should be in L1.
-        */
-       if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
-               asm volatile(
-                       "fnclex\n\t"
-                       "emms\n\t"
-                       "fildl %P[addr]"        /* set F?P to defined value */
-                       : : [addr] "m" (tsk->thread.fpu.has_fpu));
-       }
-
-       return fpu_restore_checking(&tsk->thread.fpu);
-}
-
-/*
- * Software FPU state helpers. Careful: these need to
- * be preemption protection *and* they need to be
- * properly paired with the CR0.TS changes!
- */
-static inline int __thread_has_fpu(struct task_struct *tsk)
-{
-       return tsk->thread.fpu.has_fpu;
-}
-
-/* Must be paired with an 'stts' after! */
-static inline void __thread_clear_has_fpu(struct task_struct *tsk)
-{
-       tsk->thread.fpu.has_fpu = 0;
-       this_cpu_write(fpu_owner_task, NULL);
-}
-
-/* Must be paired with a 'clts' before! */
-static inline void __thread_set_has_fpu(struct task_struct *tsk)
-{
-       tsk->thread.fpu.has_fpu = 1;
-       this_cpu_write(fpu_owner_task, tsk);
-}
-
-/*
- * Encapsulate the CR0.TS handling together with the
- * software flag.
- *
- * These generally need preemption protection to work,
- * so try to avoid using them on their own.
- */
-static inline void __thread_fpu_end(struct task_struct *tsk)
-{
-       __thread_clear_has_fpu(tsk);
-       if (!use_eager_fpu())
-               stts();
-}
-
-static inline void __thread_fpu_begin(struct task_struct *tsk)
-{
-       if (!use_eager_fpu())
-               clts();
-       __thread_set_has_fpu(tsk);
-}
-
-static inline void drop_fpu(struct task_struct *tsk)
-{
-       /*
-        * Forget coprocessor state.
-        */
-       preempt_disable();
-       tsk->thread.fpu_counter = 0;
-
-       if (__thread_has_fpu(tsk)) {
-               /* Ignore delayed exceptions from user space */
-               asm volatile("1: fwait\n"
-                            "2:\n"
-                            _ASM_EXTABLE(1b, 2b));
-               __thread_fpu_end(tsk);
-       }
-
-       clear_stopped_child_used_math(tsk);
-       preempt_enable();
-}
-
-static inline void restore_init_xstate(void)
-{
-       if (use_xsave())
-               xrstor_state(init_xstate_buf, -1);
-       else
-               fxrstor_checking(&init_xstate_buf->i387);
-}
-
-/*
- * Reset the FPU state in the eager case and drop it in the lazy case (later use
- * will reinit it).
- */
-static inline void fpu_reset_state(struct task_struct *tsk)
-{
-       if (!use_eager_fpu())
-               drop_fpu(tsk);
-       else
-               restore_init_xstate();
-}
-
-/*
- * FPU state switching for scheduling.
- *
- * This is a two-stage process:
- *
- *  - switch_fpu_prepare() saves the old state and
- *    sets the new state of the CR0.TS bit. This is
- *    done within the context of the old process.
- *
- *  - switch_fpu_finish() restores the new state as
- *    necessary.
- */
-typedef struct { int preload; } fpu_switch_t;
-
-static inline fpu_switch_t switch_fpu_prepare(struct task_struct *old, struct task_struct *new, int cpu)
-{
-       fpu_switch_t fpu;
-
-       /*
-        * If the task has used the FPU, pre-load it on eager-FPU (xsave)
-        * processors, or if the past 5 consecutive context switches used it.
-        */
-       fpu.preload = tsk_used_math(new) &&
-                     (use_eager_fpu() || new->thread.fpu_counter > 5);
-
-       if (__thread_has_fpu(old)) {
-               if (!__save_init_fpu(old))
-                       task_disable_lazy_fpu_restore(old);
-               else
-                       old->thread.fpu.last_cpu = cpu;
-
-               /* But leave fpu_owner_task! */
-               old->thread.fpu.has_fpu = 0;
-
-               /* Don't change CR0.TS if we just switch! */
-               if (fpu.preload) {
-                       new->thread.fpu_counter++;
-                       __thread_set_has_fpu(new);
-                       prefetch(new->thread.fpu.state);
-               } else if (!use_eager_fpu())
-                       stts();
-       } else {
-               old->thread.fpu_counter = 0;
-               task_disable_lazy_fpu_restore(old);
-               if (fpu.preload) {
-                       new->thread.fpu_counter++;
-                       if (fpu_lazy_restore(new, cpu))
-                               fpu.preload = 0;
-                       else
-                               prefetch(new->thread.fpu.state);
-                       __thread_fpu_begin(new);
-               }
-       }
-       return fpu;
-}
-
-/*
- * By the time this gets called, we've already cleared CR0.TS and
- * given the process the FPU if we are going to preload the FPU
- * state - all we need to do is to conditionally restore the register
- * state itself.
- */
-static inline void switch_fpu_finish(struct task_struct *new, fpu_switch_t fpu)
-{
-       if (fpu.preload) {
-               if (unlikely(restore_fpu_checking(new)))
-                       fpu_reset_state(new);
-       }
-}
-
-/*
- * Signal frame handlers...
- */
-extern int save_xstate_sig(void __user *buf, void __user *fx, int size);
-extern int __restore_xstate_sig(void __user *buf, void __user *fx, int size);
-
-static inline int xstate_sigframe_size(void)
-{
-       return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
-}
-
-static inline int restore_xstate_sig(void __user *buf, int ia32_frame)
-{
-       void __user *buf_fx = buf;
-       int size = xstate_sigframe_size();
-
-       if (ia32_frame && use_fxsr()) {
-               buf_fx = buf + sizeof(struct i387_fsave_struct);
-               size += sizeof(struct i387_fsave_struct);
-       }
-
-       return __restore_xstate_sig(buf, buf_fx, size);
-}
-
-/*
- * Needs to be preemption-safe.
- *
- * NOTE! user_fpu_begin() must be used only immediately before restoring
- * the save state. It does not do any saving/restoring on its own. In
- * lazy FPU mode, it is just an optimization to avoid a #NM exception;
- * the task can lose the FPU right after preempt_enable().
- */
-static inline void user_fpu_begin(void)
-{
-       preempt_disable();
-       if (!user_has_fpu())
-               __thread_fpu_begin(current);
-       preempt_enable();
-}
-
-static inline void __save_fpu(struct task_struct *tsk)
-{
-       if (use_xsave()) {
-               if (unlikely(system_state == SYSTEM_BOOTING))
-                       xsave_state_booting(&tsk->thread.fpu.state->xsave, -1);
-               else
-                       xsave_state(&tsk->thread.fpu.state->xsave, -1);
-       } else
-               fpu_fxsave(&tsk->thread.fpu);
-}
-
-/*
- * i387 state interaction
- */
-static inline unsigned short get_fpu_cwd(struct task_struct *tsk)
-{
-       if (cpu_has_fxsr) {
-               return tsk->thread.fpu.state->fxsave.cwd;
-       } else {
-               return (unsigned short)tsk->thread.fpu.state->fsave.cwd;
-       }
-}
-
-static inline unsigned short get_fpu_swd(struct task_struct *tsk)
-{
-       if (cpu_has_fxsr) {
-               return tsk->thread.fpu.state->fxsave.swd;
-       } else {
-               return (unsigned short)tsk->thread.fpu.state->fsave.swd;
-       }
-}
-
-static inline unsigned short get_fpu_mxcsr(struct task_struct *tsk)
-{
-       if (cpu_has_xmm) {
-               return tsk->thread.fpu.state->fxsave.mxcsr;
-       } else {
-               return MXCSR_DEFAULT;
-       }
-}
-
-static bool fpu_allocated(struct fpu *fpu)
-{
-       return fpu->state != NULL;
-}
-
-static inline int fpu_alloc(struct fpu *fpu)
-{
-       if (fpu_allocated(fpu))
-               return 0;
-       fpu->state = kmem_cache_alloc(task_xstate_cachep, GFP_KERNEL);
-       if (!fpu->state)
-               return -ENOMEM;
-       WARN_ON((unsigned long)fpu->state & 15);
-       return 0;
-}
-
-static inline void fpu_free(struct fpu *fpu)
-{
-       if (fpu->state) {
-               kmem_cache_free(task_xstate_cachep, fpu->state);
-               fpu->state = NULL;
-       }
-}
-
-static inline void fpu_copy(struct task_struct *dst, struct task_struct *src)
-{
-       if (use_eager_fpu()) {
-               memset(&dst->thread.fpu.state->xsave, 0, xstate_size);
-               __save_fpu(dst);
-       } else {
-               struct fpu *dfpu = &dst->thread.fpu;
-               struct fpu *sfpu = &src->thread.fpu;
-
-               unlazy_fpu(src);
-               memcpy(dfpu->state, sfpu->state, xstate_size);
-       }
-}
-
-static inline unsigned long
-alloc_mathframe(unsigned long sp, int ia32_frame, unsigned long *buf_fx,
-               unsigned long *size)
-{
-       unsigned long frame_size = xstate_sigframe_size();
-
-       *buf_fx = sp = round_down(sp - frame_size, 64);
-       if (ia32_frame && use_fxsr()) {
-               frame_size += sizeof(struct i387_fsave_struct);
-               sp -= sizeof(struct i387_fsave_struct);
-       }
-
-       *size = frame_size;
-       return sp;
-}
-
-#endif
diff --git a/arch/x86/include/asm/fpu/api.h b/arch/x86/include/asm/fpu/api.h
new file mode 100644 (file)
index 0000000..1429a7c
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _ASM_X86_FPU_API_H
+#define _ASM_X86_FPU_API_H
+
+/*
+ * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
+ * and they don't touch the preempt state on their own.
+ * If you enable preemption after __kernel_fpu_begin(), a preempt notifier
+ * should call __kernel_fpu_end() to prevent the kernel/user FPU
+ * state from getting corrupted. KVM, for example, uses this model.
+ *
+ * All other cases use kernel_fpu_begin/end() which disable preemption
+ * during kernel FPU usage.
+ */
+extern void __kernel_fpu_begin(void);
+extern void __kernel_fpu_end(void);
+extern void kernel_fpu_begin(void);
+extern void kernel_fpu_end(void);
+extern bool irq_fpu_usable(void);
+
+/*
+ * Some instructions, such as VIA's PadLock instructions, generate a spurious
+ * DNA fault but don't modify the SSE registers, and they get used from
+ * interrupt context as well. To prevent such kernel instructions in
+ * interrupt context from interacting wrongly with other user/kernel FPU
+ * usage, they should only be used bracketed by irq_ts_save()/irq_ts_restore().
+ */
+extern int  irq_ts_save(void);
+extern void irq_ts_restore(int TS_state);
+
+/*
+ * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
+ *
+ * If 'feature_name' is set then put a human-readable description of
+ * the feature there as well - this can be used to print error (or success)
+ * messages.
+ */
+extern int cpu_has_xfeatures(u64 xfeatures_mask, const char **feature_name);
+
+#endif /* _ASM_X86_FPU_API_H */
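
Taken together, the five exports above define the whole kernel-FPU contract. As a minimal sketch (not part of this commit; the function name and the scalar fallback are illustrative assumptions), a caller would bracket its vector code like this:

    #include <linux/string.h>
    #include <linux/types.h>
    #include <asm/fpu/api.h>

    /*
     * Illustrative caller -- not from this patch. It relies only on the
     * declarations above: irq_fpu_usable() to test the context, and the
     * kernel_fpu_begin()/kernel_fpu_end() pair to bracket vector code.
     */
    static void example_vector_memop(void *dst, const void *src, size_t len)
    {
    	if (!irq_fpu_usable()) {
    		/* Unsafe context (e.g. nested FPU use): take a scalar path. */
    		memcpy(dst, src, len);
    		return;
    	}

    	kernel_fpu_begin();	/* disables preemption, secures the FPU */
    	/* ... SSE/AVX loads and stores over dst/src would go here ... */
    	kernel_fpu_end();	/* gives the FPU back, re-enables preemption */
    }

The __kernel_fpu_begin()/__kernel_fpu_end() variants skip the preemption handling, which is why the comment above insists that only preempt-disabled callers (or preempt-notifier users such as KVM) touch them.
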
diff --git a/arch/x86/include/asm/fpu/internal.h b/arch/x86/include/asm/fpu/internal.h
new file mode 100644 (file)
index 0000000..3c3550c
--- /dev/null
@@ -0,0 +1,694 @@
+/*
+ * Copyright (C) 1994 Linus Torvalds
+ *
+ * Pentium III FXSR, SSE support
+ * General FPU state handling cleanups
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ * x86-64 work by Andi Kleen 2002
+ */
+
+#ifndef _ASM_X86_FPU_INTERNAL_H
+#define _ASM_X86_FPU_INTERNAL_H
+
+#include <linux/compat.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+
+#include <asm/user.h>
+#include <asm/fpu/api.h>
+#include <asm/fpu/xstate.h>
+
+/*
+ * High level FPU state handling functions:
+ */
+extern void fpu__activate_curr(struct fpu *fpu);
+extern void fpu__activate_fpstate_read(struct fpu *fpu);
+extern void fpu__activate_fpstate_write(struct fpu *fpu);
+extern void fpu__save(struct fpu *fpu);
+extern void fpu__restore(struct fpu *fpu);
+extern int  fpu__restore_sig(void __user *buf, int ia32_frame);
+extern void fpu__drop(struct fpu *fpu);
+extern int  fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu);
+extern void fpu__clear(struct fpu *fpu);
+extern int  fpu__exception_code(struct fpu *fpu, int trap_nr);
+extern int  dump_fpu(struct pt_regs *ptregs, struct user_i387_struct *fpstate);
+
+/*
+ * Boot time FPU initialization functions:
+ */
+extern void fpu__init_cpu(void);
+extern void fpu__init_system_xstate(void);
+extern void fpu__init_cpu_xstate(void);
+extern void fpu__init_system(struct cpuinfo_x86 *c);
+extern void fpu__init_check_bugs(void);
+extern void fpu__resume_cpu(void);
+
+/*
+ * Debugging facility:
+ */
+#ifdef CONFIG_X86_DEBUG_FPU
+# define WARN_ON_FPU(x) WARN_ON_ONCE(x)
+#else
+# define WARN_ON_FPU(x) ({ (void)(x); 0; })
+#endif
+
+/*
+ * FPU related CPU feature flag helper routines:
+ */
+static __always_inline __pure bool use_eager_fpu(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_EAGER_FPU);
+}
+
+static __always_inline __pure bool use_xsaveopt(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_XSAVEOPT);
+}
+
+static __always_inline __pure bool use_xsave(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_XSAVE);
+}
+
+static __always_inline __pure bool use_fxsr(void)
+{
+       return static_cpu_has_safe(X86_FEATURE_FXSR);
+}
+
+/*
+ * fpstate handling functions:
+ */
+
+extern union fpregs_state init_fpstate;
+
+extern void fpstate_init(union fpregs_state *state);
+#ifdef CONFIG_MATH_EMULATION
+extern void fpstate_init_soft(struct swregs_state *soft);
+#else
+static inline void fpstate_init_soft(struct swregs_state *soft) {}
+#endif
+static inline void fpstate_init_fxstate(struct fxregs_state *fx)
+{
+       fx->cwd = 0x37f;
+       fx->mxcsr = MXCSR_DEFAULT;
+}
+extern void fpstate_sanitize_xstate(struct fpu *fpu);
+
+#define user_insn(insn, output, input...)                              \
+({                                                                     \
+       int err;                                                        \
+       asm volatile(ASM_STAC "\n"                                      \
+                    "1:" #insn "\n\t"                                  \
+                    "2: " ASM_CLAC "\n"                                \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+                    "    jmp  2b\n"                                    \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
+       err;                                                            \
+})
+
+#define check_insn(insn, output, input...)                             \
+({                                                                     \
+       int err;                                                        \
+       asm volatile("1:" #insn "\n\t"                                  \
+                    "2:\n"                                             \
+                    ".section .fixup,\"ax\"\n"                         \
+                    "3:  movl $-1,%[err]\n"                            \
+                    "    jmp  2b\n"                                    \
+                    ".previous\n"                                      \
+                    _ASM_EXTABLE(1b, 3b)                               \
+                    : [err] "=r" (err), output                         \
+                    : "0"(0), input);                                  \
+       err;                                                            \
+})
+
+static inline int copy_fregs_to_user(struct fregs_state __user *fx)
+{
+       return user_insn(fnsave %[fx]; fwait,  [fx] "=m" (*fx), "m" (*fx));
+}
+
+static inline int copy_fxregs_to_user(struct fxregs_state __user *fx)
+{
+       if (config_enabled(CONFIG_X86_32))
+               return user_insn(fxsave %[fx], [fx] "=m" (*fx), "m" (*fx));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               return user_insn(fxsaveq %[fx], [fx] "=m" (*fx), "m" (*fx));
+
+       /* See comment in copy_fxregs_to_kernel() below. */
+       return user_insn(rex64/fxsave (%[fx]), "=m" (*fx), [fx] "R" (fx));
+}
+
+static inline void copy_kernel_to_fxregs(struct fxregs_state *fx)
+{
+       int err;
+
+       if (config_enabled(CONFIG_X86_32)) {
+               err = check_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+       } else {
+               if (config_enabled(CONFIG_AS_FXSAVEQ)) {
+                       err = check_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+               } else {
+                       /* See comment in copy_fxregs_to_kernel() below. */
+                       err = check_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx), "m" (*fx));
+               }
+       }
+       /* Copying from a kernel buffer to FPU registers should never fail: */
+       WARN_ON_FPU(err);
+}
+
+static inline int copy_user_to_fxregs(struct fxregs_state __user *fx)
+{
+       if (config_enabled(CONFIG_X86_32))
+               return user_insn(fxrstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               return user_insn(fxrstorq %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+       /* See comment in copy_fxregs_to_kernel() below. */
+       return user_insn(rex64/fxrstor (%[fx]), "=m" (*fx), [fx] "R" (fx),
+                         "m" (*fx));
+}
+
+static inline void copy_kernel_to_fregs(struct fregs_state *fx)
+{
+       int err = check_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+
+       WARN_ON_FPU(err);
+}
+
+static inline int copy_user_to_fregs(struct fregs_state __user *fx)
+{
+       return user_insn(frstor %[fx], "=m" (*fx), [fx] "m" (*fx));
+}
+
+static inline void copy_fxregs_to_kernel(struct fpu *fpu)
+{
+       if (config_enabled(CONFIG_X86_32))
+               asm volatile( "fxsave %[fx]" : [fx] "=m" (fpu->state.fxsave));
+       else if (config_enabled(CONFIG_AS_FXSAVEQ))
+               asm volatile("fxsaveq %[fx]" : [fx] "=m" (fpu->state.fxsave));
+       else {
+               /* Using "rex64; fxsave %0" is broken because, if the memory
+                * operand uses any extended registers for addressing, a second
+                * REX prefix will be generated (to the assembler, rex64
+                * followed by semicolon is a separate instruction), and hence
+                * the 64-bitness is lost.
+                *
+                * Using "fxsaveq %0" would be the ideal choice, but is only
+                * supported starting with gas 2.16.
+                *
+                * Using, as a workaround, the properly prefixed form below
+                * isn't accepted by any binutils version so far released,
+                * complaining that the same type of prefix is used twice if
+                * an extended register is needed for addressing (fix submitted
+                * to mainline 2005-11-21).
+                *
+                *  asm volatile("rex64/fxsave %0" : "=m" (fpu->state.fxsave));
+                *
+                * This, however, we can work around by forcing the compiler to
+                * select an addressing mode that doesn't require extended
+                * registers.
+                */
+               asm volatile( "rex64/fxsave (%[fx])"
+                            : "=m" (fpu->state.fxsave)
+                            : [fx] "R" (&fpu->state.fxsave));
+       }
+}
+
+/* These macros all use (%edi)/(%rdi) as the single memory argument. */
+#define XSAVE          ".byte " REX_PREFIX "0x0f,0xae,0x27"
+#define XSAVEOPT       ".byte " REX_PREFIX "0x0f,0xae,0x37"
+#define XSAVES         ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
+#define XRSTOR         ".byte " REX_PREFIX "0x0f,0xae,0x2f"
+#define XRSTORS                ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
+
+/* xstate instruction fault handler: */
+#define xstate_fault(__err)            \
+                                       \
+       ".section .fixup,\"ax\"\n"      \
+                                       \
+       "3:  movl $-2,%[_err]\n"        \
+       "    jmp  2b\n"                 \
+                                       \
+       ".previous\n"                   \
+                                       \
+       _ASM_EXTABLE(1b, 3b)            \
+       : [_err] "=r" (__err)
+
+/*
+ * This function is called only during boot time, when the x86 caps are not
+ * yet set up and alternatives cannot be used.
+ */
+static inline void copy_xregs_to_kernel_booting(struct xregs_state *xstate)
+{
+       u64 mask = -1;
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err = 0;
+
+       WARN_ON(system_state != SYSTEM_BOOTING);
+
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
+               asm volatile("1:"XSAVES"\n\t"
+                       "2:\n\t"
+                            xstate_fault(err)
+                       : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
+                       : "memory");
+       else
+               asm volatile("1:"XSAVE"\n\t"
+                       "2:\n\t"
+                            xstate_fault(err)
+                       : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
+                       : "memory");
+
+       /* We should never fault when copying to a kernel buffer: */
+       WARN_ON_FPU(err);
+}
+
+/*
+ * This function is called only during boot time, when the x86 caps are not
+ * yet set up and alternatives cannot be used.
+ */
+static inline void copy_kernel_to_xregs_booting(struct xregs_state *xstate)
+{
+       u64 mask = -1;
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err = 0;
+
+       WARN_ON(system_state != SYSTEM_BOOTING);
+
+       if (boot_cpu_has(X86_FEATURE_XSAVES))
+               asm volatile("1:"XRSTORS"\n\t"
+                       "2:\n\t"
+                            xstate_fault(err)
+                       : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
+                       : "memory");
+       else
+               asm volatile("1:"XRSTOR"\n\t"
+                       "2:\n\t"
+                            xstate_fault(err)
+                       : "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask), "0" (err)
+                       : "memory");
+
+       /* We should never fault when copying from a kernel buffer: */
+       WARN_ON_FPU(err);
+}
+
+/*
+ * Save processor xstate to xsave area.
+ */
+static inline void copy_xregs_to_kernel(struct xregs_state *xstate)
+{
+       u64 mask = -1;
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err = 0;
+
+       WARN_ON(!alternatives_patched);
+
+       /*
+        * If xsaves is enabled, xsaves replaces xsaveopt because
+        * it supports compact format and supervisor states in addition to
+        * modified optimization in xsaveopt.
+        *
+        * Otherwise, if xsaveopt is enabled, xsaveopt replaces xsave
+        * because xsaveopt supports modified optimization which is not
+        * supported by xsave.
+        *
+        * If neither xsaves nor xsaveopt is enabled, use xsave.
+        */
+       alternative_input_2(
+               "1:"XSAVE,
+               XSAVEOPT,
+               X86_FEATURE_XSAVEOPT,
+               XSAVES,
+               X86_FEATURE_XSAVES,
+               [xstate] "D" (xstate), "a" (lmask), "d" (hmask) :
+               "memory");
+       asm volatile("2:\n\t"
+                    xstate_fault(err)
+                    : "0" (err)
+                    : "memory");
+
+       /* We should never fault when copying to a kernel buffer: */
+       WARN_ON_FPU(err);
+}
+
+/*
+ * Restore processor xstate from xsave area.
+ */
+static inline void copy_kernel_to_xregs(struct xregs_state *xstate, u64 mask)
+{
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err = 0;
+
+       /*
+        * Use xrstors to restore context if it is enabled. xrstors supports
+        * the compacted format of the xsave area, which xrstor does not.
+        */
+       alternative_input(
+               "1: " XRSTOR,
+               XRSTORS,
+               X86_FEATURE_XSAVES,
+               "D" (xstate), "m" (*xstate), "a" (lmask), "d" (hmask)
+               : "memory");
+
+       asm volatile("2:\n"
+                    xstate_fault(err)
+                    : "0" (err)
+                    : "memory");
+
+       /* We should never fault when copying from a kernel buffer: */
+       WARN_ON_FPU(err);
+}
+
+/*
+ * Save xstate to user space xsave area.
+ *
+ * We don't use the modified optimization because xrstor/xrstors might track
+ * a different application.
+ *
+ * We don't use the compacted xsave area format either, for backward
+ * compatibility with old applications which don't understand it.
+ */
+static inline int copy_xregs_to_user(struct xregs_state __user *buf)
+{
+       int err;
+
+       /*
+        * Clear the xsave header first, so that reserved fields are
+        * initialized to zero.
+        */
+       err = __clear_user(&buf->header, sizeof(buf->header));
+       if (unlikely(err))
+               return -EFAULT;
+
+       __asm__ __volatile__(ASM_STAC "\n"
+                            "1:"XSAVE"\n"
+                            "2: " ASM_CLAC "\n"
+                            xstate_fault(err)
+                            : "D" (buf), "a" (-1), "d" (-1), "0" (err)
+                            : "memory");
+       return err;
+}
+
+/*
+ * Restore xstate from user space xsave area.
+ */
+static inline int copy_user_to_xregs(struct xregs_state __user *buf, u64 mask)
+{
+       struct xregs_state *xstate = ((__force struct xregs_state *)buf);
+       u32 lmask = mask;
+       u32 hmask = mask >> 32;
+       int err = 0;
+
+       __asm__ __volatile__(ASM_STAC "\n"
+                            "1:"XRSTOR"\n"
+                            "2: " ASM_CLAC "\n"
+                            xstate_fault(err)
+                            : "D" (xstate), "a" (lmask), "d" (hmask), "0" (err)
+                            : "memory");       /* memory required? */
+       return err;
+}
+
+/*
+ * This must be called with preempt disabled. Returns
+ * 'true' if the FPU state is still intact and we can
+ * keep the registers active.
+ *
+ * The legacy FNSAVE instruction clears all FPU state
+ * unconditionally, so the registers are essentially destroyed.
+ * Modern FPU state can be kept in registers, if there are
+ * no pending FP exceptions.
+ */
+static inline int copy_fpregs_to_fpstate(struct fpu *fpu)
+{
+       if (likely(use_xsave())) {
+               copy_xregs_to_kernel(&fpu->state.xsave);
+               return 1;
+       }
+
+       if (likely(use_fxsr())) {
+               copy_fxregs_to_kernel(fpu);
+               return 1;
+       }
+
+       /*
+        * Legacy FPU register saving, FNSAVE always clears FPU registers,
+        * so we have to mark them inactive:
+        */
+       asm volatile("fnsave %[fp]; fwait" : [fp] "=m" (fpu->state.fsave));
+
+       return 0;
+}
+
+static inline void __copy_kernel_to_fpregs(union fpregs_state *fpstate)
+{
+       if (use_xsave()) {
+               copy_kernel_to_xregs(&fpstate->xsave, -1);
+       } else {
+               if (use_fxsr())
+                       copy_kernel_to_fxregs(&fpstate->fxsave);
+               else
+                       copy_kernel_to_fregs(&fpstate->fsave);
+       }
+}
+
+static inline void copy_kernel_to_fpregs(union fpregs_state *fpstate)
+{
+       /*
+        * AMD K7/K8 CPUs don't save/restore FDP/FIP/FOP unless an exception is
+        * pending. Clear the x87 state here by setting it to fixed values.
+        * "m" is a random variable that should be in L1.
+        */
+       if (unlikely(static_cpu_has_bug_safe(X86_BUG_FXSAVE_LEAK))) {
+               asm volatile(
+                       "fnclex\n\t"
+                       "emms\n\t"
+                       "fildl %P[addr]"        /* set F?P to defined value */
+                       : : [addr] "m" (fpstate));
+       }
+
+       __copy_kernel_to_fpregs(fpstate);
+}
+
+extern int copy_fpstate_to_sigframe(void __user *buf, void __user *fp, int size);
+
+/*
+ * FPU context switch related helper methods:
+ */
+
+DECLARE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+
+/*
+ * Must be run with preemption disabled: this clears fpu_fpregs_owner_ctx
+ * on this CPU.
+ *
+ * This will disable any lazy FPU state restore of the current FPU state,
+ * but if the current thread owns the FPU, its register state will still
+ * be saved by switch_fpu_prepare() at the next context switch.
+ */
+static inline void __cpu_disable_lazy_restore(unsigned int cpu)
+{
+       per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL;
+}
+
+static inline int fpu_want_lazy_restore(struct fpu *fpu, unsigned int cpu)
+{
+       return fpu == this_cpu_read_stable(fpu_fpregs_owner_ctx) && cpu == fpu->last_cpu;
+}
+
+
+/*
+ * Wrap lazy FPU TS handling in a 'hw fpregs activation/deactivation'
+ * idiom, which is then paired with the sw-flag (fpregs_active) later on:
+ */
+
+static inline void __fpregs_activate_hw(void)
+{
+       if (!use_eager_fpu())
+               clts();
+}
+
+static inline void __fpregs_deactivate_hw(void)
+{
+       if (!use_eager_fpu())
+               stts();
+}
+
+/* Must be paired with an 'stts' (fpregs_deactivate_hw()) after! */
+static inline void __fpregs_deactivate(struct fpu *fpu)
+{
+       WARN_ON_FPU(!fpu->fpregs_active);
+
+       fpu->fpregs_active = 0;
+       this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+}
+
+/* Must be paired with a 'clts' (fpregs_activate_hw()) before! */
+static inline void __fpregs_activate(struct fpu *fpu)
+{
+       WARN_ON_FPU(fpu->fpregs_active);
+
+       fpu->fpregs_active = 1;
+       this_cpu_write(fpu_fpregs_owner_ctx, fpu);
+}
+
+/*
+ * The question "does this thread have fpu access?"
+ * is slightly racy, since preemption could come in
+ * and revoke it immediately after the test.
+ *
+ * However, even in that very unlikely scenario,
+ * we can just assume we have FPU access - typically
+ * to save the FP state - we'll just take a #NM
+ * fault and get the FPU access back.
+ */
+static inline int fpregs_active(void)
+{
+       return current->thread.fpu.fpregs_active;
+}
+
+/*
+ * Encapsulate the CR0.TS handling together with the
+ * software flag.
+ *
+ * These generally need preemption protection to work,
+ * so try to avoid using them on their own.
+ */
+static inline void fpregs_activate(struct fpu *fpu)
+{
+       __fpregs_activate_hw();
+       __fpregs_activate(fpu);
+}
+
+static inline void fpregs_deactivate(struct fpu *fpu)
+{
+       __fpregs_deactivate(fpu);
+       __fpregs_deactivate_hw();
+}
+
+/*
+ * FPU state switching for scheduling.
+ *
+ * This is a two-stage process:
+ *
+ *  - switch_fpu_prepare() saves the old state and
+ *    sets the new state of the CR0.TS bit. This is
+ *    done within the context of the old process.
+ *
+ *  - switch_fpu_finish() restores the new state as
+ *    necessary.
+ */
+typedef struct { int preload; } fpu_switch_t;
+
+static inline fpu_switch_t
+switch_fpu_prepare(struct fpu *old_fpu, struct fpu *new_fpu, int cpu)
+{
+       fpu_switch_t fpu;
+
+       /*
+        * If the task has used the FPU, pre-load it on eager-FPU (xsave)
+        * processors, or if the past 5 consecutive context switches used it.
+        */
+       fpu.preload = new_fpu->fpstate_active &&
+                     (use_eager_fpu() || new_fpu->counter > 5);
+
+       if (old_fpu->fpregs_active) {
+               if (!copy_fpregs_to_fpstate(old_fpu))
+                       old_fpu->last_cpu = -1;
+               else
+                       old_fpu->last_cpu = cpu;
+
+               /* But leave fpu_fpregs_owner_ctx! */
+               old_fpu->fpregs_active = 0;
+
+               /* Don't change CR0.TS if we just switch! */
+               if (fpu.preload) {
+                       new_fpu->counter++;
+                       __fpregs_activate(new_fpu);
+                       prefetch(&new_fpu->state);
+               } else {
+                       __fpregs_deactivate_hw();
+               }
+       } else {
+               old_fpu->counter = 0;
+               old_fpu->last_cpu = -1;
+               if (fpu.preload) {
+                       new_fpu->counter++;
+                       if (fpu_want_lazy_restore(new_fpu, cpu))
+                               fpu.preload = 0;
+                       else
+                               prefetch(&new_fpu->state);
+                       fpregs_activate(new_fpu);
+               }
+       }
+       return fpu;
+}
+
+/*
+ * Misc helper functions:
+ */
+
+/*
+ * By the time this gets called, we've already cleared CR0.TS and
+ * given the process the FPU if we are going to preload the FPU
+ * state - all we need to do is to conditionally restore the register
+ * state itself.
+ */
+static inline void switch_fpu_finish(struct fpu *new_fpu, fpu_switch_t fpu_switch)
+{
+       if (fpu_switch.preload)
+               copy_kernel_to_fpregs(&new_fpu->state);
+}
+
+/*
+ * Needs to be preemption-safe.
+ *
+ * NOTE! user_fpu_begin() must be used only immediately before restoring
+ * the save state. It does not do any saving/restoring on its own. In
+ * lazy FPU mode, it is just an optimization to avoid a #NM exception;
+ * the task can lose the FPU right after preempt_enable().
+ */
+static inline void user_fpu_begin(void)
+{
+       struct fpu *fpu = &current->thread.fpu;
+
+       preempt_disable();
+       if (!fpregs_active())
+               fpregs_activate(fpu);
+       preempt_enable();
+}
+
+/*
+ * MXCSR and XCR definitions:
+ */
+
+extern unsigned int mxcsr_feature_mask;
+
+#define XCR_XFEATURE_ENABLED_MASK      0x00000000
+
+static inline u64 xgetbv(u32 index)
+{
+       u32 eax, edx;
+
+       asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
+                    : "=a" (eax), "=d" (edx)
+                    : "c" (index));
+       return eax + ((u64)edx << 32);
+}
+
+static inline void xsetbv(u32 index, u64 value)
+{
+       u32 eax = value;
+       u32 edx = value >> 32;
+
+       asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
+                    : : "a" (eax), "d" (edx), "c" (index));
+}
+
+#endif /* _ASM_X86_FPU_INTERNAL_H */
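
The two-stage switch protocol documented above is easiest to read from the caller's side. The sketch below is illustrative (a simplified stand-in for the real __switch_to() paths, not code from this series); it shows the documented pairing of switch_fpu_prepare() in the outgoing task's context with switch_fpu_finish() in the incoming one:

    /*
     * Simplified context-switch glue -- a sketch, not the real __switch_to().
     * Stage 1 runs while 'prev' is still current; stage 2 runs once 'next'
     * owns the CPU.
     */
    static void example_fpu_context_switch(struct task_struct *prev,
    					   struct task_struct *next, int cpu)
    {
    	struct fpu *old_fpu = &prev->thread.fpu;
    	struct fpu *new_fpu = &next->thread.fpu;
    	fpu_switch_t fpu_switch;

    	/* Save the old state and decide whether 'next' gets a preload: */
    	fpu_switch = switch_fpu_prepare(old_fpu, new_fpu, cpu);

    	/* ... the low-level register and stack switch happens here ... */

    	/* Restore 'next's registers if switch_fpu_prepare() chose to: */
    	switch_fpu_finish(new_fpu, fpu_switch);
    }

The MXCSR/XCR helpers at the bottom are similarly small to drive: xgetbv(XCR_XFEATURE_ENABLED_MASK) reads the XCR0 feature-enable mask that the xstate code sizes its buffers from.
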
diff --git a/arch/x86/include/asm/fpu/regset.h b/arch/x86/include/asm/fpu/regset.h
new file mode 100644 (file)
index 0000000..39d3107
--- /dev/null
@@ -0,0 +1,21 @@
+/*
+ * FPU regset handling methods:
+ */
+#ifndef _ASM_X86_FPU_REGSET_H
+#define _ASM_X86_FPU_REGSET_H
+
+#include <linux/regset.h>
+
+extern user_regset_active_fn regset_fpregs_active, regset_xregset_fpregs_active;
+extern user_regset_get_fn fpregs_get, xfpregs_get, fpregs_soft_get,
+                               xstateregs_get;
+extern user_regset_set_fn fpregs_set, xfpregs_set, fpregs_soft_set,
+                                xstateregs_set;
+
+/*
+ * xstateregs_active == regset_fpregs_active. Please refer to the comment
+ * at the definition of regset_fpregs_active.
+ */
+#define xstateregs_active      regset_fpregs_active
+
+#endif /* _ASM_X86_FPU_REGSET_H */
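
As a hedged sketch of where these hooks land (the real tables live in arch/x86/kernel/ptrace.c; the .n value below is an illustrative placeholder, since in-tree it is sized from xstate_size at boot):

    #include <linux/types.h>
    #include <linux/regset.h>
    #include <linux/elf.h>

    /*
     * Sketch of a user_regset table entry wiring up the hooks declared
     * above -- illustrative, not the in-tree table.
     */
    static const struct user_regset example_xstate_regset = {
    	.core_note_type	= NT_X86_XSTATE,
    	.n		= 0,			/* really: xstate_size / sizeof(u64) */
    	.size		= sizeof(u64),
    	.align		= sizeof(u64),
    	.active		= xstateregs_active,	/* == regset_fpregs_active */
    	.get		= xstateregs_get,
    	.set		= xstateregs_set,
    };
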
diff --git a/arch/x86/include/asm/fpu/signal.h b/arch/x86/include/asm/fpu/signal.h
new file mode 100644 (file)
index 0000000..7358e9d
--- /dev/null
@@ -0,0 +1,33 @@
+/*
+ * x86 FPU signal frame handling methods:
+ */
+#ifndef _ASM_X86_FPU_SIGNAL_H
+#define _ASM_X86_FPU_SIGNAL_H
+
+#ifdef CONFIG_X86_64
+# include <asm/sigcontext32.h>
+# include <asm/user32.h>
+struct ksignal;
+int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
+                       compat_sigset_t *set, struct pt_regs *regs);
+int ia32_setup_frame(int sig, struct ksignal *ksig,
+                    compat_sigset_t *set, struct pt_regs *regs);
+#else
+# define user_i387_ia32_struct user_i387_struct
+# define user32_fxsr_struct    user_fxsr_struct
+# define ia32_setup_frame      __setup_frame
+# define ia32_setup_rt_frame   __setup_rt_frame
+#endif
+
+extern void convert_from_fxsr(struct user_i387_ia32_struct *env,
+                             struct task_struct *tsk);
+extern void convert_to_fxsr(struct task_struct *tsk,
+                           const struct user_i387_ia32_struct *env);
+
+unsigned long
+fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
+                    unsigned long *buf_fx, unsigned long *size);
+
+extern void fpu__init_prepare_fx_sw_frame(void);
+
+#endif /* _ASM_X86_FPU_SIGNAL_H */
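
A hedged sketch of the consumer side: signal delivery carves the math frame out of the user stack with fpu__alloc_mathframe() before laying out the rest of the sigframe. The wrapper below is illustrative, a simplified stand-in for get_sigframe():

    /*
     * Illustrative sigframe setup -- a sketch of how fpu__alloc_mathframe()
     * is driven, not the real get_sigframe().
     */
    static unsigned long example_place_mathframe(unsigned long sp, int ia32_frame,
    					         void __user **fpstate)
    {
    	unsigned long buf_fx = 0;
    	unsigned long math_size = 0;

    	/* Reserve an aligned FPU save area below the current stack pointer: */
    	sp = fpu__alloc_mathframe(sp, ia32_frame, &buf_fx, &math_size);
    	*fpstate = (void __user *)sp;

    	/* buf_fx/math_size would then be handed to copy_fpstate_to_sigframe(). */
    	return sp;
    }
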
diff --git a/arch/x86/include/asm/fpu/types.h b/arch/x86/include/asm/fpu/types.h
new file mode 100644 (file)
index 0000000..0637826
--- /dev/null
@@ -0,0 +1,293 @@
+/*
+ * FPU data structures:
+ */
+#ifndef _ASM_X86_FPU_H
+#define _ASM_X86_FPU_H
+
+/*
+ * The legacy x87 FPU state format, as saved by FSAVE and
+ * restored by the FRSTOR instructions:
+ */
+struct fregs_state {
+       u32                     cwd;    /* FPU Control Word             */
+       u32                     swd;    /* FPU Status Word              */
+       u32                     twd;    /* FPU Tag Word                 */
+       u32                     fip;    /* FPU IP Offset                */
+       u32                     fcs;    /* FPU IP Selector              */
+       u32                     foo;    /* FPU Operand Pointer Offset   */
+       u32                     fos;    /* FPU Operand Pointer Selector */
+
+       /* 8*10 bytes for each FP-reg = 80 bytes:                       */
+       u32                     st_space[20];
+
+       /* Software status information [not touched by FSAVE]:          */
+       u32                     status;
+};
+
+/*
+ * The legacy fx SSE/MMX FPU state format, as saved by FXSAVE and
+ * restored by the FXRSTOR instructions. It's similar to the FSAVE
+ * format, but differs in some areas, plus has extensions at
+ * the end for the XMM registers.
+ */
+struct fxregs_state {
+       u16                     cwd; /* Control Word                    */
+       u16                     swd; /* Status Word                     */
+       u16                     twd; /* Tag Word                        */
+       u16                     fop; /* Last Instruction Opcode         */
+       union {
+               struct {
+                       u64     rip; /* Instruction Pointer             */
+                       u64     rdp; /* Data Pointer                    */
+               };
+               struct {
+                       u32     fip; /* FPU IP Offset                   */
+                       u32     fcs; /* FPU IP Selector                 */
+                       u32     foo; /* FPU Operand Offset              */
+                       u32     fos; /* FPU Operand Selector            */
+               };
+       };
+       u32                     mxcsr;          /* MXCSR Register State */
+       u32                     mxcsr_mask;     /* MXCSR Mask           */
+
+       /* 8*16 bytes for each FP-reg = 128 bytes:                      */
+       u32                     st_space[32];
+
+       /* 16*16 bytes for each XMM-reg = 256 bytes:                    */
+       u32                     xmm_space[64];
+
+       u32                     padding[12];
+
+       union {
+               u32             padding1[12];
+               u32             sw_reserved[12];
+       };
+
+} __attribute__((aligned(16)));
+
+/* Default value for fxregs_state.mxcsr: */
+#define MXCSR_DEFAULT          0x1f80
+
+/*
+ * Software-based FPU emulation state. This is arbitrary, really;
+ * it matches the x87 format to make it easier to understand:
+ */
+struct swregs_state {
+       u32                     cwd;
+       u32                     swd;
+       u32                     twd;
+       u32                     fip;
+       u32                     fcs;
+       u32                     foo;
+       u32                     fos;
+       /* 8*10 bytes for each FP-reg = 80 bytes: */
+       u32                     st_space[20];
+       u8                      ftop;
+       u8                      changed;
+       u8                      lookahead;
+       u8                      no_update;
+       u8                      rm;
+       u8                      alimit;
+       struct math_emu_info    *info;
+       u32                     entry_eip;
+};
+
+/*
+ * List of XSAVE features Linux knows about:
+ */
+enum xfeature_bit {
+       XSTATE_BIT_FP,
+       XSTATE_BIT_SSE,
+       XSTATE_BIT_YMM,
+       XSTATE_BIT_BNDREGS,
+       XSTATE_BIT_BNDCSR,
+       XSTATE_BIT_OPMASK,
+       XSTATE_BIT_ZMM_Hi256,
+       XSTATE_BIT_Hi16_ZMM,
+
+       XFEATURES_NR_MAX,
+};
+
+#define XSTATE_FP              (1 << XSTATE_BIT_FP)
+#define XSTATE_SSE             (1 << XSTATE_BIT_SSE)
+#define XSTATE_YMM             (1 << XSTATE_BIT_YMM)
+#define XSTATE_BNDREGS         (1 << XSTATE_BIT_BNDREGS)
+#define XSTATE_BNDCSR          (1 << XSTATE_BIT_BNDCSR)
+#define XSTATE_OPMASK          (1 << XSTATE_BIT_OPMASK)
+#define XSTATE_ZMM_Hi256       (1 << XSTATE_BIT_ZMM_Hi256)
+#define XSTATE_Hi16_ZMM                (1 << XSTATE_BIT_Hi16_ZMM)
+
+#define XSTATE_FPSSE           (XSTATE_FP | XSTATE_SSE)
+#define XSTATE_AVX512          (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+
+/*
+ * There are 16x 256-bit AVX registers named YMM0-YMM15.
+ * The low 128 bits are aliased to the 16 SSE registers (XMM0-XMM15)
+ * and are stored in 'struct fxregs_state::xmm_space[]'.
+ *
+ * The high 128 bits are stored here:
+ *    16x 128 bits == 256 bytes.
+ */
+struct ymmh_struct {
+       u8                              ymmh_space[256];
+};
+
+/* We don't support LWP yet: */
+struct lwp_struct {
+       u8                              reserved[128];
+};
+
+/* Intel MPX support: */
+struct bndreg {
+       u64                             lower_bound;
+       u64                             upper_bound;
+} __packed;
+
+struct bndcsr {
+       u64                             bndcfgu;
+       u64                             bndstatus;
+} __packed;
+
+struct mpx_struct {
+       struct bndreg                   bndreg[4];
+       struct bndcsr                   bndcsr;
+};
+
+struct xstate_header {
+       u64                             xfeatures;
+       u64                             xcomp_bv;
+       u64                             reserved[6];
+} __attribute__((packed));
+
+/* New processor state extensions should be added here: */
+#define XSTATE_RESERVE                 (sizeof(struct ymmh_struct) + \
+                                        sizeof(struct lwp_struct)  + \
+                                        sizeof(struct mpx_struct)  )
+/*
+ * This is our most modern FPU state format, as saved by the XSAVE
+ * and restored by the XRSTOR instructions.
+ *
+ * It consists of a legacy fxregs portion, an xstate header and
+ * subsequent fixed size areas as defined by the xstate header.
+ * Not all CPUs support all the extensions.
+ */
+struct xregs_state {
+       struct fxregs_state             i387;
+       struct xstate_header            header;
+       u8                              __reserved[XSTATE_RESERVE];
+} __attribute__ ((packed, aligned (64)));
+
+/*
+ * This is a union of all the possible FPU state formats
+ * put together, so that we can pick the right one at runtime.
+ *
+ * The size of the structure is determined by the largest
+ * member - which is the xsave area:
+ */
+union fpregs_state {
+       struct fregs_state              fsave;
+       struct fxregs_state             fxsave;
+       struct swregs_state             soft;
+       struct xregs_state              xsave;
+};
+
+/*
+ * Highest level per task FPU state data structure that
+ * contains the FPU register state plus various FPU
+ * state fields:
+ */
+struct fpu {
+       /*
+        * @state:
+        *
+        * In-memory copy of all FPU registers that we save/restore
+        * over context switches. If the task is using the FPU then
+        * the registers in the FPU are more recent than this state
+        * copy. If the task context-switches away then they get
+        * saved here and represent the FPU state.
+        *
+        * After context switches there may be a (short) time period
+        * during which the in-FPU hardware registers are unchanged
+        * and still perfectly match this state, if the tasks
+        * scheduled afterwards are not using the FPU.
+        *
+        * This is the 'lazy restore' window of optimization, which
+        * we track through 'fpu_fpregs_owner_ctx' and 'fpu->last_cpu'.
+        *
+        * We detect whether a subsequent task uses the FPU via setting
+        * CR0::TS to 1, which causes any FPU use to raise a #NM fault.
+        *
+        * During this window, if the task gets scheduled again, we
+        * might be able to skip having to do a restore from this
+        * memory buffer to the hardware registers - at the cost of
+        * incurring the overhead of #NM fault traps.
+        *
+        * Note that on modern CPUs that support the XSAVEOPT (or other
+        * optimized XSAVE instructions), we don't use #NM traps anymore,
+        * as the hardware can track whether FPU registers need saving
+        * or not. On such CPUs we activate the non-lazy ('eagerfpu')
+        * logic, which unconditionally saves/restores all FPU state
+        * across context switches (if FPU state exists).
+        */
+       union fpregs_state              state;
+
+       /*
+        * @last_cpu:
+        *
+        * Records the last CPU on which this context was loaded into
+        * FPU registers. (In the lazy-restore case we might be
+        * able to reuse FPU registers across multiple context switches
+        * this way, if no intermediate task used the FPU.)
+        *
+        * A value of -1 is used to indicate that the FPU state in context
+        * memory is newer than the FPU state in registers, and that the
+        * FPU state should be reloaded next time the task is run.
+        */
+       unsigned int                    last_cpu;
+
+       /*
+        * @fpstate_active:
+        *
+        * This flag indicates whether this context is active: if the task
+        * is not running then we can restore from this context, if the task
+        * is running then we should save into this context.
+        */
+       unsigned char                   fpstate_active;
+
+       /*
+        * @fpregs_active:
+        *
+        * This flag determines whether a given context is actively
+        * loaded into the FPU's registers and that those registers
+        * represent the task's current FPU state.
+        *
+        * Note the interaction with fpstate_active:
+        *
+        *   # task does not use the FPU:
+        *   fpstate_active == 0
+        *
+        *   # task uses the FPU and regs are active:
+        *   fpstate_active == 1 && fpregs_active == 1
+        *
+        *   # the regs are inactive but still match fpstate:
+        *   fpstate_active == 1 && fpregs_active == 0 && fpregs_owner == fpu
+        *
+        * The third state is what we use for the lazy restore optimization
+        * on lazy-switching CPUs.
+        */
+       unsigned char                   fpregs_active;
+
+       /*
+        * @counter:
+        *
+        * This counter contains the number of consecutive context switches
+        * during which the FPU stays used. If this is over a threshold, the
+        * lazy FPU restore logic becomes eager, to save the trap overhead.
+        * This is an unsigned char so that after 256 iterations the counter
+        * wraps and the context switch behavior turns lazy again; this is to
+        * deal with bursty apps that only use the FPU for a short time:
+        */
+       unsigned char                   counter;
+};
+
+#endif /* _ASM_X86_FPU_H */
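
One consequence of the xstate header worth making concrete: the save path can tell that the x87 state is in its init state purely from header.xfeatures, which is how the FNSAVE-era "is the state intact?" question is answered on XSAVE hardware. A minimal sketch (the helper name is made up):

    /*
     * Hedged sketch: the XSTATE_FP bit being clear in the header means the
     * x87 portion of the XSAVE image is in its init state.
     */
    static inline int example_x87_is_init(const union fpregs_state *st)
    {
    	return !(st->xsave.header.xfeatures & XSTATE_FP);
    }
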
diff --git a/arch/x86/include/asm/fpu/xstate.h b/arch/x86/include/asm/fpu/xstate.h
new file mode 100644 (file)
index 0000000..4656b25
--- /dev/null
@@ -0,0 +1,46 @@
+#ifndef __ASM_X86_XSAVE_H
+#define __ASM_X86_XSAVE_H
+
+#include <linux/types.h>
+#include <asm/processor.h>
+#include <linux/uaccess.h>
+
+/* Bit 63 of XCR0 is reserved for future expansion */
+#define XSTATE_EXTEND_MASK     (~(XSTATE_FPSSE | (1ULL << 63)))
+
+#define XSTATE_CPUID           0x0000000d
+
+#define FXSAVE_SIZE    512
+
+#define XSAVE_HDR_SIZE     64
+#define XSAVE_HDR_OFFSET    FXSAVE_SIZE
+
+#define XSAVE_YMM_SIZE     256
+#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
+
+/* Supported features which support lazy state saving */
+#define XSTATE_LAZY    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM                  \
+                       | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
+
+/* Supported features which require eager state saving */
+#define XSTATE_EAGER   (XSTATE_BNDREGS | XSTATE_BNDCSR)
+
+/* All currently supported features */
+#define XCNTXT_MASK    (XSTATE_LAZY | XSTATE_EAGER)
+
+#ifdef CONFIG_X86_64
+#define REX_PREFIX     "0x48, "
+#else
+#define REX_PREFIX
+#endif
+
+extern unsigned int xstate_size;
+extern u64 xfeatures_mask;
+extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
+
+extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
+
+void *get_xsave_addr(struct xregs_state *xsave, int xstate);
+const void *get_xsave_field_ptr(int xstate_field);
+
+#endif
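
As a hedged usage sketch of the lookup helpers above (the wrapper is illustrative; the in-tree MPX code is the real consumer of this pattern):

    /*
     * Illustrative wrapper around get_xsave_addr(): fetch the MPX
     * bounds-control register image from a task's xsave buffer.
     * Returns NULL if the XSTATE_BNDCSR component is not present.
     */
    static struct bndcsr *example_get_bndcsr(struct fpu *fpu)
    {
    	return get_xsave_addr(&fpu->state.xsave, XSTATE_BNDCSR);
    }
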
index 3b629f47eb65b4a51f6399ad153d44f9b5c24b0d..793179cf8e21aa89636f869fc3a9e2fe0b4a29e0 100644 (file)
@@ -1,20 +1,17 @@
 #ifdef __ASSEMBLY__
 
 #include <asm/asm.h>
-#include <asm/dwarf2.h>
 
 /* The annotation hides the frame from the unwinder and makes it look
    like an ordinary ebp save/restore. This avoids some special cases for
    the frame pointer later. */
 #ifdef CONFIG_FRAME_POINTER
        .macro FRAME
-       __ASM_SIZE(push,_cfi)   %__ASM_REG(bp)
-       CFI_REL_OFFSET          __ASM_REG(bp), 0
+       __ASM_SIZE(push,)       %__ASM_REG(bp)
        __ASM_SIZE(mov)         %__ASM_REG(sp), %__ASM_REG(bp)
        .endm
        .macro ENDFRAME
-       __ASM_SIZE(pop,_cfi)    %__ASM_REG(bp)
-       CFI_RESTORE             __ASM_REG(bp)
+       __ASM_SIZE(pop,)        %__ASM_REG(bp)
        .endm
 #else
        .macro FRAME
index 0f5fb6b6567e9c7e2856e86678189a82af0d853e..7178043b0e1dd69d20a6ff5ddaa37ee6c32841f8 100644 (file)
@@ -14,6 +14,7 @@ typedef struct {
 #endif
 #ifdef CONFIG_HAVE_KVM
        unsigned int kvm_posted_intr_ipis;
+       unsigned int kvm_posted_intr_wakeup_ipis;
 #endif
        unsigned int x86_platform_ipis; /* arch dependent */
        unsigned int apic_perf_irqs;
@@ -33,6 +34,9 @@ typedef struct {
 #ifdef CONFIG_X86_MCE_THRESHOLD
        unsigned int irq_threshold_count;
 #endif
+#ifdef CONFIG_X86_MCE_AMD
+       unsigned int irq_deferred_error_count;
+#endif
 #if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
        unsigned int irq_hv_callback_count;
 #endif
index 36f7125945e3e241cdf2ac825124fd8d7883e0ba..5fa9fb0f8809902a8e15f6f8fa1d245d379a6a16 100644 (file)
@@ -74,20 +74,16 @@ extern unsigned int hpet_readl(unsigned int a);
 extern void force_hpet_resume(void);
 
 struct irq_data;
+struct hpet_dev;
+struct irq_domain;
+
 extern void hpet_msi_unmask(struct irq_data *data);
 extern void hpet_msi_mask(struct irq_data *data);
-struct hpet_dev;
 extern void hpet_msi_write(struct hpet_dev *hdev, struct msi_msg *msg);
 extern void hpet_msi_read(struct hpet_dev *hdev, struct msi_msg *msg);
-
-#ifdef CONFIG_PCI_MSI
-extern int default_setup_hpet_msi(unsigned int irq, unsigned int id);
-#else
-static inline int default_setup_hpet_msi(unsigned int irq, unsigned int id)
-{
-       return -EINVAL;
-}
-#endif
+extern struct irq_domain *hpet_create_irq_domain(int hpet_id);
+extern int hpet_assign_irq(struct irq_domain *domain,
+                          struct hpet_dev *dev, int dev_num);
 
 #ifdef CONFIG_HPET_EMULATE_RTC
 
index e9571ddabc4feb821ae04d47c9d6c3b509178344..6615032e19c80b79f3cd023ee291c790bcb07ae2 100644 (file)
@@ -29,6 +29,7 @@
 extern asmlinkage void apic_timer_interrupt(void);
 extern asmlinkage void x86_platform_ipi(void);
 extern asmlinkage void kvm_posted_intr_ipi(void);
+extern asmlinkage void kvm_posted_intr_wakeup_ipi(void);
 extern asmlinkage void error_interrupt(void);
 extern asmlinkage void irq_work_interrupt(void);
 
@@ -36,43 +37,10 @@ extern asmlinkage void spurious_interrupt(void);
 extern asmlinkage void thermal_interrupt(void);
 extern asmlinkage void reschedule_interrupt(void);
 
-extern asmlinkage void invalidate_interrupt(void);
-extern asmlinkage void invalidate_interrupt0(void);
-extern asmlinkage void invalidate_interrupt1(void);
-extern asmlinkage void invalidate_interrupt2(void);
-extern asmlinkage void invalidate_interrupt3(void);
-extern asmlinkage void invalidate_interrupt4(void);
-extern asmlinkage void invalidate_interrupt5(void);
-extern asmlinkage void invalidate_interrupt6(void);
-extern asmlinkage void invalidate_interrupt7(void);
-extern asmlinkage void invalidate_interrupt8(void);
-extern asmlinkage void invalidate_interrupt9(void);
-extern asmlinkage void invalidate_interrupt10(void);
-extern asmlinkage void invalidate_interrupt11(void);
-extern asmlinkage void invalidate_interrupt12(void);
-extern asmlinkage void invalidate_interrupt13(void);
-extern asmlinkage void invalidate_interrupt14(void);
-extern asmlinkage void invalidate_interrupt15(void);
-extern asmlinkage void invalidate_interrupt16(void);
-extern asmlinkage void invalidate_interrupt17(void);
-extern asmlinkage void invalidate_interrupt18(void);
-extern asmlinkage void invalidate_interrupt19(void);
-extern asmlinkage void invalidate_interrupt20(void);
-extern asmlinkage void invalidate_interrupt21(void);
-extern asmlinkage void invalidate_interrupt22(void);
-extern asmlinkage void invalidate_interrupt23(void);
-extern asmlinkage void invalidate_interrupt24(void);
-extern asmlinkage void invalidate_interrupt25(void);
-extern asmlinkage void invalidate_interrupt26(void);
-extern asmlinkage void invalidate_interrupt27(void);
-extern asmlinkage void invalidate_interrupt28(void);
-extern asmlinkage void invalidate_interrupt29(void);
-extern asmlinkage void invalidate_interrupt30(void);
-extern asmlinkage void invalidate_interrupt31(void);
-
 extern asmlinkage void irq_move_cleanup_interrupt(void);
 extern asmlinkage void reboot_interrupt(void);
 extern asmlinkage void threshold_interrupt(void);
+extern asmlinkage void deferred_error_interrupt(void);
 
 extern asmlinkage void call_function_interrupt(void);
 extern asmlinkage void call_function_single_interrupt(void);
@@ -87,60 +55,93 @@ extern void trace_spurious_interrupt(void);
 extern void trace_thermal_interrupt(void);
 extern void trace_reschedule_interrupt(void);
 extern void trace_threshold_interrupt(void);
+extern void trace_deferred_error_interrupt(void);
 extern void trace_call_function_interrupt(void);
 extern void trace_call_function_single_interrupt(void);
 #define trace_irq_move_cleanup_interrupt  irq_move_cleanup_interrupt
 #define trace_reboot_interrupt  reboot_interrupt
 #define trace_kvm_posted_intr_ipi kvm_posted_intr_ipi
+#define trace_kvm_posted_intr_wakeup_ipi kvm_posted_intr_wakeup_ipi
 #endif /* CONFIG_TRACING */
 
-#ifdef CONFIG_IRQ_REMAP
-/* Intel specific interrupt remapping information */
-struct irq_2_iommu {
-       struct intel_iommu *iommu;
-       u16 irte_index;
-       u16 sub_handle;
-       u8  irte_mask;
-};
-
-/* AMD specific interrupt remapping information */
-struct irq_2_irte {
-       u16 devid; /* Device ID for IRTE table */
-       u16 index; /* Index into IRTE table*/
-};
-#endif /* CONFIG_IRQ_REMAP */
-
 #ifdef CONFIG_X86_LOCAL_APIC
 struct irq_data;
+struct pci_dev;
+struct msi_desc;
+
+enum irq_alloc_type {
+       X86_IRQ_ALLOC_TYPE_IOAPIC = 1,
+       X86_IRQ_ALLOC_TYPE_HPET,
+       X86_IRQ_ALLOC_TYPE_MSI,
+       X86_IRQ_ALLOC_TYPE_MSIX,
+       X86_IRQ_ALLOC_TYPE_DMAR,
+       X86_IRQ_ALLOC_TYPE_UV,
+};
 
-struct irq_cfg {
-       cpumask_var_t           domain;
-       cpumask_var_t           old_domain;
-       u8                      vector;
-       u8                      move_in_progress : 1;
-#ifdef CONFIG_IRQ_REMAP
-       u8                      remapped : 1;
+struct irq_alloc_info {
+       enum irq_alloc_type     type;
+       u32                     flags;
+       const struct cpumask    *mask;  /* CPU mask for vector allocation */
        union {
-               struct irq_2_iommu irq_2_iommu;
-               struct irq_2_irte  irq_2_irte;
-       };
+               int             unused;
+#ifdef CONFIG_HPET_TIMER
+               struct {
+                       int             hpet_id;
+                       int             hpet_index;
+                       void            *hpet_data;
+               };
 #endif
-       union {
-#ifdef CONFIG_X86_IO_APIC
+#ifdef CONFIG_PCI_MSI
                struct {
-                       struct list_head        irq_2_pin;
+                       struct pci_dev  *msi_dev;
+                       irq_hw_number_t msi_hwirq;
+               };
+#endif
+#ifdef CONFIG_X86_IO_APIC
+               struct {
+                       int             ioapic_id;
+                       int             ioapic_pin;
+                       int             ioapic_node;
+                       u32             ioapic_trigger : 1;
+                       u32             ioapic_polarity : 1;
+                       u32             ioapic_valid : 1;
+                       struct IO_APIC_route_entry *ioapic_entry;
+               };
+#endif
+#ifdef CONFIG_DMAR_TABLE
+               struct {
+                       int             dmar_id;
+                       void            *dmar_data;
+               };
+#endif
+#ifdef CONFIG_HT_IRQ
+               struct {
+                       int             ht_pos;
+                       int             ht_idx;
+                       struct pci_dev  *ht_dev;
+                       void            *ht_update;
+               };
+#endif
+#ifdef CONFIG_X86_UV
+               struct {
+                       int             uv_limit;
+                       int             uv_blade;
+                       unsigned long   uv_offset;
+                       char            *uv_name;
                };
 #endif
        };
 };
 
+struct irq_cfg {
+       unsigned int            dest_apicid;
+       u8                      vector;
+};
+
 extern struct irq_cfg *irq_cfg(unsigned int irq);
 extern struct irq_cfg *irqd_cfg(struct irq_data *irq_data);
-extern struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node);
 extern void lock_vector_lock(void);
 extern void unlock_vector_lock(void);
-extern int assign_irq_vector(int, struct irq_cfg *, const struct cpumask *);
-extern void clear_irq_vector(int irq, struct irq_cfg *cfg);
 extern void setup_vector_irq(int cpu);
 #ifdef CONFIG_SMP
 extern void send_cleanup_vector(struct irq_cfg *);
@@ -150,10 +151,7 @@ static inline void send_cleanup_vector(struct irq_cfg *c) { }
 static inline void irq_complete_move(struct irq_cfg *c) { }
 #endif
 
-extern int apic_retrigger_irq(struct irq_data *data);
 extern void apic_ack_edge(struct irq_data *data);
-extern int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                            unsigned int *dest_id);
 #else  /*  CONFIG_X86_LOCAL_APIC */
 static inline void lock_vector_lock(void) {}
 static inline void unlock_vector_lock(void) {}
@@ -163,8 +161,7 @@ static inline void unlock_vector_lock(void) {}
 extern atomic_t irq_err_count;
 extern atomic_t irq_mis_count;
 
-/* EISA */
-extern void eisa_set_level_irq(unsigned int irq);
+extern void elcr_set_level_irq(unsigned int irq);
 
 /* SMP */
 extern __visible void smp_apic_timer_interrupt(struct pt_regs *);
@@ -178,7 +175,6 @@ extern asmlinkage void smp_irq_move_cleanup_interrupt(void);
 extern __visible void smp_reschedule_interrupt(struct pt_regs *);
 extern __visible void smp_call_function_interrupt(struct pt_regs *);
 extern __visible void smp_call_function_single_interrupt(struct pt_regs *);
-extern __visible void smp_invalidate_interrupt(struct pt_regs *);
 #endif
 
 extern char irq_entries_start[];
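A minimal sketch (not part of this patch) of how a caller might fill the new irq_alloc_info for an IOAPIC allocation; the pin, trigger and polarity values here are illustrative:

static void fill_ioapic_info(struct irq_alloc_info *info)
{
        memset(info, 0, sizeof(*info)); /* unused union members must be zero */
        info->type            = X86_IRQ_ALLOC_TYPE_IOAPIC;
        info->ioapic_id       = 0;      /* first IOAPIC */
        info->ioapic_pin      = 9;      /* pin being routed */
        info->ioapic_node     = NUMA_NO_NODE;
        info->ioapic_trigger  = 1;      /* level triggered */
        info->ioapic_polarity = 1;      /* active low */
        info->ioapic_valid    = 1;      /* trigger/polarity fields are set */
}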
diff --git a/arch/x86/include/asm/i387.h b/arch/x86/include/asm/i387.h
deleted file mode 100644 (file)
index 6eb6fcb..0000000
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright (C) 1994 Linus Torvalds
- *
- * Pentium III FXSR, SSE support
- * General FPU state handling cleanups
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- * x86-64 work by Andi Kleen 2002
- */
-
-#ifndef _ASM_X86_I387_H
-#define _ASM_X86_I387_H
-
-#ifndef __ASSEMBLY__
-
-#include <linux/sched.h>
-#include <linux/hardirq.h>
-
-struct pt_regs;
-struct user_i387_struct;
-
-extern int init_fpu(struct task_struct *child);
-extern void fpu_finit(struct fpu *fpu);
-extern int dump_fpu(struct pt_regs *, struct user_i387_struct *);
-extern void math_state_restore(void);
-
-extern bool irq_fpu_usable(void);
-
-/*
- * Careful: __kernel_fpu_begin/end() must be called with preempt disabled
- * and they don't touch the preempt state on their own.
- * If you enable preemption after __kernel_fpu_begin(), preempt notifier
- * should call the __kernel_fpu_end() to prevent the kernel/user FPU
- * state from getting corrupted. KVM for example uses this model.
- *
- * All other cases use kernel_fpu_begin/end() which disable preemption
- * during kernel FPU usage.
- */
-extern void __kernel_fpu_begin(void);
-extern void __kernel_fpu_end(void);
-
-static inline void kernel_fpu_begin(void)
-{
-       preempt_disable();
-       WARN_ON_ONCE(!irq_fpu_usable());
-       __kernel_fpu_begin();
-}
-
-static inline void kernel_fpu_end(void)
-{
-       __kernel_fpu_end();
-       preempt_enable();
-}
-
-/* Must be called with preempt disabled */
-extern void kernel_fpu_disable(void);
-extern void kernel_fpu_enable(void);
-
-/*
- * Some instructions like VIA's padlock instructions generate a spurious
- * DNA fault but don't modify SSE registers. And these instructions
- * get used from interrupt context as well. To prevent these kernel instructions
- * in interrupt context interacting wrongly with other user/kernel fpu usage, we
- * should use them only in the context of irq_ts_save/restore()
- */
-static inline int irq_ts_save(void)
-{
-       /*
-        * If in process context and not atomic, we can take a spurious DNA fault.
-        * Otherwise, doing clts() in process context requires disabling preemption
-        * or some heavy lifting like kernel_fpu_begin()
-        */
-       if (!in_atomic())
-               return 0;
-
-       if (read_cr0() & X86_CR0_TS) {
-               clts();
-               return 1;
-       }
-
-       return 0;
-}
-
-static inline void irq_ts_restore(int TS_state)
-{
-       if (TS_state)
-               stts();
-}
-
-/*
- * The question "does this thread have fpu access?"
- * is slightly racy, since preemption could come in
- * and revoke it immediately after the test.
- *
- * However, even in that very unlikely scenario,
- * we can just assume we have FPU access - typically
- * to save the FP state - we'll just take a #NM
- * fault and get the FPU access back.
- */
-static inline int user_has_fpu(void)
-{
-       return current->thread.fpu.has_fpu;
-}
-
-extern void unlazy_fpu(struct task_struct *tsk);
-
-#endif /* __ASSEMBLY__ */
-
-#endif /* _ASM_X86_I387_H */
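The comment block in the header being deleted described the kernel_fpu_begin()/kernel_fpu_end() bracket; a minimal sketch of the usage pattern it documented (the function name is illustrative):

static void do_simd_work(void)
{
        if (!irq_fpu_usable())
                return;                 /* caller falls back to a scalar path */

        kernel_fpu_begin();             /* disables preemption, claims the FPU */
        /* ... SSE/AVX instructions may be used here ... */
        kernel_fpu_end();               /* restores state, re-enables preemption */
}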
index 34a5b93704d3ecb1d98190f6a12437a9acab7204..83ec9b1d77cc17eecb4281118290bbb278ecc273 100644 (file)
   */
 
 #define ARCH_HAS_IOREMAP_WC
+#define ARCH_HAS_IOREMAP_WT
 
 #include <linux/string.h>
 #include <linux/compiler.h>
 #include <asm/page.h>
 #include <asm/early_ioremap.h>
+#include <asm/pgtable_types.h>
 
 #define build_mmio_read(name, size, type, reg, barrier) \
 static inline type name(const volatile void __iomem *addr) \
@@ -177,6 +179,7 @@ static inline unsigned int isa_virt_to_bus(volatile void *address)
  * look at pci_iomap().
  */
 extern void __iomem *ioremap_nocache(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_uc(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_cache(resource_size_t offset, unsigned long size);
 extern void __iomem *ioremap_prot(resource_size_t offset, unsigned long size,
                                unsigned long prot_val);
@@ -197,8 +200,6 @@ extern void set_iounmap_nonlazy(void);
 
 #include <asm-generic/iomap.h>
 
-#include <linux/vmalloc.h>
-
 /*
  * Convert a virtual cached pointer to an uncached pointer
  */
@@ -320,6 +321,7 @@ extern void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr);
 extern int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                                enum page_cache_mode pcm);
 extern void __iomem *ioremap_wc(resource_size_t offset, unsigned long size);
+extern void __iomem *ioremap_wt(resource_size_t offset, unsigned long size);
 
 extern bool is_early_ioremap_ptep(pte_t *ptep);
 
@@ -338,6 +340,9 @@ extern bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
 #define IO_SPACE_LIMIT 0xffff
 
 #ifdef CONFIG_MTRR
+extern int __must_check arch_phys_wc_index(int handle);
+#define arch_phys_wc_index arch_phys_wc_index
+
 extern int __must_check arch_phys_wc_add(unsigned long base,
                                         unsigned long size);
 extern void arch_phys_wc_del(int handle);
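A hedged sketch of how the write-combining helpers above pair up in a framebuffer-style driver; the base address and size are illustrative:

static void __iomem *map_aperture_wc(resource_size_t base, unsigned long size,
                                     int *mtrr)
{
        void __iomem *p = ioremap_wc(base, size);       /* WC via PAT */

        /* On pre-PAT hardware arch_phys_wc_add() backs the mapping with
         * a WC MTRR; arch_phys_wc_del(*mtrr) undoes it on teardown. */
        *mtrr = p ? arch_phys_wc_add(base, size) : -1;
        return p;
}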
index 2f91685fe1cdb51d937eb20d29c46952d54f298f..6cbf2cfb3f8a02481d1c5c63b99eafb0c29eba35 100644 (file)
@@ -95,9 +95,22 @@ struct IR_IO_APIC_route_entry {
                index           : 15;
 } __attribute__ ((packed));
 
-#define IOAPIC_AUTO     -1
-#define IOAPIC_EDGE     0
-#define IOAPIC_LEVEL    1
+struct irq_alloc_info;
+struct ioapic_domain_cfg;
+
+#define IOAPIC_AUTO                    -1
+#define IOAPIC_EDGE                    0
+#define IOAPIC_LEVEL                   1
+
+#define IOAPIC_MASKED                  1
+#define IOAPIC_UNMASKED                        0
+
+#define IOAPIC_POL_HIGH                        0
+#define IOAPIC_POL_LOW                 1
+
+#define IOAPIC_DEST_MODE_PHYSICAL      0
+#define IOAPIC_DEST_MODE_LOGICAL       1
+
 #define        IOAPIC_MAP_ALLOC                0x1
 #define        IOAPIC_MAP_CHECK                0x2
 
@@ -110,9 +123,6 @@ extern int nr_ioapics;
 
 extern int mpc_ioapic_id(int ioapic);
 extern unsigned int mpc_ioapic_addr(int ioapic);
-extern struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic);
-
-#define MP_MAX_IOAPIC_PIN 127
 
 /* # of MP IRQ source entries */
 extern int mp_irq_entries;
@@ -120,9 +130,6 @@ extern int mp_irq_entries;
 /* MP IRQ source entries */
 extern struct mpc_intsrc mp_irqs[MAX_IRQ_SOURCES];
 
-/* Older SiS APIC requires we rewrite the index register */
-extern int sis_apic_bug;
-
 /* 1 if "noapic" boot option passed */
 extern int skip_ioapic_setup;
 
@@ -132,6 +139,8 @@ extern int noioapicquirk;
 /* -1 if "noapic" boot option passed */
 extern int noioapicreroute;
 
+extern u32 gsi_top;
+
 extern unsigned long io_apic_irqs;
 
 #define IO_APIC_IRQ(x) (((x) >= NR_IRQS_LEGACY) || ((1 << (x)) & io_apic_irqs))
@@ -147,13 +156,6 @@ struct irq_cfg;
 extern void ioapic_insert_resources(void);
 extern int arch_early_ioapic_init(void);
 
-extern int native_setup_ioapic_entry(int, struct IO_APIC_route_entry *,
-                                    unsigned int, int,
-                                    struct io_apic_irq_attr *);
-extern void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg);
-
-extern void native_eoi_ioapic_pin(int apic, int pin, int vector);
-
 extern int save_ioapic_entries(void);
 extern void mask_ioapic_entries(void);
 extern int restore_ioapic_entries(void);
@@ -161,82 +163,32 @@ extern int restore_ioapic_entries(void);
 extern void setup_ioapic_ids_from_mpc(void);
 extern void setup_ioapic_ids_from_mpc_nocheck(void);
 
-struct io_apic_irq_attr {
-       int ioapic;
-       int ioapic_pin;
-       int trigger;
-       int polarity;
-};
-
-enum ioapic_domain_type {
-       IOAPIC_DOMAIN_INVALID,
-       IOAPIC_DOMAIN_LEGACY,
-       IOAPIC_DOMAIN_STRICT,
-       IOAPIC_DOMAIN_DYNAMIC,
-};
-
-struct device_node;
-struct irq_domain;
-struct irq_domain_ops;
-
-struct ioapic_domain_cfg {
-       enum ioapic_domain_type         type;
-       const struct irq_domain_ops     *ops;
-       struct device_node              *dev;
-};
-
-struct mp_ioapic_gsi{
-       u32 gsi_base;
-       u32 gsi_end;
-};
-extern u32 gsi_top;
-
 extern int mp_find_ioapic(u32 gsi);
 extern int mp_find_ioapic_pin(int ioapic, u32 gsi);
-extern u32 mp_pin_to_gsi(int ioapic, int pin);
-extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags);
+extern int mp_map_gsi_to_irq(u32 gsi, unsigned int flags,
+                            struct irq_alloc_info *info);
 extern void mp_unmap_irq(int irq);
 extern int mp_register_ioapic(int id, u32 address, u32 gsi_base,
                              struct ioapic_domain_cfg *cfg);
 extern int mp_unregister_ioapic(u32 gsi_base);
 extern int mp_ioapic_registered(u32 gsi_base);
-extern int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
-                           irq_hw_number_t hwirq);
-extern void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq);
-extern int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node);
-extern void __init pre_init_apic_IRQ0(void);
+
+extern void ioapic_set_alloc_attr(struct irq_alloc_info *info,
+                                 int node, int trigger, int polarity);
 
 extern void mp_save_irq(struct mpc_intsrc *m);
 
 extern void disable_ioapic_support(void);
 
-extern void __init native_io_apic_init_mappings(void);
+extern void __init io_apic_init_mappings(void);
 extern unsigned int native_io_apic_read(unsigned int apic, unsigned int reg);
-extern void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int val);
-extern void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int val);
 extern void native_disable_io_apic(void);
-extern void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
-extern void intel_ir_io_apic_print_entries(unsigned int apic, unsigned int nr_entries);
-extern int native_ioapic_set_affinity(struct irq_data *,
-                                     const struct cpumask *,
-                                     bool);
 
 static inline unsigned int io_apic_read(unsigned int apic, unsigned int reg)
 {
        return x86_io_apic_ops.read(apic, reg);
 }
 
-static inline void io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
-{
-       x86_io_apic_ops.write(apic, reg, value);
-}
-static inline void io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
-{
-       x86_io_apic_ops.modify(apic, reg, value);
-}
-
-extern void io_apic_eoi(unsigned int apic, unsigned int vector);
-
 extern void setup_IO_APIC(void);
 extern void enable_IO_APIC(void);
 extern void disable_IO_APIC(void);
@@ -253,8 +205,12 @@ static inline int arch_early_ioapic_init(void) { return 0; }
 static inline void print_IO_APICs(void) {}
 #define gsi_top (NR_IRQS_LEGACY)
 static inline int mp_find_ioapic(u32 gsi) { return 0; }
-static inline u32 mp_pin_to_gsi(int ioapic, int pin) { return UINT_MAX; }
-static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags) { return gsi; }
+static inline int mp_map_gsi_to_irq(u32 gsi, unsigned int flags,
+                                   struct irq_alloc_info *info)
+{
+       return gsi;
+}
+
 static inline void mp_unmap_irq(int irq) { }
 
 static inline int save_ioapic_entries(void)
@@ -268,17 +224,11 @@ static inline int restore_ioapic_entries(void)
        return -ENOMEM;
 }
 
-static inline void mp_save_irq(struct mpc_intsrc *m) { };
+static inline void mp_save_irq(struct mpc_intsrc *m) { }
 static inline void disable_ioapic_support(void) { }
-#define native_io_apic_init_mappings   NULL
+static inline void io_apic_init_mappings(void) { }
 #define native_io_apic_read            NULL
-#define native_io_apic_write           NULL
-#define native_io_apic_modify          NULL
 #define native_disable_io_apic         NULL
-#define native_io_apic_print_entries   NULL
-#define native_ioapic_set_affinity     NULL
-#define native_setup_ioapic_entry      NULL
-#define native_eoi_ioapic_pin          NULL
 
 static inline void setup_IO_APIC(void) { }
 static inline void enable_IO_APIC(void) { }
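A sketch, assuming the interfaces above, of routing a GSI through the new irq_alloc_info path; the GSI number and attributes are illustrative:

static int route_gsi9_level_low(void)
{
        struct irq_alloc_info info;

        ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, 1); /* level, active low */
        /* returns a Linux irq number, or a negative error */
        return mp_map_gsi_to_irq(9, IOAPIC_MAP_ALLOC, &info);
}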
index a80cbb88ea911e0ab855e804aaca7eac41ad0a85..8008d06581c7f4d3e6a5680b45b8b841ad078788 100644 (file)
@@ -30,6 +30,10 @@ extern void fixup_irqs(void);
 extern void irq_force_complete_move(int);
 #endif
 
+#ifdef CONFIG_HAVE_KVM
+extern void kvm_set_posted_intr_wakeup_handler(void (*handler)(void));
+#endif
+
 extern void (*x86_platform_ipi_callback)(void);
 extern void native_init_IRQ(void);
 extern bool handle_irq(unsigned irq, struct pt_regs *regs);
index 6224d316c405c444553877845385e2d7b151c161..046c7fb1ca4332ef19044f5a482c1d0a3e3fc632 100644 (file)
 #ifndef __X86_IRQ_REMAPPING_H
 #define __X86_IRQ_REMAPPING_H
 
+#include <asm/irqdomain.h>
+#include <asm/hw_irq.h>
 #include <asm/io_apic.h>
 
-struct IO_APIC_route_entry;
-struct io_apic_irq_attr;
-struct irq_chip;
 struct msi_msg;
-struct pci_dev;
-struct irq_cfg;
+struct irq_alloc_info;
+
+enum irq_remap_cap {
+       IRQ_POSTING_CAP = 0,
+};
 
 #ifdef CONFIG_IRQ_REMAP
 
+extern bool irq_remapping_cap(enum irq_remap_cap cap);
 extern void set_irq_remapping_broken(void);
 extern int irq_remapping_prepare(void);
 extern int irq_remapping_enable(void);
 extern void irq_remapping_disable(void);
 extern int irq_remapping_reenable(int);
 extern int irq_remap_enable_fault_handling(void);
-extern int setup_ioapic_remapped_entry(int irq,
-                                      struct IO_APIC_route_entry *entry,
-                                      unsigned int destination,
-                                      int vector,
-                                      struct io_apic_irq_attr *attr);
-extern void free_remapped_irq(int irq);
-extern void compose_remapped_msi_msg(struct pci_dev *pdev,
-                                    unsigned int irq, unsigned int dest,
-                                    struct msi_msg *msg, u8 hpet_id);
-extern int setup_hpet_msi_remapped(unsigned int irq, unsigned int id);
 extern void panic_if_irq_remap(const char *msg);
-extern bool setup_remapped_irq(int irq,
-                              struct irq_cfg *cfg,
-                              struct irq_chip *chip);
 
-void irq_remap_modify_chip_defaults(struct irq_chip *chip);
+extern struct irq_domain *
+irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info);
+extern struct irq_domain *
+irq_remapping_get_irq_domain(struct irq_alloc_info *info);
+
+/* Create the PCI MSI/MSI-X irqdomain, using @parent as the parent irqdomain. */
+extern struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent);
+
+/* Get parent irqdomain for interrupt remapping irqdomain */
+static inline struct irq_domain *arch_get_ir_parent_domain(void)
+{
+       return x86_vector_domain;
+}
+
+struct vcpu_data {
+       u64 pi_desc_addr;       /* Physical address of PI Descriptor */
+       u32 vector;             /* Guest vector of the interrupt */
+};
 
 #else  /* CONFIG_IRQ_REMAP */
 
+static inline bool irq_remapping_cap(enum irq_remap_cap cap) { return false; }
 static inline void set_irq_remapping_broken(void) { }
 static inline int irq_remapping_prepare(void) { return -ENODEV; }
 static inline int irq_remapping_enable(void) { return -ENODEV; }
 static inline void irq_remapping_disable(void) { }
 static inline int irq_remapping_reenable(int eim) { return -ENODEV; }
 static inline int irq_remap_enable_fault_handling(void) { return -ENODEV; }
-static inline int setup_ioapic_remapped_entry(int irq,
-                                             struct IO_APIC_route_entry *entry,
-                                             unsigned int destination,
-                                             int vector,
-                                             struct io_apic_irq_attr *attr)
-{
-       return -ENODEV;
-}
-static inline void free_remapped_irq(int irq) { }
-static inline void compose_remapped_msi_msg(struct pci_dev *pdev,
-                                           unsigned int irq, unsigned int dest,
-                                           struct msi_msg *msg, u8 hpet_id)
-{
-}
-static inline int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
-{
-       return -ENODEV;
-}
 
 static inline void panic_if_irq_remap(const char *msg)
 {
 }
 
-static inline void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+static inline struct irq_domain *
+irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
 {
+       return NULL;
 }
 
-static inline bool setup_remapped_irq(int irq,
-                                     struct irq_cfg *cfg,
-                                     struct irq_chip *chip)
+static inline struct irq_domain *
+irq_remapping_get_irq_domain(struct irq_alloc_info *info)
 {
-       return false;
+       return NULL;
 }
-#endif /* CONFIG_IRQ_REMAP */
-
-#define dmar_alloc_hwirq()     irq_alloc_hwirq(-1)
-#define dmar_free_hwirq                irq_free_hwirq
 
+#endif /* CONFIG_IRQ_REMAP */
 #endif /* __X86_IRQ_REMAPPING_H */
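A sketch of the domain-selection logic the new getters enable; the fallback to x86_vector_domain is an assumption about the caller, not something this header mandates:

static struct irq_domain *pick_parent_domain(struct irq_alloc_info *info)
{
        struct irq_domain *d = irq_remapping_get_irq_domain(info);

        /* NULL means remapping is off or does not cover this interrupt;
         * fall back to the plain vector domain. */
        return d ? d : x86_vector_domain;
}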
index 666c89ec4bd7298c1114e7e220a3fee3ebe77bc8..4c2d2eb2060a0b4b74d71bae834e49e51e777cff 100644 (file)
 #define IRQ_MOVE_CLEANUP_VECTOR                FIRST_EXTERNAL_VECTOR
 
 #define IA32_SYSCALL_VECTOR            0x80
-#ifdef CONFIG_X86_32
-# define SYSCALL_VECTOR                        0x80
-#endif
 
 /*
  * Vectors 0x30-0x3f are used for ISA interrupts.
  *   round up to the next 16-vector boundary
  */
-#define IRQ0_VECTOR                    ((FIRST_EXTERNAL_VECTOR + 16) & ~15)
-
-#define IRQ1_VECTOR                    (IRQ0_VECTOR +  1)
-#define IRQ2_VECTOR                    (IRQ0_VECTOR +  2)
-#define IRQ3_VECTOR                    (IRQ0_VECTOR +  3)
-#define IRQ4_VECTOR                    (IRQ0_VECTOR +  4)
-#define IRQ5_VECTOR                    (IRQ0_VECTOR +  5)
-#define IRQ6_VECTOR                    (IRQ0_VECTOR +  6)
-#define IRQ7_VECTOR                    (IRQ0_VECTOR +  7)
-#define IRQ8_VECTOR                    (IRQ0_VECTOR +  8)
-#define IRQ9_VECTOR                    (IRQ0_VECTOR +  9)
-#define IRQ10_VECTOR                   (IRQ0_VECTOR + 10)
-#define IRQ11_VECTOR                   (IRQ0_VECTOR + 11)
-#define IRQ12_VECTOR                   (IRQ0_VECTOR + 12)
-#define IRQ13_VECTOR                   (IRQ0_VECTOR + 13)
-#define IRQ14_VECTOR                   (IRQ0_VECTOR + 14)
-#define IRQ15_VECTOR                   (IRQ0_VECTOR + 15)
+#define ISA_IRQ_VECTOR(irq)            (((FIRST_EXTERNAL_VECTOR + 16) & ~15) + irq)
 
 /*
  * Special IRQ vectors used by the SMP architecture, 0xf0-0xff
  */
 #define X86_PLATFORM_IPI_VECTOR                0xf7
 
-/* Vector for KVM to deliver posted interrupt IPI */
-#ifdef CONFIG_HAVE_KVM
-#define POSTED_INTR_VECTOR             0xf2
-#endif
-
+#define POSTED_INTR_WAKEUP_VECTOR      0xf1
 /*
  * IRQ work vector:
  */
 #define IRQ_WORK_VECTOR                        0xf6
 
 #define UV_BAU_MESSAGE                 0xf5
+#define DEFERRED_ERROR_VECTOR          0xf4
 
 /* Vector on which hypervisor callbacks will be delivered */
 #define HYPERVISOR_CALLBACK_VECTOR     0xf3
 
+/* Vector for KVM to deliver posted interrupt IPI */
+#ifdef CONFIG_HAVE_KVM
+#define POSTED_INTR_VECTOR             0xf2
+#endif
+
 /*
  * Local APIC timer IRQ vector is on a different priority level,
  * to work around the 'lost local interrupt if more than 2 IRQ
@@ -155,18 +138,22 @@ static inline int invalid_vm86_irq(int irq)
  * static arrays.
  */
 
-#define NR_IRQS_LEGACY                   16
+#define NR_IRQS_LEGACY                 16
 
-#define IO_APIC_VECTOR_LIMIT           ( 32 * MAX_IO_APICS )
+#define CPU_VECTOR_LIMIT               (64 * NR_CPUS)
+#define IO_APIC_VECTOR_LIMIT           (32 * MAX_IO_APICS)
 
-#ifdef CONFIG_X86_IO_APIC
-# define CPU_VECTOR_LIMIT              (64 * NR_CPUS)
-# define NR_IRQS                                       \
+#if defined(CONFIG_X86_IO_APIC) && defined(CONFIG_PCI_MSI)
+#define NR_IRQS                                                \
        (CPU_VECTOR_LIMIT > IO_APIC_VECTOR_LIMIT ?      \
                (NR_VECTORS + CPU_VECTOR_LIMIT)  :      \
                (NR_VECTORS + IO_APIC_VECTOR_LIMIT))
-#else /* !CONFIG_X86_IO_APIC: */
-# define NR_IRQS                       NR_IRQS_LEGACY
+#elif defined(CONFIG_X86_IO_APIC)
+#define        NR_IRQS                         (NR_VECTORS + IO_APIC_VECTOR_LIMIT)
+#elif defined(CONFIG_PCI_MSI)
+#define NR_IRQS                                (NR_VECTORS + CPU_VECTOR_LIMIT)
+#else
+#define NR_IRQS                                NR_IRQS_LEGACY
 #endif
 
 #endif /* _ASM_X86_IRQ_VECTORS_H */
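Worked example of the new macro, assuming FIRST_EXTERNAL_VECTOR is 0x20 (its usual value):

/*
 * ISA_IRQ_VECTOR(irq) = ((0x20 + 16) & ~15) + irq = 0x30 + irq,
 * so the PIT (ISA IRQ 0) lands on vector 0x30 and the RTC (IRQ 8)
 * on 0x38, exactly what the removed IRQ0_VECTOR..IRQ15_VECTOR
 * table spelled out entry by entry.
 */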
diff --git a/arch/x86/include/asm/irqdomain.h b/arch/x86/include/asm/irqdomain.h
new file mode 100644 (file)
index 0000000..d26075b
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef _ASM_IRQDOMAIN_H
+#define _ASM_IRQDOMAIN_H
+
+#include <linux/irqdomain.h>
+#include <asm/hw_irq.h>
+
+#ifdef CONFIG_X86_LOCAL_APIC
+enum {
+       /* Allocate contiguous CPU vectors */
+       X86_IRQ_ALLOC_CONTIGUOUS_VECTORS                = 0x1,
+};
+
+extern struct irq_domain *x86_vector_domain;
+
+extern void init_irq_alloc_info(struct irq_alloc_info *info,
+                               const struct cpumask *mask);
+extern void copy_irq_alloc_info(struct irq_alloc_info *dst,
+                               struct irq_alloc_info *src);
+#endif /* CONFIG_X86_LOCAL_APIC */
+
+#ifdef CONFIG_X86_IO_APIC
+struct device_node;
+struct irq_data;
+
+enum ioapic_domain_type {
+       IOAPIC_DOMAIN_INVALID,
+       IOAPIC_DOMAIN_LEGACY,
+       IOAPIC_DOMAIN_STRICT,
+       IOAPIC_DOMAIN_DYNAMIC,
+};
+
+struct ioapic_domain_cfg {
+       enum ioapic_domain_type         type;
+       const struct irq_domain_ops     *ops;
+       struct device_node              *dev;
+};
+
+extern const struct irq_domain_ops mp_ioapic_irqdomain_ops;
+
+extern int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+                             unsigned int nr_irqs, void *arg);
+extern void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
+                             unsigned int nr_irqs);
+extern void mp_irqdomain_activate(struct irq_domain *domain,
+                                 struct irq_data *irq_data);
+extern void mp_irqdomain_deactivate(struct irq_domain *domain,
+                                   struct irq_data *irq_data);
+extern int mp_irqdomain_ioapic_idx(struct irq_domain *domain);
+#endif /* CONFIG_X86_IO_APIC */
+
+#ifdef CONFIG_PCI_MSI
+extern void arch_init_msi_domain(struct irq_domain *domain);
+#else
+static inline void arch_init_msi_domain(struct irq_domain *domain) { }
+#endif
+
+#ifdef CONFIG_HT_IRQ
+extern void arch_init_htirq_domain(struct irq_domain *domain);
+#else
+static inline void arch_init_htirq_domain(struct irq_domain *domain) { }
+#endif
+
+#endif
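A sketch of how an IOAPIC might wire the ops above into a domain configuration and register it via the io_apic.h interface shown earlier; the id and MMIO base are illustrative:

static int register_first_ioapic(void)
{
        static struct ioapic_domain_cfg cfg = {
                .type = IOAPIC_DOMAIN_DYNAMIC,
                .ops  = &mp_ioapic_irqdomain_ops,
        };

        /* id 0, the conventional 0xfec00000 MMIO base, GSI base 0 */
        return mp_register_ioapic(0, 0xfec00000, 0, &cfg);
}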
index dea2e7e962e3e0648c9ecaaaffc5cb723b32f299..f8c0ec3a4a979f75cdc2cf321739ff8c655450dd 100644 (file)
@@ -207,6 +207,7 @@ union kvm_mmu_page_role {
                unsigned nxe:1;
                unsigned cr0_wp:1;
                unsigned smep_andnot_wp:1;
+               unsigned smap_andnot_wp:1;
        };
 };
 
@@ -400,6 +401,7 @@ struct kvm_vcpu_arch {
        struct kvm_mmu_memory_cache mmu_page_header_cache;
 
        struct fpu guest_fpu;
+       bool eager_fpu;
        u64 xcr0;
        u64 guest_supported_xcr0;
        u32 guest_xstate_size;
@@ -743,6 +745,7 @@ struct kvm_x86_ops {
        void (*cache_reg)(struct kvm_vcpu *vcpu, enum kvm_reg reg);
        unsigned long (*get_rflags)(struct kvm_vcpu *vcpu);
        void (*set_rflags)(struct kvm_vcpu *vcpu, unsigned long rflags);
+       void (*fpu_activate)(struct kvm_vcpu *vcpu);
        void (*fpu_deactivate)(struct kvm_vcpu *vcpu);
 
        void (*tlb_flush)(struct kvm_vcpu *vcpu);
@@ -999,8 +1002,6 @@ void kvm_pic_clear_all(struct kvm_pic *pic, int irq_source_id);
 
 void kvm_inject_nmi(struct kvm_vcpu *vcpu);
 
-int fx_init(struct kvm_vcpu *vcpu);
-
 void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes);
 int kvm_mmu_unprotect_page(struct kvm *kvm, gfn_t gfn);
index 1f5a86d518db379ea65c100158df0c60988bb810..982dfc3679ad3d9cd741d75f8799bc1b8b8c5ef7 100644 (file)
 #define MCG_EXT_CNT(c)         (((c) & MCG_EXT_CNT_MASK) >> MCG_EXT_CNT_SHIFT)
 #define MCG_SER_P              (1ULL<<24)   /* MCA recovery/new status bits */
 #define MCG_ELOG_P             (1ULL<<26)   /* Extended error log supported */
+#define MCG_LMCE_P             (1ULL<<27)   /* Local machine check supported */
 
 /* MCG_STATUS register defines */
 #define MCG_STATUS_RIPV  (1ULL<<0)   /* restart ip valid */
 #define MCG_STATUS_EIPV  (1ULL<<1)   /* ip points to correct instruction */
 #define MCG_STATUS_MCIP  (1ULL<<2)   /* machine check in progress */
+#define MCG_STATUS_LMCES (1ULL<<3)   /* LMCE signaled */
+
+/* MCG_EXT_CTL register defines */
+#define MCG_EXT_CTL_LMCE_EN (1ULL<<0) /* Enable LMCE */
 
 /* MCi_STATUS register defines */
 #define MCI_STATUS_VAL   (1ULL<<63)  /* valid error */
@@ -104,6 +109,7 @@ struct mce_log {
 struct mca_config {
        bool dont_log_ce;
        bool cmci_disabled;
+       bool lmce_disabled;
        bool ignore_ce;
        bool disabled;
        bool ser;
@@ -117,8 +123,19 @@ struct mca_config {
 };
 
 struct mce_vendor_flags {
-       __u64           overflow_recov  : 1, /* cpuid_ebx(80000007) */
-                       __reserved_0    : 63;
+                       /*
+                        * overflow recovery cpuid bit indicates that overflow
+                        * conditions are not fatal
+                        */
+       __u64           overflow_recov  : 1,
+
+                       /*
+                        * SUCCOR stands for S/W UnCorrectable error COntainment
+                        * and Recovery. It indicates support for data poisoning
+                        * in HW and deferred error interrupts.
+                        */
+                       succor          : 1,
+                       __reserved_0    : 62;
 };
 extern struct mce_vendor_flags mce_flags;
 
@@ -168,12 +185,16 @@ void cmci_clear(void);
 void cmci_reenable(void);
 void cmci_rediscover(void);
 void cmci_recheck(void);
+void lmce_clear(void);
+void lmce_enable(void);
 #else
 static inline void mce_intel_feature_init(struct cpuinfo_x86 *c) { }
 static inline void cmci_clear(void) {}
 static inline void cmci_reenable(void) {}
 static inline void cmci_rediscover(void) {}
 static inline void cmci_recheck(void) {}
+static inline void lmce_clear(void) {}
+static inline void lmce_enable(void) {}
 #endif
 
 #ifdef CONFIG_X86_MCE_AMD
@@ -223,6 +244,9 @@ void do_machine_check(struct pt_regs *, long);
 extern void (*mce_threshold_vector)(void);
 extern void (*threshold_cpu_callback)(unsigned long action, unsigned int cpu);
 
+/* Deferred error interrupt handler */
+extern void (*deferred_error_int_vector)(void);
+
 /*
  * Thermal handler
  */
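A sketch of how the new succor flag gates the deferred error vector; the handler name is hypothetical:

static void my_deferred_error_handler(void);    /* hypothetical */

static void setup_deferred_error_int(void)
{
        /* Only CPUs advertising SUCCOR raise deferred error interrupts. */
        if (mce_flags.succor)
                deferred_error_int_vector = my_deferred_error_handler;
}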
index 2fb20d6f7e23b0ccace549901dacf89b51e9c381..9e6278c7140eac3cac2e5841002300c3b903e457 100644 (file)
@@ -1,6 +1,8 @@
 #ifndef _ASM_X86_MICROCODE_H
 #define _ASM_X86_MICROCODE_H
 
+#include <linux/earlycpio.h>
+
 #define native_rdmsr(msr, val1, val2)                  \
 do {                                                   \
        u64 __val = native_read_msr((msr));             \
@@ -152,6 +154,7 @@ extern void __init load_ucode_bsp(void);
 extern void load_ucode_ap(void);
 extern int __init save_microcode_in_initrd(void);
 void reload_early_microcode(void);
+extern bool get_builtin_firmware(struct cpio_data *cd, const char *name);
 #else
 static inline void __init load_ucode_bsp(void) {}
 static inline void load_ucode_ap(void) {}
@@ -160,6 +163,9 @@ static inline int __init save_microcode_in_initrd(void)
        return 0;
 }
 static inline void reload_early_microcode(void) {}
+static inline bool get_builtin_firmware(struct cpio_data *cd, const char *name)
+{
+       return false;
+}
 #endif
-
 #endif /* _ASM_X86_MICROCODE_H */
index af935397e053e4a504daed8c14cfc06686a08794..ac6d328977a67e4fd0ae5674e2d7e8a30539ff92 100644 (file)
@@ -65,12 +65,12 @@ extern enum ucode_state load_microcode_amd(int cpu, u8 family, const u8 *data, s
 extern u8 amd_ucode_patch[PATCH_MAX_SIZE];
 
 #ifdef CONFIG_MICROCODE_AMD_EARLY
-extern void __init load_ucode_amd_bsp(void);
+extern void __init load_ucode_amd_bsp(unsigned int family);
 extern void load_ucode_amd_ap(void);
 extern int __init save_microcode_in_initrd_amd(void);
 void reload_ucode_amd(void);
 #else
-static inline void __init load_ucode_amd_bsp(void) {}
+static inline void __init load_ucode_amd_bsp(unsigned int family) {}
 static inline void load_ucode_amd_ap(void) {}
 static inline int __init save_microcode_in_initrd_amd(void) { return -EINVAL; }
 static inline void reload_ucode_amd(void) {}
index 2b9209c46ca939991abed04a1c5d4ef786b0c698..7991c606125d01b137a6f0e1ed16f1e8c1d86015 100644 (file)
@@ -51,20 +51,11 @@ struct extended_sigtable {
        (((struct microcode_intel *)mc)->hdr.datasize ? \
         ((struct microcode_intel *)mc)->hdr.datasize : DEFAULT_UCODE_DATASIZE)
 
-#define sigmatch(s1, s2, p1, p2) \
-       (((s1) == (s2)) && (((p1) & (p2)) || (((p1) == 0) && ((p2) == 0))))
-
 #define exttable_size(et) ((et)->count * EXT_SIGNATURE_SIZE + EXT_HEADER_SIZE)
 
-extern int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc);
+extern int has_newer_microcode(void *mc, unsigned int csig, int cpf, int rev);
 extern int microcode_sanity_check(void *mc, int print_err);
-extern int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc);
-
-static inline int
-revision_is_newer(struct microcode_header_intel *mc_header, int rev)
-{
-       return (mc_header->rev <= rev) ? 0 : 1;
-}
+extern int find_matching_signature(void *mc, unsigned int csig, int cpf);
 
 #ifdef CONFIG_MICROCODE_INTEL_EARLY
 extern void __init load_ucode_intel_bsp(void);
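An illustrative caller for the renamed helper; the uci naming follows the usual struct cpu_signature layout (sig/pf/rev) and is an assumption here, as is the apply function:

if (has_newer_microcode(mc, uci->cpu_sig.sig, uci->cpu_sig.pf, uci->cpu_sig.rev))
        /* mc matches this CPU and carries a newer revision: apply it */
        apply_patch(mc);        /* hypothetical */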
index 883f6b933fa4b6501af7a050fc161eafdb8f8d91..5e8daee7c5c94be6fc48bc7f32e593fb46948557 100644 (file)
@@ -142,6 +142,19 @@ static inline void arch_exit_mmap(struct mm_struct *mm)
        paravirt_arch_exit_mmap(mm);
 }
 
+#ifdef CONFIG_X86_64
+static inline bool is_64bit_mm(struct mm_struct *mm)
+{
+       return  !config_enabled(CONFIG_IA32_EMULATION) ||
+               !(mm->context.ia32_compat == TIF_IA32);
+}
+#else
+static inline bool is_64bit_mm(struct mm_struct *mm)
+{
+       return false;
+}
+#endif
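A one-comment gloss on the double negation in is_64bit_mm() above:

/* With CONFIG_IA32_EMULATION=n every mm on a 64-bit kernel is 64-bit;
 * with it enabled, a task exec'd from a 32-bit binary has
 * ia32_compat == TIF_IA32 and is reported as 32-bit. */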
+
 static inline void arch_bprm_mm_init(struct mm_struct *mm,
                struct vm_area_struct *vma)
 {
index a952a13d59a71bb354699cacd918d5583984693c..7a35495275a9b7b1150bde2935757a212defde59 100644 (file)
 #define MPX_BNDCFG_ENABLE_FLAG 0x1
 #define MPX_BD_ENTRY_VALID_FLAG        0x1
 
-#ifdef CONFIG_X86_64
-
-/* upper 28 bits [47:20] of the virtual address in 64-bit used to
- * index into bounds directory (BD).
- */
-#define MPX_BD_ENTRY_OFFSET    28
-#define MPX_BD_ENTRY_SHIFT     3
-/* bits [19:3] of the virtual address in 64-bit used to index into
- * bounds table (BT).
+/*
+ * The upper 28 bits [47:20] of the virtual address in 64-bit
+ * are used to index into bounds directory (BD).
+ *
+ * The directory is 2G (2^31) in size, and with 8-byte entries
+ * it has 2^28 entries.
  */
-#define MPX_BT_ENTRY_OFFSET    17
-#define MPX_BT_ENTRY_SHIFT     5
-#define MPX_IGN_BITS           3
-#define MPX_BD_ENTRY_TAIL      3
+#define MPX_BD_SIZE_BYTES_64   (1UL<<31)
+#define MPX_BD_ENTRY_BYTES_64  8
+#define MPX_BD_NR_ENTRIES_64   (MPX_BD_SIZE_BYTES_64/MPX_BD_ENTRY_BYTES_64)
 
-#else
-
-#define MPX_BD_ENTRY_OFFSET    20
-#define MPX_BD_ENTRY_SHIFT     2
-#define MPX_BT_ENTRY_OFFSET    10
-#define MPX_BT_ENTRY_SHIFT     4
-#define MPX_IGN_BITS           2
-#define MPX_BD_ENTRY_TAIL      2
+/*
+ * The 32-bit directory is 4MB (2^22) in size, and with 4-byte
+ * entries it has 2^20 entries.
+ */
+#define MPX_BD_SIZE_BYTES_32   (1UL<<22)
+#define MPX_BD_ENTRY_BYTES_32  4
+#define MPX_BD_NR_ENTRIES_32   (MPX_BD_SIZE_BYTES_32/MPX_BD_ENTRY_BYTES_32)
 
-#endif
+/*
+ * A 64-bit table is 4MB total in size, and an entry is
+ * 4 64-bit pointers in size.
+ */
+#define MPX_BT_SIZE_BYTES_64   (1UL<<22)
+#define MPX_BT_ENTRY_BYTES_64  32
+#define MPX_BT_NR_ENTRIES_64   (MPX_BT_SIZE_BYTES_64/MPX_BT_ENTRY_BYTES_64)
 
-#define MPX_BD_SIZE_BYTES (1UL<<(MPX_BD_ENTRY_OFFSET+MPX_BD_ENTRY_SHIFT))
-#define MPX_BT_SIZE_BYTES (1UL<<(MPX_BT_ENTRY_OFFSET+MPX_BT_ENTRY_SHIFT))
+/*
+ * A 32-bit table is 16kB total in size, and an entry is
+ * 4 32-bit pointers in size.
+ */
+#define MPX_BT_SIZE_BYTES_32   (1UL<<14)
+#define MPX_BT_ENTRY_BYTES_32  16
+#define MPX_BT_NR_ENTRIES_32   (MPX_BT_SIZE_BYTES_32/MPX_BT_ENTRY_BYTES_32)
 
 #define MPX_BNDSTA_TAIL                2
 #define MPX_BNDCFG_TAIL                12
 #define MPX_BNDSTA_ADDR_MASK   (~((1UL<<MPX_BNDSTA_TAIL)-1))
-#define MPX_BNDCFG_ADDR_MASK   (~((1UL<<MPX_BNDCFG_TAIL)-1))
-#define MPX_BT_ADDR_MASK       (~((1UL<<MPX_BD_ENTRY_TAIL)-1))
-
 #define MPX_BNDCFG_ADDR_MASK   (~((1UL<<MPX_BNDCFG_TAIL)-1))
 #define MPX_BNDSTA_ERROR_CODE  0x3
 
-#define MPX_BD_ENTRY_MASK      ((1<<MPX_BD_ENTRY_OFFSET)-1)
-#define MPX_BT_ENTRY_MASK      ((1<<MPX_BT_ENTRY_OFFSET)-1)
-#define MPX_GET_BD_ENTRY_OFFSET(addr)  ((((addr)>>(MPX_BT_ENTRY_OFFSET+ \
-               MPX_IGN_BITS)) & MPX_BD_ENTRY_MASK) << MPX_BD_ENTRY_SHIFT)
-#define MPX_GET_BT_ENTRY_OFFSET(addr)  ((((addr)>>MPX_IGN_BITS) & \
-               MPX_BT_ENTRY_MASK) << MPX_BT_ENTRY_SHIFT)
-
 #ifdef CONFIG_X86_INTEL_MPX
-siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
-                               struct xsave_struct *xsave_buf);
-int mpx_handle_bd_fault(struct xsave_struct *xsave_buf);
+siginfo_t *mpx_generate_siginfo(struct pt_regs *regs);
+int mpx_handle_bd_fault(void);
 static inline int kernel_managing_mpx_tables(struct mm_struct *mm)
 {
        return (mm->bd_addr != MPX_INVALID_BOUNDS_DIR);
@@ -77,12 +72,11 @@ static inline void mpx_mm_init(struct mm_struct *mm)
 void mpx_notify_unmap(struct mm_struct *mm, struct vm_area_struct *vma,
                      unsigned long start, unsigned long end);
 #else
-static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
-                                             struct xsave_struct *xsave_buf)
+static inline siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 {
        return NULL;
 }
-static inline int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
+static inline int mpx_handle_bd_fault(void)
 {
        return -EINVAL;
 }
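Quick sanity arithmetic for the new constants, matching the address-bit comments above; the BUILD_BUG_ONs would have to live inside some init function and are shown only as a worked check:

/* 64-bit: bits [47:20] index the BD -> 2^28 entries; at 8 bytes each
 *         the directory is 2^28 * 2^3 = 2^31 bytes, the 2G stated above.
 * 32-bit: a 2^22-byte BD with 4-byte entries -> 2^20 entries, i.e.
 *         bits [31:12] of a 32-bit address.
 */
BUILD_BUG_ON(MPX_BD_NR_ENTRIES_64 != (1UL << 28));
BUILD_BUG_ON(MPX_BD_NR_ENTRIES_32 != (1UL << 20));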
diff --git a/arch/x86/include/asm/msi.h b/arch/x86/include/asm/msi.h
new file mode 100644 (file)
index 0000000..93724cc
--- /dev/null
@@ -0,0 +1,7 @@
+#ifndef _ASM_X86_MSI_H
+#define _ASM_X86_MSI_H
+#include <asm/hw_irq.h>
+
+typedef struct irq_alloc_info msi_alloc_info_t;
+
+#endif /* _ASM_X86_MSI_H */
diff --git a/arch/x86/include/asm/msr-index.h b/arch/x86/include/asm/msr-index.h
new file mode 100644 (file)
index 0000000..9ebc3d0
--- /dev/null
@@ -0,0 +1,665 @@
+#ifndef _ASM_X86_MSR_INDEX_H
+#define _ASM_X86_MSR_INDEX_H
+
+/* CPU model specific register (MSR) numbers */
+
+/* x86-64 specific MSRs */
+#define MSR_EFER               0xc0000080 /* extended feature register */
+#define MSR_STAR               0xc0000081 /* legacy mode SYSCALL target */
+#define MSR_LSTAR              0xc0000082 /* long mode SYSCALL target */
+#define MSR_CSTAR              0xc0000083 /* compat mode SYSCALL target */
+#define MSR_SYSCALL_MASK       0xc0000084 /* EFLAGS mask for syscall */
+#define MSR_FS_BASE            0xc0000100 /* 64bit FS base */
+#define MSR_GS_BASE            0xc0000101 /* 64bit GS base */
+#define MSR_KERNEL_GS_BASE     0xc0000102 /* SwapGS GS shadow */
+#define MSR_TSC_AUX            0xc0000103 /* Auxiliary TSC */
+
+/* EFER bits: */
+#define _EFER_SCE              0  /* SYSCALL/SYSRET */
+#define _EFER_LME              8  /* Long mode enable */
+#define _EFER_LMA              10 /* Long mode active (read-only) */
+#define _EFER_NX               11 /* No execute enable */
+#define _EFER_SVME             12 /* Enable virtualization */
+#define _EFER_LMSLE            13 /* Long Mode Segment Limit Enable */
+#define _EFER_FFXSR            14 /* Enable Fast FXSAVE/FXRSTOR */
+
+#define EFER_SCE               (1<<_EFER_SCE)
+#define EFER_LME               (1<<_EFER_LME)
+#define EFER_LMA               (1<<_EFER_LMA)
+#define EFER_NX                        (1<<_EFER_NX)
+#define EFER_SVME              (1<<_EFER_SVME)
+#define EFER_LMSLE             (1<<_EFER_LMSLE)
+#define EFER_FFXSR             (1<<_EFER_FFXSR)
+
+/* Intel MSRs. Some also available on other CPUs */
+#define MSR_IA32_PERFCTR0              0x000000c1
+#define MSR_IA32_PERFCTR1              0x000000c2
+#define MSR_FSB_FREQ                   0x000000cd
+#define MSR_NHM_PLATFORM_INFO          0x000000ce
+
+#define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
+#define NHM_C3_AUTO_DEMOTE             (1UL << 25)
+#define NHM_C1_AUTO_DEMOTE             (1UL << 26)
+#define ATM_LNC_C6_AUTO_DEMOTE         (1UL << 25)
+#define SNB_C1_AUTO_UNDEMOTE           (1UL << 27)
+#define SNB_C3_AUTO_UNDEMOTE           (1UL << 28)
+
+#define MSR_PLATFORM_INFO              0x000000ce
+#define MSR_MTRRcap                    0x000000fe
+#define MSR_IA32_BBL_CR_CTL            0x00000119
+#define MSR_IA32_BBL_CR_CTL3           0x0000011e
+
+#define MSR_IA32_SYSENTER_CS           0x00000174
+#define MSR_IA32_SYSENTER_ESP          0x00000175
+#define MSR_IA32_SYSENTER_EIP          0x00000176
+
+#define MSR_IA32_MCG_CAP               0x00000179
+#define MSR_IA32_MCG_STATUS            0x0000017a
+#define MSR_IA32_MCG_CTL               0x0000017b
+#define MSR_IA32_MCG_EXT_CTL           0x000004d0
+
+#define MSR_OFFCORE_RSP_0              0x000001a6
+#define MSR_OFFCORE_RSP_1              0x000001a7
+#define MSR_NHM_TURBO_RATIO_LIMIT      0x000001ad
+#define MSR_IVT_TURBO_RATIO_LIMIT      0x000001ae
+#define MSR_TURBO_RATIO_LIMIT          0x000001ad
+#define MSR_TURBO_RATIO_LIMIT1         0x000001ae
+#define MSR_TURBO_RATIO_LIMIT2         0x000001af
+
+#define MSR_LBR_SELECT                 0x000001c8
+#define MSR_LBR_TOS                    0x000001c9
+#define MSR_LBR_NHM_FROM               0x00000680
+#define MSR_LBR_NHM_TO                 0x000006c0
+#define MSR_LBR_CORE_FROM              0x00000040
+#define MSR_LBR_CORE_TO                        0x00000060
+
+#define MSR_IA32_PEBS_ENABLE           0x000003f1
+#define MSR_IA32_DS_AREA               0x00000600
+#define MSR_IA32_PERF_CAPABILITIES     0x00000345
+#define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
+
+#define MSR_IA32_RTIT_CTL              0x00000570
+#define RTIT_CTL_TRACEEN               BIT(0)
+#define RTIT_CTL_OS                    BIT(2)
+#define RTIT_CTL_USR                   BIT(3)
+#define RTIT_CTL_CR3EN                 BIT(7)
+#define RTIT_CTL_TOPA                  BIT(8)
+#define RTIT_CTL_TSC_EN                        BIT(10)
+#define RTIT_CTL_DISRETC               BIT(11)
+#define RTIT_CTL_BRANCH_EN             BIT(13)
+#define MSR_IA32_RTIT_STATUS           0x00000571
+#define RTIT_STATUS_CONTEXTEN          BIT(1)
+#define RTIT_STATUS_TRIGGEREN          BIT(2)
+#define RTIT_STATUS_ERROR              BIT(4)
+#define RTIT_STATUS_STOPPED            BIT(5)
+#define MSR_IA32_RTIT_CR3_MATCH                0x00000572
+#define MSR_IA32_RTIT_OUTPUT_BASE      0x00000560
+#define MSR_IA32_RTIT_OUTPUT_MASK      0x00000561
+
+#define MSR_MTRRfix64K_00000           0x00000250
+#define MSR_MTRRfix16K_80000           0x00000258
+#define MSR_MTRRfix16K_A0000           0x00000259
+#define MSR_MTRRfix4K_C0000            0x00000268
+#define MSR_MTRRfix4K_C8000            0x00000269
+#define MSR_MTRRfix4K_D0000            0x0000026a
+#define MSR_MTRRfix4K_D8000            0x0000026b
+#define MSR_MTRRfix4K_E0000            0x0000026c
+#define MSR_MTRRfix4K_E8000            0x0000026d
+#define MSR_MTRRfix4K_F0000            0x0000026e
+#define MSR_MTRRfix4K_F8000            0x0000026f
+#define MSR_MTRRdefType                        0x000002ff
+
+#define MSR_IA32_CR_PAT                        0x00000277
+
+#define MSR_IA32_DEBUGCTLMSR           0x000001d9
+#define MSR_IA32_LASTBRANCHFROMIP      0x000001db
+#define MSR_IA32_LASTBRANCHTOIP                0x000001dc
+#define MSR_IA32_LASTINTFROMIP         0x000001dd
+#define MSR_IA32_LASTINTTOIP           0x000001de
+
+/* DEBUGCTLMSR bits (others vary by model): */
+#define DEBUGCTLMSR_LBR                        (1UL <<  0) /* last branch recording */
+#define DEBUGCTLMSR_BTF                        (1UL <<  1) /* single-step on branches */
+#define DEBUGCTLMSR_TR                 (1UL <<  6)
+#define DEBUGCTLMSR_BTS                        (1UL <<  7)
+#define DEBUGCTLMSR_BTINT              (1UL <<  8)
+#define DEBUGCTLMSR_BTS_OFF_OS         (1UL <<  9)
+#define DEBUGCTLMSR_BTS_OFF_USR                (1UL << 10)
+#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
+
+#define MSR_IA32_POWER_CTL             0x000001fc
+
+#define MSR_IA32_MC0_CTL               0x00000400
+#define MSR_IA32_MC0_STATUS            0x00000401
+#define MSR_IA32_MC0_ADDR              0x00000402
+#define MSR_IA32_MC0_MISC              0x00000403
+
+/* C-state Residency Counters */
+#define MSR_PKG_C3_RESIDENCY           0x000003f8
+#define MSR_PKG_C6_RESIDENCY           0x000003f9
+#define MSR_PKG_C7_RESIDENCY           0x000003fa
+#define MSR_CORE_C3_RESIDENCY          0x000003fc
+#define MSR_CORE_C6_RESIDENCY          0x000003fd
+#define MSR_CORE_C7_RESIDENCY          0x000003fe
+#define MSR_KNL_CORE_C6_RESIDENCY      0x000003ff
+#define MSR_PKG_C2_RESIDENCY           0x0000060d
+#define MSR_PKG_C8_RESIDENCY           0x00000630
+#define MSR_PKG_C9_RESIDENCY           0x00000631
+#define MSR_PKG_C10_RESIDENCY          0x00000632
+
+/* Run Time Average Power Limiting (RAPL) Interface */
+
+#define MSR_RAPL_POWER_UNIT            0x00000606
+
+#define MSR_PKG_POWER_LIMIT            0x00000610
+#define MSR_PKG_ENERGY_STATUS          0x00000611
+#define MSR_PKG_PERF_STATUS            0x00000613
+#define MSR_PKG_POWER_INFO             0x00000614
+
+#define MSR_DRAM_POWER_LIMIT           0x00000618
+#define MSR_DRAM_ENERGY_STATUS         0x00000619
+#define MSR_DRAM_PERF_STATUS           0x0000061b
+#define MSR_DRAM_POWER_INFO            0x0000061c
+
+#define MSR_PP0_POWER_LIMIT            0x00000638
+#define MSR_PP0_ENERGY_STATUS          0x00000639
+#define MSR_PP0_POLICY                 0x0000063a
+#define MSR_PP0_PERF_STATUS            0x0000063b
+
+#define MSR_PP1_POWER_LIMIT            0x00000640
+#define MSR_PP1_ENERGY_STATUS          0x00000641
+#define MSR_PP1_POLICY                 0x00000642
+
+#define MSR_PKG_WEIGHTED_CORE_C0_RES   0x00000658
+#define MSR_PKG_ANY_CORE_C0_RES                0x00000659
+#define MSR_PKG_ANY_GFXE_C0_RES                0x0000065A
+#define MSR_PKG_BOTH_CORE_GFXE_C0_RES  0x0000065B
+
+#define MSR_CORE_C1_RES                        0x00000660
+
+#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
+#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
+
+#define MSR_CORE_PERF_LIMIT_REASONS    0x00000690
+#define MSR_GFX_PERF_LIMIT_REASONS     0x000006B0
+#define MSR_RING_PERF_LIMIT_REASONS    0x000006B1
+
+/* Hardware P state interface */
+#define MSR_PPERF                      0x0000064e
+#define MSR_PERF_LIMIT_REASONS         0x0000064f
+#define MSR_PM_ENABLE                  0x00000770
+#define MSR_HWP_CAPABILITIES           0x00000771
+#define MSR_HWP_REQUEST_PKG            0x00000772
+#define MSR_HWP_INTERRUPT              0x00000773
+#define MSR_HWP_REQUEST                0x00000774
+#define MSR_HWP_STATUS                 0x00000777
+
+/* CPUID.6.EAX */
+#define HWP_BASE_BIT                   (1<<7)
+#define HWP_NOTIFICATIONS_BIT          (1<<8)
+#define HWP_ACTIVITY_WINDOW_BIT                (1<<9)
+#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
+#define HWP_PACKAGE_LEVEL_REQUEST_BIT  (1<<11)
+
+/* IA32_HWP_CAPABILITIES */
+#define HWP_HIGHEST_PERF(x)            (x & 0xff)
+#define HWP_GUARANTEED_PERF(x)         ((x & (0xff << 8)) >>8)
+#define HWP_MOSTEFFICIENT_PERF(x)      ((x & (0xff << 16)) >>16)
+#define HWP_LOWEST_PERF(x)             ((x & (0xff << 24)) >>24)
+
+/* IA32_HWP_REQUEST */
+#define HWP_MIN_PERF(x)                (x & 0xff)
+#define HWP_MAX_PERF(x)                ((x & 0xff) << 8)
+#define HWP_DESIRED_PERF(x)            ((x & 0xff) << 16)
+#define HWP_ENERGY_PERF_PREFERENCE(x)  ((x & 0xff) << 24)
+#define HWP_ACTIVITY_WINDOW(x)         ((unsigned long long)(x & 0xff3) << 32)
+#define HWP_PACKAGE_CONTROL(x)         ((unsigned long long)(x & 0x1) << 42)
+
+/* IA32_HWP_STATUS */
+#define HWP_GUARANTEED_CHANGE(x)       (x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM(x)    (x & 0x4)
+
+/* IA32_HWP_INTERRUPT */
+#define HWP_CHANGE_TO_GUARANTEED_INT(x)        (x & 0x1)
+#define HWP_EXCURSION_TO_MINIMUM_INT(x)        (x & 0x2)
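A sketch reading the guaranteed performance level with the accessors above; the message text is illustrative:

static void report_hwp_guaranteed(void)
{
        u64 cap;

        rdmsrl(MSR_HWP_CAPABILITIES, cap);
        pr_info("HWP guaranteed perf: %llu\n", HWP_GUARANTEED_PERF(cap));
}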
+
+#define MSR_AMD64_MC0_MASK             0xc0010044
+
+#define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
+#define MSR_IA32_MCx_STATUS(x)         (MSR_IA32_MC0_STATUS + 4*(x))
+#define MSR_IA32_MCx_ADDR(x)           (MSR_IA32_MC0_ADDR + 4*(x))
+#define MSR_IA32_MCx_MISC(x)           (MSR_IA32_MC0_MISC + 4*(x))
+
+#define MSR_AMD64_MCx_MASK(x)          (MSR_AMD64_MC0_MASK + (x))
+
+/* These are consecutive and not in the normal block of four MCE bank registers */
+#define MSR_IA32_MC0_CTL2              0x00000280
+#define MSR_IA32_MCx_CTL2(x)           (MSR_IA32_MC0_CTL2 + (x))
+
+#define MSR_P6_PERFCTR0                        0x000000c1
+#define MSR_P6_PERFCTR1                        0x000000c2
+#define MSR_P6_EVNTSEL0                        0x00000186
+#define MSR_P6_EVNTSEL1                        0x00000187
+
+#define MSR_KNC_PERFCTR0               0x00000020
+#define MSR_KNC_PERFCTR1               0x00000021
+#define MSR_KNC_EVNTSEL0               0x00000028
+#define MSR_KNC_EVNTSEL1               0x00000029
+
+/* Alternative perfctr range with full access. */
+#define MSR_IA32_PMC0                  0x000004c1
+
+/* AMD64 MSRs. Not complete. See the architecture manual for a more
+   complete list. */
+
+#define MSR_AMD64_PATCH_LEVEL          0x0000008b
+#define MSR_AMD64_TSC_RATIO            0xc0000104
+#define MSR_AMD64_NB_CFG               0xc001001f
+#define MSR_AMD64_PATCH_LOADER         0xc0010020
+#define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
+#define MSR_AMD64_OSVW_STATUS          0xc0010141
+#define MSR_AMD64_LS_CFG               0xc0011020
+#define MSR_AMD64_DC_CFG               0xc0011022
+#define MSR_AMD64_BU_CFG2              0xc001102a
+#define MSR_AMD64_IBSFETCHCTL          0xc0011030
+#define MSR_AMD64_IBSFETCHLINAD                0xc0011031
+#define MSR_AMD64_IBSFETCHPHYSAD       0xc0011032
+#define MSR_AMD64_IBSFETCH_REG_COUNT   3
+#define MSR_AMD64_IBSFETCH_REG_MASK    ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
+#define MSR_AMD64_IBSOPCTL             0xc0011033
+#define MSR_AMD64_IBSOPRIP             0xc0011034
+#define MSR_AMD64_IBSOPDATA            0xc0011035
+#define MSR_AMD64_IBSOPDATA2           0xc0011036
+#define MSR_AMD64_IBSOPDATA3           0xc0011037
+#define MSR_AMD64_IBSDCLINAD           0xc0011038
+#define MSR_AMD64_IBSDCPHYSAD          0xc0011039
+#define MSR_AMD64_IBSOP_REG_COUNT      7
+#define MSR_AMD64_IBSOP_REG_MASK       ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
+#define MSR_AMD64_IBSCTL               0xc001103a
+#define MSR_AMD64_IBSBRTARGET          0xc001103b
+#define MSR_AMD64_IBSOPDATA4           0xc001103d
+#define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
+
+/* Fam 16h MSRs */
+#define MSR_F16H_L2I_PERF_CTL          0xc0010230
+#define MSR_F16H_L2I_PERF_CTR          0xc0010231
+#define MSR_F16H_DR1_ADDR_MASK         0xc0011019
+#define MSR_F16H_DR2_ADDR_MASK         0xc001101a
+#define MSR_F16H_DR3_ADDR_MASK         0xc001101b
+#define MSR_F16H_DR0_ADDR_MASK         0xc0011027
+
+/* Fam 15h MSRs */
+#define MSR_F15H_PERF_CTL              0xc0010200
+#define MSR_F15H_PERF_CTR              0xc0010201
+#define MSR_F15H_NB_PERF_CTL           0xc0010240
+#define MSR_F15H_NB_PERF_CTR           0xc0010241
+
+/* Fam 10h MSRs */
+#define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
+#define FAM10H_MMIO_CONF_ENABLE                (1<<0)
+#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
+#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
+#define FAM10H_MMIO_CONF_BASE_MASK     0xfffffffULL
+#define FAM10H_MMIO_CONF_BASE_SHIFT    20
+#define MSR_FAM10H_NODE_ID             0xc001100c
+
+/* K8 MSRs */
+#define MSR_K8_TOP_MEM1                        0xc001001a
+#define MSR_K8_TOP_MEM2                        0xc001001d
+#define MSR_K8_SYSCFG                  0xc0010010
+#define MSR_K8_INT_PENDING_MSG         0xc0010055
+/* C1E active bits in int pending message */
+#define K8_INTP_C1E_ACTIVE_MASK                0x18000000
+#define MSR_K8_TSEG_ADDR               0xc0010112
+#define K8_MTRRFIXRANGE_DRAM_ENABLE    0x00040000 /* MtrrFixDramEn bit    */
+#define K8_MTRRFIXRANGE_DRAM_MODIFY    0x00080000 /* MtrrFixDramModEn bit */
+#define K8_MTRR_RDMEM_WRMEM_MASK       0x18181818 /* Mask: RdMem|WrMem    */
+
+/* K7 MSRs */
+#define MSR_K7_EVNTSEL0                        0xc0010000
+#define MSR_K7_PERFCTR0                        0xc0010004
+#define MSR_K7_EVNTSEL1                        0xc0010001
+#define MSR_K7_PERFCTR1                        0xc0010005
+#define MSR_K7_EVNTSEL2                        0xc0010002
+#define MSR_K7_PERFCTR2                        0xc0010006
+#define MSR_K7_EVNTSEL3                        0xc0010003
+#define MSR_K7_PERFCTR3                        0xc0010007
+#define MSR_K7_CLK_CTL                 0xc001001b
+#define MSR_K7_HWCR                    0xc0010015
+#define MSR_K7_FID_VID_CTL             0xc0010041
+#define MSR_K7_FID_VID_STATUS          0xc0010042
+
+/* K6 MSRs */
+#define MSR_K6_WHCR                    0xc0000082
+#define MSR_K6_UWCCR                   0xc0000085
+#define MSR_K6_EPMR                    0xc0000086
+#define MSR_K6_PSOR                    0xc0000087
+#define MSR_K6_PFIR                    0xc0000088
+
+/* Centaur-Hauls/IDT defined MSRs. */
+#define MSR_IDT_FCR1                   0x00000107
+#define MSR_IDT_FCR2                   0x00000108
+#define MSR_IDT_FCR3                   0x00000109
+#define MSR_IDT_FCR4                   0x0000010a
+
+#define MSR_IDT_MCR0                   0x00000110
+#define MSR_IDT_MCR1                   0x00000111
+#define MSR_IDT_MCR2                   0x00000112
+#define MSR_IDT_MCR3                   0x00000113
+#define MSR_IDT_MCR4                   0x00000114
+#define MSR_IDT_MCR5                   0x00000115
+#define MSR_IDT_MCR6                   0x00000116
+#define MSR_IDT_MCR7                   0x00000117
+#define MSR_IDT_MCR_CTRL               0x00000120
+
+/* VIA Cyrix defined MSRs*/
+#define MSR_VIA_FCR                    0x00001107
+#define MSR_VIA_LONGHAUL               0x0000110a
+#define MSR_VIA_RNG                    0x0000110b
+#define MSR_VIA_BCR2                   0x00001147
+
+/* Transmeta defined MSRs */
+#define MSR_TMTA_LONGRUN_CTRL          0x80868010
+#define MSR_TMTA_LONGRUN_FLAGS         0x80868011
+#define MSR_TMTA_LRTI_READOUT          0x80868018
+#define MSR_TMTA_LRTI_VOLT_MHZ         0x8086801a
+
+/* Intel defined MSRs. */
+#define MSR_IA32_P5_MC_ADDR            0x00000000
+#define MSR_IA32_P5_MC_TYPE            0x00000001
+#define MSR_IA32_TSC                   0x00000010
+#define MSR_IA32_PLATFORM_ID           0x00000017
+#define MSR_IA32_EBL_CR_POWERON                0x0000002a
+#define MSR_EBC_FREQUENCY_ID           0x0000002c
+#define MSR_SMI_COUNT                  0x00000034
+#define MSR_IA32_FEATURE_CONTROL        0x0000003a
+#define MSR_IA32_TSC_ADJUST             0x0000003b
+#define MSR_IA32_BNDCFGS               0x00000d90
+
+#define MSR_IA32_XSS                   0x00000da0
+
+#define FEATURE_CONTROL_LOCKED                         (1<<0)
+#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX       (1<<1)
+#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX      (1<<2)
+#define FEATURE_CONTROL_LMCE                           (1<<20)
+
+#define MSR_IA32_APICBASE              0x0000001b
+#define MSR_IA32_APICBASE_BSP          (1<<8)
+#define MSR_IA32_APICBASE_ENABLE       (1<<11)
+#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
+
+#define MSR_IA32_TSCDEADLINE           0x000006e0
+
+#define MSR_IA32_UCODE_WRITE           0x00000079
+#define MSR_IA32_UCODE_REV             0x0000008b
+
+#define MSR_IA32_SMM_MONITOR_CTL       0x0000009b
+#define MSR_IA32_SMBASE                        0x0000009e
+
+#define MSR_IA32_PERF_STATUS           0x00000198
+#define MSR_IA32_PERF_CTL              0x00000199
+#define INTEL_PERF_CTL_MASK            0xffff
+#define MSR_AMD_PSTATE_DEF_BASE                0xc0010064
+#define MSR_AMD_PERF_STATUS            0xc0010063
+#define MSR_AMD_PERF_CTL               0xc0010062
+
+#define MSR_IA32_MPERF                 0x000000e7
+#define MSR_IA32_APERF                 0x000000e8
+
+#define MSR_IA32_THERM_CONTROL         0x0000019a
+#define MSR_IA32_THERM_INTERRUPT       0x0000019b
+
+#define THERM_INT_HIGH_ENABLE          (1 << 0)
+#define THERM_INT_LOW_ENABLE           (1 << 1)
+#define THERM_INT_PLN_ENABLE           (1 << 24)
+
+#define MSR_IA32_THERM_STATUS          0x0000019c
+
+#define THERM_STATUS_PROCHOT           (1 << 0)
+#define THERM_STATUS_POWER_LIMIT       (1 << 10)
+
+#define MSR_THERM2_CTL                 0x0000019d
+
+#define MSR_THERM2_CTL_TM_SELECT       (1ULL << 16)
+
+#define MSR_IA32_MISC_ENABLE           0x000001a0
+
+#define MSR_IA32_TEMPERATURE_TARGET    0x000001a2
+
+#define MSR_MISC_PWR_MGMT              0x000001aa
+
+#define MSR_IA32_ENERGY_PERF_BIAS      0x000001b0
+#define ENERGY_PERF_BIAS_PERFORMANCE   0
+#define ENERGY_PERF_BIAS_NORMAL                6
+#define ENERGY_PERF_BIAS_POWERSAVE     15
+
+#define MSR_IA32_PACKAGE_THERM_STATUS          0x000001b1
+
+#define PACKAGE_THERM_STATUS_PROCHOT           (1 << 0)
+#define PACKAGE_THERM_STATUS_POWER_LIMIT       (1 << 10)
+
+#define MSR_IA32_PACKAGE_THERM_INTERRUPT       0x000001b2
+
+#define PACKAGE_THERM_INT_HIGH_ENABLE          (1 << 0)
+#define PACKAGE_THERM_INT_LOW_ENABLE           (1 << 1)
+#define PACKAGE_THERM_INT_PLN_ENABLE           (1 << 24)
+
+/* Thermal Thresholds Support */
+#define THERM_INT_THRESHOLD0_ENABLE    (1 << 15)
+#define THERM_SHIFT_THRESHOLD0        8
+#define THERM_MASK_THRESHOLD0          (0x7f << THERM_SHIFT_THRESHOLD0)
+#define THERM_INT_THRESHOLD1_ENABLE    (1 << 23)
+#define THERM_SHIFT_THRESHOLD1        16
+#define THERM_MASK_THRESHOLD1          (0x7f << THERM_SHIFT_THRESHOLD1)
+#define THERM_STATUS_THRESHOLD0        (1 << 6)
+#define THERM_LOG_THRESHOLD0           (1 << 7)
+#define THERM_STATUS_THRESHOLD1        (1 << 8)
+#define THERM_LOG_THRESHOLD1           (1 << 9)
+
+/* MISC_ENABLE bits: architectural */
+#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT           0
+#define MSR_IA32_MISC_ENABLE_FAST_STRING               (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
+#define MSR_IA32_MISC_ENABLE_TCC_BIT                   1
+#define MSR_IA32_MISC_ENABLE_TCC                       (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
+#define MSR_IA32_MISC_ENABLE_EMON_BIT                  7
+#define MSR_IA32_MISC_ENABLE_EMON                      (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT           11
+#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL               (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT          12
+#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL              (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT    16
+#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP                (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
+#define MSR_IA32_MISC_ENABLE_MWAIT_BIT                 18
+#define MSR_IA32_MISC_ENABLE_MWAIT                     (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT           22
+#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT          23
+#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE              (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT            34
+#define MSR_IA32_MISC_ENABLE_XD_DISABLE                        (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
+
+/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT            2
+#define MSR_IA32_MISC_ENABLE_X87_COMPAT                        (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
+#define MSR_IA32_MISC_ENABLE_TM1_BIT                   3
+#define MSR_IA32_MISC_ENABLE_TM1                       (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT    4
+#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE                (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT       6
+#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT         8
+#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK             (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT      9
+#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_BIT                  10
+#define MSR_IA32_MISC_ENABLE_FERR                      (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT                10
+#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX            (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
+#define MSR_IA32_MISC_ENABLE_TM2_BIT                   13
+#define MSR_IA32_MISC_ENABLE_TM2                       (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT      19
+#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT                20
+#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK            (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT           24
+#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT               (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT      37
+#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT         38
+#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE             (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT       39
+#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
+
+#define MSR_IA32_TSC_DEADLINE          0x000006E0
+
+/* P4/Xeon+ specific */
+#define MSR_IA32_MCG_EAX               0x00000180
+#define MSR_IA32_MCG_EBX               0x00000181
+#define MSR_IA32_MCG_ECX               0x00000182
+#define MSR_IA32_MCG_EDX               0x00000183
+#define MSR_IA32_MCG_ESI               0x00000184
+#define MSR_IA32_MCG_EDI               0x00000185
+#define MSR_IA32_MCG_EBP               0x00000186
+#define MSR_IA32_MCG_ESP               0x00000187
+#define MSR_IA32_MCG_EFLAGS            0x00000188
+#define MSR_IA32_MCG_EIP               0x00000189
+#define MSR_IA32_MCG_RESERVED          0x0000018a
+
+/* Pentium IV performance counter MSRs */
+#define MSR_P4_BPU_PERFCTR0            0x00000300
+#define MSR_P4_BPU_PERFCTR1            0x00000301
+#define MSR_P4_BPU_PERFCTR2            0x00000302
+#define MSR_P4_BPU_PERFCTR3            0x00000303
+#define MSR_P4_MS_PERFCTR0             0x00000304
+#define MSR_P4_MS_PERFCTR1             0x00000305
+#define MSR_P4_MS_PERFCTR2             0x00000306
+#define MSR_P4_MS_PERFCTR3             0x00000307
+#define MSR_P4_FLAME_PERFCTR0          0x00000308
+#define MSR_P4_FLAME_PERFCTR1          0x00000309
+#define MSR_P4_FLAME_PERFCTR2          0x0000030a
+#define MSR_P4_FLAME_PERFCTR3          0x0000030b
+#define MSR_P4_IQ_PERFCTR0             0x0000030c
+#define MSR_P4_IQ_PERFCTR1             0x0000030d
+#define MSR_P4_IQ_PERFCTR2             0x0000030e
+#define MSR_P4_IQ_PERFCTR3             0x0000030f
+#define MSR_P4_IQ_PERFCTR4             0x00000310
+#define MSR_P4_IQ_PERFCTR5             0x00000311
+#define MSR_P4_BPU_CCCR0               0x00000360
+#define MSR_P4_BPU_CCCR1               0x00000361
+#define MSR_P4_BPU_CCCR2               0x00000362
+#define MSR_P4_BPU_CCCR3               0x00000363
+#define MSR_P4_MS_CCCR0                        0x00000364
+#define MSR_P4_MS_CCCR1                        0x00000365
+#define MSR_P4_MS_CCCR2                        0x00000366
+#define MSR_P4_MS_CCCR3                        0x00000367
+#define MSR_P4_FLAME_CCCR0             0x00000368
+#define MSR_P4_FLAME_CCCR1             0x00000369
+#define MSR_P4_FLAME_CCCR2             0x0000036a
+#define MSR_P4_FLAME_CCCR3             0x0000036b
+#define MSR_P4_IQ_CCCR0                        0x0000036c
+#define MSR_P4_IQ_CCCR1                        0x0000036d
+#define MSR_P4_IQ_CCCR2                        0x0000036e
+#define MSR_P4_IQ_CCCR3                        0x0000036f
+#define MSR_P4_IQ_CCCR4                        0x00000370
+#define MSR_P4_IQ_CCCR5                        0x00000371
+#define MSR_P4_ALF_ESCR0               0x000003ca
+#define MSR_P4_ALF_ESCR1               0x000003cb
+#define MSR_P4_BPU_ESCR0               0x000003b2
+#define MSR_P4_BPU_ESCR1               0x000003b3
+#define MSR_P4_BSU_ESCR0               0x000003a0
+#define MSR_P4_BSU_ESCR1               0x000003a1
+#define MSR_P4_CRU_ESCR0               0x000003b8
+#define MSR_P4_CRU_ESCR1               0x000003b9
+#define MSR_P4_CRU_ESCR2               0x000003cc
+#define MSR_P4_CRU_ESCR3               0x000003cd
+#define MSR_P4_CRU_ESCR4               0x000003e0
+#define MSR_P4_CRU_ESCR5               0x000003e1
+#define MSR_P4_DAC_ESCR0               0x000003a8
+#define MSR_P4_DAC_ESCR1               0x000003a9
+#define MSR_P4_FIRM_ESCR0              0x000003a4
+#define MSR_P4_FIRM_ESCR1              0x000003a5
+#define MSR_P4_FLAME_ESCR0             0x000003a6
+#define MSR_P4_FLAME_ESCR1             0x000003a7
+#define MSR_P4_FSB_ESCR0               0x000003a2
+#define MSR_P4_FSB_ESCR1               0x000003a3
+#define MSR_P4_IQ_ESCR0                        0x000003ba
+#define MSR_P4_IQ_ESCR1                        0x000003bb
+#define MSR_P4_IS_ESCR0                        0x000003b4
+#define MSR_P4_IS_ESCR1                        0x000003b5
+#define MSR_P4_ITLB_ESCR0              0x000003b6
+#define MSR_P4_ITLB_ESCR1              0x000003b7
+#define MSR_P4_IX_ESCR0                        0x000003c8
+#define MSR_P4_IX_ESCR1                        0x000003c9
+#define MSR_P4_MOB_ESCR0               0x000003aa
+#define MSR_P4_MOB_ESCR1               0x000003ab
+#define MSR_P4_MS_ESCR0                        0x000003c0
+#define MSR_P4_MS_ESCR1                        0x000003c1
+#define MSR_P4_PMH_ESCR0               0x000003ac
+#define MSR_P4_PMH_ESCR1               0x000003ad
+#define MSR_P4_RAT_ESCR0               0x000003bc
+#define MSR_P4_RAT_ESCR1               0x000003bd
+#define MSR_P4_SAAT_ESCR0              0x000003ae
+#define MSR_P4_SAAT_ESCR1              0x000003af
+#define MSR_P4_SSU_ESCR0               0x000003be
+#define MSR_P4_SSU_ESCR1               0x000003bf /* guess: not in manual */
+
+#define MSR_P4_TBPU_ESCR0              0x000003c2
+#define MSR_P4_TBPU_ESCR1              0x000003c3
+#define MSR_P4_TC_ESCR0                        0x000003c4
+#define MSR_P4_TC_ESCR1                        0x000003c5
+#define MSR_P4_U2L_ESCR0               0x000003b0
+#define MSR_P4_U2L_ESCR1               0x000003b1
+
+#define MSR_P4_PEBS_MATRIX_VERT                0x000003f2
+
+/* Intel Core-based CPU performance counters */
+#define MSR_CORE_PERF_FIXED_CTR0       0x00000309
+#define MSR_CORE_PERF_FIXED_CTR1       0x0000030a
+#define MSR_CORE_PERF_FIXED_CTR2       0x0000030b
+#define MSR_CORE_PERF_FIXED_CTR_CTRL   0x0000038d
+#define MSR_CORE_PERF_GLOBAL_STATUS    0x0000038e
+#define MSR_CORE_PERF_GLOBAL_CTRL      0x0000038f
+#define MSR_CORE_PERF_GLOBAL_OVF_CTRL  0x00000390
+
+/* Geode defined MSRs */
+#define MSR_GEODE_BUSCONT_CONF0                0x00001900
+
+/* Intel VT MSRs */
+#define MSR_IA32_VMX_BASIC              0x00000480
+#define MSR_IA32_VMX_PINBASED_CTLS      0x00000481
+#define MSR_IA32_VMX_PROCBASED_CTLS     0x00000482
+#define MSR_IA32_VMX_EXIT_CTLS          0x00000483
+#define MSR_IA32_VMX_ENTRY_CTLS         0x00000484
+#define MSR_IA32_VMX_MISC               0x00000485
+#define MSR_IA32_VMX_CR0_FIXED0         0x00000486
+#define MSR_IA32_VMX_CR0_FIXED1         0x00000487
+#define MSR_IA32_VMX_CR4_FIXED0         0x00000488
+#define MSR_IA32_VMX_CR4_FIXED1         0x00000489
+#define MSR_IA32_VMX_VMCS_ENUM          0x0000048a
+#define MSR_IA32_VMX_PROCBASED_CTLS2    0x0000048b
+#define MSR_IA32_VMX_EPT_VPID_CAP       0x0000048c
+#define MSR_IA32_VMX_TRUE_PINBASED_CTLS  0x0000048d
+#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
+#define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x0000048f
+#define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x00000490
+#define MSR_IA32_VMX_VMFUNC             0x00000491
+
+/* VMX_BASIC bits and bitmasks */
+#define VMX_BASIC_VMCS_SIZE_SHIFT      32
+#define VMX_BASIC_TRUE_CTLS            (1ULL << 55)
+#define VMX_BASIC_64           0x0001000000000000LLU
+#define VMX_BASIC_MEM_TYPE_SHIFT       50
+#define VMX_BASIC_MEM_TYPE_MASK        0x003c000000000000LLU
+#define VMX_BASIC_MEM_TYPE_WB  6LLU
+#define VMX_BASIC_INOUT                0x0040000000000000LLU
+
+/* MSR_IA32_VMX_MISC bits */
+#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
+#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
+
+/* AMD-V MSRs */
+
+#define MSR_VM_CR                       0xc0010114
+#define MSR_VM_IGNNE                    0xc0010115
+#define MSR_VM_HSAVE_PA                 0xc0010117
+
+#endif /* _ASM_X86_MSR_INDEX_H */
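
The MISC_ENABLE macros above pair each *_BIT number with a ready-made 1ULL mask, so callers can test a bit without open-coding shifts. A hedged user-space sketch of consuming one such mask through Linux's msr driver (assumes the msr module is loaded, CPU 0 is present, and root privileges; illustrative, not kernel code):

/* Read MSR_IA32_MISC_ENABLE via /dev/cpu/0/msr and test one bit.
 * pread() with the MSR number as the file offset is the msr(4) ABI. */
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_IA32_MISC_ENABLE             0x000001a0
#define MSR_IA32_MISC_ENABLE_FAST_STRING (1ULL << 0)

int main(void)
{
	uint64_t val;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	if (fd < 0 || pread(fd, &val, sizeof(val), MSR_IA32_MISC_ENABLE) != sizeof(val)) {
		perror("msr");
		return 1;
	}
	printf("fast strings: %s\n",
	       (val & MSR_IA32_MISC_ENABLE_FAST_STRING) ? "on" : "off");
	close(fd);
	return 0;
}
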
index de36f22eb0b9e79db05711cd46196c0c072bb023..e6a707eb508167dea3eda93a440cee3ba15f39db 100644 (file)
@@ -1,13 +1,14 @@
 #ifndef _ASM_X86_MSR_H
 #define _ASM_X86_MSR_H
 
-#include <uapi/asm/msr.h>
+#include "msr-index.h"
 
 #ifndef __ASSEMBLY__
 
 #include <asm/asm.h>
 #include <asm/errno.h>
 #include <asm/cpumask.h>
+#include <uapi/asm/msr.h>
 
 struct msr {
        union {
@@ -205,8 +206,13 @@ do {                                                            \
 
 #endif /* !CONFIG_PARAVIRT */
 
-#define wrmsrl_safe(msr, val) wrmsr_safe((msr), (u32)(val),            \
-                                            (u32)((val) >> 32))
+/*
+ * 64-bit version of wrmsr_safe():
+ */
+static inline int wrmsrl_safe(u32 msr, u64 val)
+{
+       return wrmsr_safe(msr, (u32)val,  (u32)(val >> 32));
+}
 
 #define write_tsc(low, high) wrmsr(MSR_IA32_TSC, (low), (high))
 
index f768f62984194a13da806c0fab7691ec8b4167d2..b94f6f64e23d0cf7e630c190fe48518b47e819ed 100644 (file)
@@ -31,7 +31,7 @@
  * arch_phys_wc_add and arch_phys_wc_del.
  */
 # ifdef CONFIG_MTRR
-extern u8 mtrr_type_lookup(u64 addr, u64 end);
+extern u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform);
 extern void mtrr_save_fixed_ranges(void *);
 extern void mtrr_save_state(void);
 extern int mtrr_add(unsigned long base, unsigned long size,
@@ -48,14 +48,13 @@ extern void mtrr_aps_init(void);
 extern void mtrr_bp_restore(void);
 extern int mtrr_trim_uncached_memory(unsigned long end_pfn);
 extern int amd_special_default_mtrr(void);
-extern int phys_wc_to_mtrr_index(int handle);
 #  else
-static inline u8 mtrr_type_lookup(u64 addr, u64 end)
+static inline u8 mtrr_type_lookup(u64 addr, u64 end, u8 *uniform)
 {
        /*
         * Return no-MTRRs:
         */
-       return 0xff;
+       return MTRR_TYPE_INVALID;
 }
 #define mtrr_save_fixed_ranges(arg) do {} while (0)
 #define mtrr_save_state() do {} while (0)
@@ -84,10 +83,6 @@ static inline int mtrr_trim_uncached_memory(unsigned long end_pfn)
 static inline void mtrr_centaur_report_mcr(int mcr, u32 lo, u32 hi)
 {
 }
-static inline int phys_wc_to_mtrr_index(int handle)
-{
-       return -1;
-}
 
 #define mtrr_ap_init() do {} while (0)
 #define mtrr_bp_init() do {} while (0)
@@ -127,4 +122,8 @@ struct mtrr_gentry32 {
                                 _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry32)
 #endif /* CONFIG_COMPAT */
 
+/* Bit fields for enabled in struct mtrr_state_type */
+#define MTRR_STATE_MTRR_FIXED_ENABLED  0x01
+#define MTRR_STATE_MTRR_ENABLED                0x02
+
 #endif /* _ASM_X86_MTRR_H */
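
mtrr_type_lookup() grows a third parameter here; going by the parameter name, *uniform reports whether the whole [start, end) range resolves to a single MTRR type. A hypothetical caller under that assumption (range_is_uniform_wb() is illustrative only):

#include <linux/types.h>
#include <asm/mtrr.h>

/* True when [addr, addr + size) is uniformly write-back per the MTRRs.
 * The 'uniform' semantics are assumed from the parameter name. */
static bool range_is_uniform_wb(u64 addr, u64 size)
{
	u8 uniform;
	u8 type = mtrr_type_lookup(addr, addr + size, &uniform);

	return type == MTRR_TYPE_WRBACK && uniform;
}
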
index 8957810ad7d1e348dd6315242c3411a863745be6..d143bfad45d70f98e541c14f0e4f94a312d7b2ad 100644 (file)
@@ -712,6 +712,31 @@ static inline void __set_fixmap(unsigned /* enum fixed_addresses */ idx,
 
 #if defined(CONFIG_SMP) && defined(CONFIG_PARAVIRT_SPINLOCKS)
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+static __always_inline void pv_queued_spin_lock_slowpath(struct qspinlock *lock,
+                                                       u32 val)
+{
+       PVOP_VCALL2(pv_lock_ops.queued_spin_lock_slowpath, lock, val);
+}
+
+static __always_inline void pv_queued_spin_unlock(struct qspinlock *lock)
+{
+       PVOP_VCALLEE1(pv_lock_ops.queued_spin_unlock, lock);
+}
+
+static __always_inline void pv_wait(u8 *ptr, u8 val)
+{
+       PVOP_VCALL2(pv_lock_ops.wait, ptr, val);
+}
+
+static __always_inline void pv_kick(int cpu)
+{
+       PVOP_VCALL1(pv_lock_ops.kick, cpu);
+}
+
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
 static __always_inline void __ticket_lock_spinning(struct arch_spinlock *lock,
                                                        __ticket_t ticket)
 {
@@ -724,7 +749,9 @@ static __always_inline void __ticket_unlock_kick(struct arch_spinlock *lock,
        PVOP_VCALL2(pv_lock_ops.unlock_kick, lock, ticket);
 }
 
-#endif
+#endif /* CONFIG_QUEUED_SPINLOCKS */
+
+#endif /* SMP && PARAVIRT_SPINLOCKS */
 
 #ifdef CONFIG_X86_32
 #define PV_SAVE_REGS "pushl %ecx; pushl %edx;"
index f7b0b5c112f28cc89c524231e9022b4560158b48..a6b8f9fadb06853862d9e8f2fe9e51d7698ec647 100644 (file)
@@ -160,13 +160,14 @@ struct pv_cpu_ops {
        u64 (*read_pmc)(int counter);
        unsigned long long (*read_tscp)(unsigned int *aux);
 
+#ifdef CONFIG_X86_32
        /*
         * Atomically enable interrupts and return to userspace.  This
-        * is only ever used to return to 32-bit processes; in a
-        * 64-bit kernel, it's used for 32-on-64 compat processes, but
-        * never native 64-bit processes.  (Jump, not call.)
+        * is only used in 32-bit kernels.  64-bit kernels use
+        * usergs_sysret32 instead.
         */
        void (*irq_enable_sysexit)(void);
+#endif
 
        /*
         * Switch to usermode gs and return to 64-bit usermode using
@@ -333,9 +334,19 @@ struct arch_spinlock;
 typedef u16 __ticket_t;
 #endif
 
+struct qspinlock;
+
 struct pv_lock_ops {
+#ifdef CONFIG_QUEUED_SPINLOCKS
+       void (*queued_spin_lock_slowpath)(struct qspinlock *lock, u32 val);
+       struct paravirt_callee_save queued_spin_unlock;
+
+       void (*wait)(u8 *ptr, u8 val);
+       void (*kick)(int cpu);
+#else /* !CONFIG_QUEUED_SPINLOCKS */
        struct paravirt_callee_save lock_spinning;
        void (*unlock_kick)(struct arch_spinlock *lock, __ticket_t ticket);
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
 };
 
 /* This contains all the paravirt structures: we get a convenient
index 91bc4ba95f919e90c3a398469f2dd52ca40e3cdd..ca6c228d5e62837be88984b652bb436949295d03 100644 (file)
@@ -4,14 +4,9 @@
 #include <linux/types.h>
 #include <asm/pgtable_types.h>
 
-#ifdef CONFIG_X86_PAT
-extern int pat_enabled;
-#else
-static const int pat_enabled;
-#endif
-
+bool pat_enabled(void);
 extern void pat_init(void);
-void pat_init_cache_modes(void);
+void pat_init_cache_modes(u64);
 
 extern int reserve_memtype(u64 start, u64 end,
                enum page_cache_mode req_pcm, enum page_cache_mode *ret_pcm);
index 4e370a5d81170e4fb4c6fa5d1abaf451c87cf502..d8c80ff32e8cfce6233401637f22fb30988bd76f 100644 (file)
@@ -96,15 +96,10 @@ extern void pci_iommu_alloc(void);
 #ifdef CONFIG_PCI_MSI
 /* implemented in arch/x86/kernel/apic/io_apic. */
 struct msi_desc;
-void native_compose_msi_msg(struct pci_dev *pdev, unsigned int irq,
-                           unsigned int dest, struct msi_msg *msg, u8 hpet_id);
 int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
 void native_teardown_msi_irq(unsigned int irq);
 void native_restore_msi_irqs(struct pci_dev *dev);
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                 unsigned int irq_base, unsigned int irq_offset);
 #else
-#define native_compose_msi_msg         NULL
 #define native_setup_msi_irqs          NULL
 #define native_teardown_msi_irq                NULL
 #endif
index fe57e7a98839801ce76a6a33c0f3a7552e5d292a..2562e303405b9d3c314c46a1ce180d4d097d7a11 100644 (file)
@@ -398,11 +398,17 @@ static inline int is_new_memtype_allowed(u64 paddr, unsigned long size,
         * requested memtype:
         * - request is uncached, return cannot be write-back
         * - request is write-combine, return cannot be write-back
+        * - request is write-through, return cannot be write-back
+        * - request is write-through, return cannot be write-combine
         */
        if ((pcm == _PAGE_CACHE_MODE_UC_MINUS &&
             new_pcm == _PAGE_CACHE_MODE_WB) ||
            (pcm == _PAGE_CACHE_MODE_WC &&
-            new_pcm == _PAGE_CACHE_MODE_WB)) {
+            new_pcm == _PAGE_CACHE_MODE_WB) ||
+           (pcm == _PAGE_CACHE_MODE_WT &&
+            new_pcm == _PAGE_CACHE_MODE_WB) ||
+           (pcm == _PAGE_CACHE_MODE_WT &&
+            new_pcm == _PAGE_CACHE_MODE_WC)) {
                return 0;
        }
 
index 78f0c8cbe316f9114746877420cfc400604bd486..13f310bfc09a754bfa3c69305476421e7dd67706 100644 (file)
@@ -367,6 +367,9 @@ extern int nx_enabled;
 #define pgprot_writecombine    pgprot_writecombine
 extern pgprot_t pgprot_writecombine(pgprot_t prot);
 
+#define pgprot_writethrough    pgprot_writethrough
+extern pgprot_t pgprot_writethrough(pgprot_t prot);
+
 /* Indicate that x86 has its own track and untrack pfn vma functions */
 #define __HAVE_PFNMAP_TRACKING
 
index 8f32718425339f426778bbe09e2c468e5ff63814..dca71714f86076f5fae6508fe6bbcb2c573723ae 100644 (file)
@@ -99,11 +99,9 @@ static __always_inline bool should_resched(void)
   extern asmlinkage void ___preempt_schedule(void);
 # define __preempt_schedule() asm ("call ___preempt_schedule")
   extern asmlinkage void preempt_schedule(void);
-# ifdef CONFIG_CONTEXT_TRACKING
-    extern asmlinkage void ___preempt_schedule_context(void);
-#   define __preempt_schedule_context() asm ("call ___preempt_schedule_context")
-    extern asmlinkage void preempt_schedule_context(void);
-# endif
+  extern asmlinkage void ___preempt_schedule_notrace(void);
+# define __preempt_schedule_notrace() asm ("call ___preempt_schedule_notrace")
+  extern asmlinkage void preempt_schedule_notrace(void);
 #endif
 
 #endif /* __ASM_PREEMPT_H */
index 23ba6765b718c790dbf698edbb2758a2ca6f9102..43e6519df0d507429a9533b51c7a28f2c0f3b90b 100644 (file)
@@ -21,6 +21,7 @@ struct mm_struct;
 #include <asm/desc_defs.h>
 #include <asm/nops.h>
 #include <asm/special_insns.h>
+#include <asm/fpu/types.h>
 
 #include <linux/personality.h>
 #include <linux/cpumask.h>
@@ -52,11 +53,16 @@ static inline void *current_text_addr(void)
        return pc;
 }
 
+/*
+ * These alignment constraints are for performance in the vSMP case,
+ * but in the task_struct case we must also meet hardware imposed
+ * alignment requirements of the FPU state:
+ */
 #ifdef CONFIG_X86_VSMP
 # define ARCH_MIN_TASKALIGN            (1 << INTERNODE_CACHE_SHIFT)
 # define ARCH_MIN_MMSTRUCT_ALIGN       (1 << INTERNODE_CACHE_SHIFT)
 #else
-# define ARCH_MIN_TASKALIGN            16
+# define ARCH_MIN_TASKALIGN            __alignof__(union fpregs_state)
 # define ARCH_MIN_MMSTRUCT_ALIGN       0
 #endif
 
@@ -166,7 +172,6 @@ extern const struct seq_operations cpuinfo_op;
 #define cache_line_size()      (boot_cpu_data.x86_cache_alignment)
 
 extern void cpu_detect(struct cpuinfo_x86 *c);
-extern void fpu_detect(struct cpuinfo_x86 *c);
 
 extern void early_cpu_init(void);
 extern void identify_boot_cpu(void);
@@ -313,128 +318,6 @@ struct orig_ist {
        unsigned long           ist[7];
 };
 
-#define        MXCSR_DEFAULT           0x1f80
-
-struct i387_fsave_struct {
-       u32                     cwd;    /* FPU Control Word             */
-       u32                     swd;    /* FPU Status Word              */
-       u32                     twd;    /* FPU Tag Word                 */
-       u32                     fip;    /* FPU IP Offset                */
-       u32                     fcs;    /* FPU IP Selector              */
-       u32                     foo;    /* FPU Operand Pointer Offset   */
-       u32                     fos;    /* FPU Operand Pointer Selector */
-
-       /* 8*10 bytes for each FP-reg = 80 bytes:                       */
-       u32                     st_space[20];
-
-       /* Software status information [not touched by FSAVE ]:         */
-       u32                     status;
-};
-
-struct i387_fxsave_struct {
-       u16                     cwd; /* Control Word                    */
-       u16                     swd; /* Status Word                     */
-       u16                     twd; /* Tag Word                        */
-       u16                     fop; /* Last Instruction Opcode         */
-       union {
-               struct {
-                       u64     rip; /* Instruction Pointer             */
-                       u64     rdp; /* Data Pointer                    */
-               };
-               struct {
-                       u32     fip; /* FPU IP Offset                   */
-                       u32     fcs; /* FPU IP Selector                 */
-                       u32     foo; /* FPU Operand Offset              */
-                       u32     fos; /* FPU Operand Selector            */
-               };
-       };
-       u32                     mxcsr;          /* MXCSR Register State */
-       u32                     mxcsr_mask;     /* MXCSR Mask           */
-
-       /* 8*16 bytes for each FP-reg = 128 bytes:                      */
-       u32                     st_space[32];
-
-       /* 16*16 bytes for each XMM-reg = 256 bytes:                    */
-       u32                     xmm_space[64];
-
-       u32                     padding[12];
-
-       union {
-               u32             padding1[12];
-               u32             sw_reserved[12];
-       };
-
-} __attribute__((aligned(16)));
-
-struct i387_soft_struct {
-       u32                     cwd;
-       u32                     swd;
-       u32                     twd;
-       u32                     fip;
-       u32                     fcs;
-       u32                     foo;
-       u32                     fos;
-       /* 8*10 bytes for each FP-reg = 80 bytes: */
-       u32                     st_space[20];
-       u8                      ftop;
-       u8                      changed;
-       u8                      lookahead;
-       u8                      no_update;
-       u8                      rm;
-       u8                      alimit;
-       struct math_emu_info    *info;
-       u32                     entry_eip;
-};
-
-struct ymmh_struct {
-       /* 16 * 16 bytes for each YMMH-reg = 256 bytes */
-       u32 ymmh_space[64];
-};
-
-/* We don't support LWP yet: */
-struct lwp_struct {
-       u8 reserved[128];
-};
-
-struct bndreg {
-       u64 lower_bound;
-       u64 upper_bound;
-} __packed;
-
-struct bndcsr {
-       u64 bndcfgu;
-       u64 bndstatus;
-} __packed;
-
-struct xsave_hdr_struct {
-       u64 xstate_bv;
-       u64 xcomp_bv;
-       u64 reserved[6];
-} __attribute__((packed));
-
-struct xsave_struct {
-       struct i387_fxsave_struct i387;
-       struct xsave_hdr_struct xsave_hdr;
-       struct ymmh_struct ymmh;
-       struct lwp_struct lwp;
-       struct bndreg bndreg[4];
-       struct bndcsr bndcsr;
-       /* new processor state extensions will go here */
-} __attribute__ ((packed, aligned (64)));
-
-union thread_xstate {
-       struct i387_fsave_struct        fsave;
-       struct i387_fxsave_struct       fxsave;
-       struct i387_soft_struct         soft;
-       struct xsave_struct             xsave;
-};
-
-struct fpu {
-       unsigned int last_cpu;
-       unsigned int has_fpu;
-       union thread_xstate *state;
-};
-
 #ifdef CONFIG_X86_64
 DECLARE_PER_CPU(struct orig_ist, orig_ist);
 
@@ -483,8 +366,6 @@ DECLARE_PER_CPU(struct irq_stack *, softirq_stack);
 #endif /* X86_64 */
 
 extern unsigned int xstate_size;
-extern void free_thread_xstate(struct task_struct *);
-extern struct kmem_cache *task_xstate_cachep;
 
 struct perf_event;
 
@@ -508,6 +389,10 @@ struct thread_struct {
        unsigned long           fs;
 #endif
        unsigned long           gs;
+
+       /* Floating point and extended processor state */
+       struct fpu              fpu;
+
        /* Save middle states of ptrace breakpoints */
        struct perf_event       *ptrace_bps[HBP_NUM];
        /* Debug status used for traps, single steps, etc... */
@@ -518,8 +403,6 @@ struct thread_struct {
        unsigned long           cr2;
        unsigned long           trap_nr;
        unsigned long           error_code;
-       /* floating point and extended processor state */
-       struct fpu              fpu;
 #ifdef CONFIG_X86_32
        /* Virtual 86 mode info */
        struct vm86_struct __user *vm86_info;
@@ -535,15 +418,6 @@ struct thread_struct {
        unsigned long           iopl;
        /* Max allowed port in the bitmap, in bytes: */
        unsigned                io_bitmap_max;
-       /*
-        * fpu_counter contains the number of consecutive context switches
-        * that the FPU is used. If this is over a threshold, the lazy fpu
-        * saving becomes unlazy to save the trap. This is an unsigned char
-        * so that after 256 times the counter wraps and the behavior turns
-        * lazy again; this to deal with bursty apps that only use FPU for
-        * a short time
-        */
-       unsigned char fpu_counter;
 };
 
 /*
@@ -928,24 +802,25 @@ extern int get_tsc_mode(unsigned long adr);
 extern int set_tsc_mode(unsigned int val);
 
 /* Register/unregister a process' MPX related resource */
-#define MPX_ENABLE_MANAGEMENT(tsk)     mpx_enable_management((tsk))
-#define MPX_DISABLE_MANAGEMENT(tsk)    mpx_disable_management((tsk))
+#define MPX_ENABLE_MANAGEMENT()        mpx_enable_management()
+#define MPX_DISABLE_MANAGEMENT()       mpx_disable_management()
 
 #ifdef CONFIG_X86_INTEL_MPX
-extern int mpx_enable_management(struct task_struct *tsk);
-extern int mpx_disable_management(struct task_struct *tsk);
+extern int mpx_enable_management(void);
+extern int mpx_disable_management(void);
 #else
-static inline int mpx_enable_management(struct task_struct *tsk)
+static inline int mpx_enable_management(void)
 {
        return -EINVAL;
 }
-static inline int mpx_disable_management(struct task_struct *tsk)
+static inline int mpx_disable_management(void)
 {
        return -EINVAL;
 }
 #endif /* CONFIG_X86_INTEL_MPX */
 
 extern u16 amd_get_nb_id(int cpu);
+extern u32 amd_get_nodes_per_socket(void);
 
 static inline uint32_t hypervisor_cpuid_base(const char *sig, uint32_t leaves)
 {
index a90f8972dad507240ae946b61fd8a5f217d4be52..a4a77286cb1ddf68b466e934e9f69cb1785f3062 100644 (file)
@@ -5,12 +5,14 @@
 
 /* misc architecture specific prototypes */
 
-void system_call(void);
 void syscall_init(void);
 
-void ia32_syscall(void);
-void ia32_cstar_target(void);
-void ia32_sysenter_target(void);
+void entry_SYSCALL_64(void);
+void entry_SYSCALL_compat(void);
+void entry_INT80_32(void);
+void entry_INT80_compat(void);
+void entry_SYSENTER_32(void);
+void entry_SYSENTER_compat(void);
 
 void x86_configure_nx(void);
 void x86_report_nx(void);
index 19507ffa5d28e9ce3ddece3856dd9cde4446f7f8..5fabf1362942c65e5fc4327511e51487a14bd5d7 100644 (file)
@@ -107,7 +107,7 @@ static inline unsigned long regs_return_value(struct pt_regs *regs)
 static inline int user_mode(struct pt_regs *regs)
 {
 #ifdef CONFIG_X86_32
-       return (regs->cs & SEGMENT_RPL_MASK) == USER_RPL;
+       return ((regs->cs & SEGMENT_RPL_MASK) | (regs->flags & X86_VM_MASK)) >= USER_RPL;
 #else
        return !!(regs->cs & 3);
 #endif
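
The 32-bit user_mode() change is subtle: under vm86 the saved CS can carry RPL 0, so the old RPL-only test misclassified vm86 frames as kernel mode. OR-ing in EFLAGS.VM makes any vm86 frame compare >= USER_RPL. A standalone sketch with the architectural constants:

#include <stdint.h>
#include <stdio.h>

#define SEGMENT_RPL_MASK 0x3
#define USER_RPL         0x3
#define X86_VM_MASK      (1u << 17)	/* EFLAGS.VM */

static int user_mode_32(uint32_t cs, uint32_t flags)
{
	return ((cs & SEGMENT_RPL_MASK) | (flags & X86_VM_MASK)) >= USER_RPL;
}

int main(void)
{
	printf("kernel: %d\n", user_mode_32(0x10, 0));		/* 0 */
	printf("user:   %d\n", user_mode_32(0x23, 0));		/* 1 */
	printf("vm86:   %d\n", user_mode_32(0x00, X86_VM_MASK));	/* 1 */
	return 0;
}
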
diff --git a/arch/x86/include/asm/qspinlock.h b/arch/x86/include/asm/qspinlock.h
new file mode 100644 (file)
index 0000000..9d51fae
--- /dev/null
@@ -0,0 +1,57 @@
+#ifndef _ASM_X86_QSPINLOCK_H
+#define _ASM_X86_QSPINLOCK_H
+
+#include <asm/cpufeature.h>
+#include <asm-generic/qspinlock_types.h>
+#include <asm/paravirt.h>
+
+#define        queued_spin_unlock queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * An smp_store_release() on the least-significant byte.
+ */
+static inline void native_queued_spin_unlock(struct qspinlock *lock)
+{
+       smp_store_release((u8 *)lock, 0);
+}
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+extern void native_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __pv_init_lock_hash(void);
+extern void __pv_queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+extern void __raw_callee_save___pv_queued_spin_unlock(struct qspinlock *lock);
+
+static inline void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+       pv_queued_spin_lock_slowpath(lock, val);
+}
+
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       pv_queued_spin_unlock(lock);
+}
+#else
+static inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       native_queued_spin_unlock(lock);
+}
+#endif
+
+#define virt_queued_spin_lock virt_queued_spin_lock
+
+static inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+       if (!static_cpu_has(X86_FEATURE_HYPERVISOR))
+               return false;
+
+       while (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) != 0)
+               cpu_relax();
+
+       return true;
+}
+
+#include <asm-generic/qspinlock.h>
+
+#endif /* _ASM_X86_QSPINLOCK_H */
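
When running under a hypervisor, virt_queued_spin_lock() above bypasses the queued slowpath in favor of a plain test-and-set loop, since queueing behaves poorly when vCPUs can be preempted. The same fallback expressed with C11 atomics, as a standalone sketch:

#include <stdatomic.h>

#define LOCKED_VAL 1	/* stands in for _Q_LOCKED_VAL */

static void tas_lock(atomic_int *val)
{
	int expected = 0;

	/* Spin until we swap 0 -> LOCKED_VAL; CAS writes the observed
	 * value back into 'expected' on failure, so reset it each try. */
	while (!atomic_compare_exchange_weak(val, &expected, LOCKED_VAL))
		expected = 0;
}

static void tas_unlock(atomic_int *val)
{
	atomic_store_explicit(val, 0, memory_order_release);
}
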
diff --git a/arch/x86/include/asm/qspinlock_paravirt.h b/arch/x86/include/asm/qspinlock_paravirt.h
new file mode 100644 (file)
index 0000000..b002e71
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __ASM_QSPINLOCK_PARAVIRT_H
+#define __ASM_QSPINLOCK_PARAVIRT_H
+
+PV_CALLEE_SAVE_REGS_THUNK(__pv_queued_spin_unlock);
+
+#endif
index 5a9856eb12bad7edb0f9a333870e331f5677d588..7d5a1929d76b31bba69295e533e460ed50904cfd 100644 (file)
 #define TLS_SIZE                       (GDT_ENTRY_TLS_ENTRIES* 8)
 
 #ifdef __KERNEL__
+
+/*
+ * early_idt_handler_array is an array of entry points referenced in the
+ * early IDT.  For simplicity, it's a real array with one entry point
+ * every nine bytes.  That leaves room for an optional 'push $0' if the
+ * vector has no error code (two bytes), a 'push $vector_number' (two
+ * bytes), and a jump to the common entry code (up to five bytes).
+ */
+#define EARLY_IDT_HANDLER_SIZE 9
+
 #ifndef __ASSEMBLY__
 
-extern const char early_idt_handlers[NUM_EXCEPTION_VECTORS][2+2+5];
+extern const char early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];
 #ifdef CONFIG_TRACING
-# define trace_early_idt_handlers early_idt_handlers
+# define trace_early_idt_handler_array early_idt_handler_array
 #endif
 
 /*
index ee80b92f00962a392a824221dbdbeb781b6312bf..6c8a7ed13365ae5dd675fd4b158c8034ba213a29 100644 (file)
@@ -1,5 +1,5 @@
 
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 /*
  * may_use_simd - whether it is allowable at this time to issue SIMD
index 17a8dced12daef5c9fcc5351c165994225295372..222a6a3ca2b5ebeeff21037b8a3b517b8fd3353a 100644 (file)
@@ -37,16 +37,6 @@ DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
 DECLARE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id);
 DECLARE_PER_CPU_READ_MOSTLY(int, cpu_number);
 
-static inline struct cpumask *cpu_sibling_mask(int cpu)
-{
-       return per_cpu(cpu_sibling_map, cpu);
-}
-
-static inline struct cpumask *cpu_core_mask(int cpu)
-{
-       return per_cpu(cpu_core_map, cpu);
-}
-
 static inline struct cpumask *cpu_llc_shared_mask(int cpu)
 {
        return per_cpu(cpu_llc_shared_map, cpu);
index aeb4666e0c0a770a7fbb8432b7d133b2dd9e764d..2270e41b32fd856e226840d4f76dddaa7852d13d 100644 (file)
@@ -215,6 +215,44 @@ static inline void clwb(volatile void *__p)
                : [pax] "a" (p));
 }
 
+/**
+ * pcommit_sfence() - persistent commit and fence
+ *
+ * The PCOMMIT instruction ensures that data that has been flushed from the
+ * processor's cache hierarchy with CLWB, CLFLUSHOPT or CLFLUSH is accepted to
+ * memory and is durable on the DIMM.  The primary use case for this is
+ * persistent memory.
+ *
+ * This function shows how to properly use CLWB/CLFLUSHOPT/CLFLUSH and PCOMMIT
+ * with appropriate fencing.
+ *
+ * Example:
+ * void flush_and_commit_buffer(void *vaddr, unsigned int size)
+ * {
+ *         unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+ *         void *vend = vaddr + size;
+ *         void *p;
+ *
+ *         for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+ *              p < vend; p += boot_cpu_data.x86_clflush_size)
+ *                 clwb(p);
+ *
+ *         // SFENCE to order CLWB/CLFLUSHOPT/CLFLUSH cache flushes
+ *         // MFENCE via mb() also works
+ *         wmb();
+ *
+ *         // PCOMMIT and the required SFENCE for ordering
+ *         pcommit_sfence();
+ * }
+ *
+ * After this function completes the data pointed to by 'vaddr' has been
+ * accepted to memory and will be durable if the 'vaddr' points to persistent
+ * memory.
+ *
+ * PCOMMIT must always be ordered by an MFENCE or SFENCE, so to help simplify
+ * things we include both the PCOMMIT and the required SFENCE in the
+ * alternatives generated by pcommit_sfence().
+ */
 static inline void pcommit_sfence(void)
 {
        alternative(ASM_NOP7,
index 64b611782ef0856f1744611936f76d6e8de1bb57..be0a05913b9105b62122afbbbef99c6146b2dbf6 100644 (file)
 extern struct static_key paravirt_ticketlocks_enabled;
 static __always_inline bool static_key_false(struct static_key *key);
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm/qspinlock.h>
+#else
+
 #ifdef CONFIG_PARAVIRT_SPINLOCKS
 
 static inline void __ticket_enter_slowpath(arch_spinlock_t *lock)
@@ -196,6 +200,7 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
                cpu_relax();
        }
 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 /*
  * Read-write spinlocks, allowing multiple readers
index 5f9d7572d82b190a2b2d2219bb205a1e543d5acd..65c3e37f879aced6501861eff95d56e08735236a 100644 (file)
@@ -23,6 +23,9 @@ typedef u32 __ticketpair_t;
 
 #define TICKET_SHIFT   (sizeof(__ticket_t) * 8)
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+#include <asm-generic/qspinlock_types.h>
+#else
 typedef struct arch_spinlock {
        union {
                __ticketpair_t head_tail;
@@ -33,6 +36,7 @@ typedef struct arch_spinlock {
 } arch_spinlock_t;
 
 #define __ARCH_SPIN_LOCK_UNLOCKED      { { 0 } }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 #include <asm-generic/qrwlock_types.h>
 
index 6a998598f172424f198bf7e034676a8c47b2eb9b..c2e00bb2a1365cef17911e262d28d0fad75e6828 100644 (file)
@@ -39,7 +39,9 @@
 #include <asm/processor.h>
 #include <asm/percpu.h>
 #include <asm/desc.h>
+
 #include <linux/random.h>
+#include <linux/sched.h>
 
 /*
  * 24 byte read-only segment initializer for stack canary.  Linker
index 552d6c90a6d43dbebbae0756ba10061bc0969e8d..d1793f06854d28f22481c40f03cd2171d7b13495 100644 (file)
@@ -7,7 +7,7 @@
 #define _ASM_X86_SUSPEND_32_H
 
 #include <asm/desc.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 /* image of the saved processor state */
 struct saved_context {
index bc6232834babf33973c8a4e6da1ce63cd159dce4..7ebf0ebe4e687f3704cac47b3de3dd1b906d27e8 100644 (file)
@@ -7,7 +7,7 @@
 #define _ASM_X86_SUSPEND_64_H
 
 #include <asm/desc.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 /*
  * Image of the saved processor state, used by the low level ACPI suspend to
index b4bdec3e9523e03b7ece3154302c8d95ce2a9ac8..225ee545e1a05e36bfa25eb6a214102b8be8c0bf 100644 (file)
@@ -177,8 +177,6 @@ struct thread_info {
  */
 #ifndef __ASSEMBLY__
 
-DECLARE_PER_CPU(unsigned long, kernel_stack);
-
 static inline struct thread_info *current_thread_info(void)
 {
        return (struct thread_info *)(current_top_of_stack() - THREAD_SIZE);
@@ -197,9 +195,13 @@ static inline unsigned long current_stack_pointer(void)
 
 #else /* !__ASSEMBLY__ */
 
+#ifdef CONFIG_X86_64
+# define cpu_current_top_of_stack (cpu_tss + TSS_sp0)
+#endif
+
 /* Load thread_info address into "reg" */
 #define GET_THREAD_INFO(reg) \
-       _ASM_MOV PER_CPU_VAR(kernel_stack),reg ; \
+       _ASM_MOV PER_CPU_VAR(cpu_current_top_of_stack),reg ; \
        _ASM_SUB $(THREAD_SIZE),reg ;
 
 /*
index 0e8f04f2c26fda3935093b7040dac783e621d71b..0fb46482dfde160b9dcfad6ef57841e07c3830e2 100644 (file)
@@ -26,7 +26,7 @@
 #define _ASM_X86_TOPOLOGY_H
 
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_HT
+# ifdef CONFIG_SMP
 #  define ENABLE_TOPO_DEFINES
 # endif
 #else
@@ -124,7 +124,7 @@ extern const struct cpumask *cpu_coregroup_mask(int cpu);
 
 #ifdef ENABLE_TOPO_DEFINES
 #define topology_core_cpumask(cpu)             (per_cpu(cpu_core_map, cpu))
-#define topology_thread_cpumask(cpu)           (per_cpu(cpu_sibling_map, cpu))
+#define topology_sibling_cpumask(cpu)          (per_cpu(cpu_sibling_map, cpu))
 #endif
 
 static inline void arch_fix_phys_package_id(int num, u32 slot)
index 4cab890007a7267ecc7ce148eaa1fdfbee4a49df..38a09a13a9bcdab24f0e2f08fd67f10cc25236dd 100644 (file)
@@ -100,6 +100,12 @@ DEFINE_IRQ_VECTOR_EVENT(call_function_single);
  */
 DEFINE_IRQ_VECTOR_EVENT(threshold_apic);
 
+/*
+ * deferred_error_apic - called when entering/exiting a deferred apic interrupt
+ * vector handler
+ */
+DEFINE_IRQ_VECTOR_EVENT(deferred_error_apic);
+
 /*
  * thermal_apic - called when entering/exiting a thermal apic interrupt
  * vector handler
diff --git a/arch/x86/include/asm/trace/mpx.h b/arch/x86/include/asm/trace/mpx.h
new file mode 100644 (file)
index 0000000..173dd3b
--- /dev/null
@@ -0,0 +1,132 @@
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM mpx
+
+#if !defined(_TRACE_MPX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_MPX_H
+
+#include <linux/tracepoint.h>
+
+#ifdef CONFIG_X86_INTEL_MPX
+
+TRACE_EVENT(mpx_bounds_register_exception,
+
+       TP_PROTO(void *addr_referenced,
+                const struct bndreg *bndreg),
+       TP_ARGS(addr_referenced, bndreg),
+
+       TP_STRUCT__entry(
+               __field(void *, addr_referenced)
+               __field(u64, lower_bound)
+               __field(u64, upper_bound)
+       ),
+
+       TP_fast_assign(
+               __entry->addr_referenced = addr_referenced;
+               __entry->lower_bound = bndreg->lower_bound;
+               __entry->upper_bound = bndreg->upper_bound;
+       ),
+       /*
+        * Note that we are printing out the '~' of the upper
+        * bounds register here.  It is actually stored in its
+        * one's complement form so that its 'init' state
+        * corresponds to all 0's.  But, that looks like
+        * gibberish when printed out, so print out the 1's
+        * complement instead of the actual value here.  Note
+        * though that you still need to specify filters for the
+        * actual value, not the displayed one.
+        */
+       TP_printk("address referenced: 0x%p bounds: lower: 0x%llx ~upper: 0x%llx",
+               __entry->addr_referenced,
+               __entry->lower_bound,
+               ~__entry->upper_bound
+       )
+);
+
+TRACE_EVENT(bounds_exception_mpx,
+
+       TP_PROTO(const struct bndcsr *bndcsr),
+       TP_ARGS(bndcsr),
+
+       TP_STRUCT__entry(
+               __field(u64, bndcfgu)
+               __field(u64, bndstatus)
+       ),
+
+       TP_fast_assign(
+               /* need to get rid of the 'const' on bndcsr */
+               __entry->bndcfgu   = (u64)bndcsr->bndcfgu;
+               __entry->bndstatus = (u64)bndcsr->bndstatus;
+       ),
+
+       TP_printk("bndcfgu:0x%llx bndstatus:0x%llx",
+               __entry->bndcfgu,
+               __entry->bndstatus)
+);
+
+DECLARE_EVENT_CLASS(mpx_range_trace,
+
+       TP_PROTO(unsigned long start,
+                unsigned long end),
+       TP_ARGS(start, end),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, start)
+               __field(unsigned long, end)
+       ),
+
+       TP_fast_assign(
+               __entry->start = start;
+               __entry->end   = end;
+       ),
+
+       TP_printk("[0x%p:0x%p]",
+               (void *)__entry->start,
+               (void *)__entry->end
+       )
+);
+
+DEFINE_EVENT(mpx_range_trace, mpx_unmap_zap,
+       TP_PROTO(unsigned long start, unsigned long end),
+       TP_ARGS(start, end)
+);
+
+DEFINE_EVENT(mpx_range_trace, mpx_unmap_search,
+       TP_PROTO(unsigned long start, unsigned long end),
+       TP_ARGS(start, end)
+);
+
+TRACE_EVENT(mpx_new_bounds_table,
+
+       TP_PROTO(unsigned long table_vaddr),
+       TP_ARGS(table_vaddr),
+
+       TP_STRUCT__entry(
+               __field(unsigned long, table_vaddr)
+       ),
+
+       TP_fast_assign(
+               __entry->table_vaddr = table_vaddr;
+       ),
+
+       TP_printk("table vaddr:%p", (void *)__entry->table_vaddr)
+);
+
+#else
+
+/*
+ * This gets used outside of MPX-specific code, so we need a stub.
+ */
+static inline void trace_bounds_exception_mpx(const struct bndcsr *bndcsr)
+{
+}
+
+#endif /* CONFIG_X86_INTEL_MPX */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH asm/trace/
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE mpx
+#endif /* _TRACE_MPX_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
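
The TRACE_EVENT() declarations above generate trace_<name>() callables; a hypothetical call site firing the bounds-table event (the real callers live in the MPX core, presumably arch/x86/mm/mpx.c):

#include <asm/trace/mpx.h>

/* Illustrative only: report a freshly allocated bounds table. */
static void report_new_bounds_table(unsigned long table_vaddr)
{
	trace_mpx_new_bounds_table(table_vaddr);
}
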
index 4e49d7dff78e5f30ffb6353c37277eaf6ab264aa..c5380bea2a36aaa533fd8aa28c58ff2ee68a7dec 100644 (file)
@@ -108,7 +108,8 @@ extern int panic_on_unrecovered_nmi;
 void math_emulate(struct math_emu_info *);
 #ifndef CONFIG_X86_32
 asmlinkage void smp_thermal_interrupt(void);
-asmlinkage void mce_threshold_interrupt(void);
+asmlinkage void smp_threshold_interrupt(void);
+asmlinkage void smp_deferred_error_interrupt(void);
 #endif
 
 extern enum ctx_state ist_enter(struct pt_regs *regs);
index ace9dec050b17b1a766899a946ab83d6bbe31641..a8df874f3e8825b0ea6bde91909b8b3d7b380ba7 100644 (file)
@@ -74,7 +74,8 @@ static inline bool __chk_range_not_ok(unsigned long addr, unsigned long size, un
  * @addr: User space pointer to start of block to check
  * @size: Size of block to check
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Checks if a pointer to a block of memory in user space is valid.
  *
@@ -145,7 +146,8 @@ __typeof__(__builtin_choose_expr(sizeof(x) > sizeof(0UL), 0ULL, 0UL))
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -240,7 +242,8 @@ extern void __put_user_8(void);
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
@@ -455,7 +458,8 @@ struct __large_struct { unsigned long buf[100]; };
  * @x:   Variable to store result.
  * @ptr: Source address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple variable from user space to kernel
  * space.  It supports simple types like char and int, but not larger
@@ -479,7 +483,8 @@ struct __large_struct { unsigned long buf[100]; };
  * @x:   Value to copy to user space.
  * @ptr: Destination address, in user space.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * This macro copies a single simple value from kernel space to user
  * space.  It supports simple types like char and int, but not larger
index 3c03a5de64d30c01c1408953bf75c4970f54bcb1..f5dcb5204dcd5b27e8b8e9a1b87612a28cda10c6 100644 (file)
@@ -59,6 +59,10 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
                        __put_user_size(*(u32 *)from, (u32 __user *)to,
                                        4, ret, 4);
                        return ret;
+               case 8:
+                       __put_user_size(*(u64 *)from, (u64 __user *)to,
+                                       8, ret, 8);
+                       return ret;
                }
        }
        return __copy_to_user_ll(to, from, n);
@@ -70,7 +74,8 @@ __copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.  Caller must check
  * the specified block with access_ok() before calling this function.
@@ -117,7 +122,8 @@ __copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.  Caller must check
  * the specified block with access_ok() before calling this function.
index ccab4af1646d440584bf981c3a08784590b961dc..59a54e869f1598f05a38310e955d360f59bd87ad 100644 (file)
@@ -14,8 +14,8 @@ struct user_ymmh_regs {
        __u32 ymmh_space[64];
 };
 
-struct user_xsave_hdr {
-       __u64 xstate_bv;
+struct user_xstate_header {
+       __u64 xfeatures;
        __u64 reserved1[2];
        __u64 reserved2[5];
 };
@@ -41,11 +41,11 @@ struct user_xsave_hdr {
  * particular process/thread.
  *
  * Also when the user modifies certain state FP/SSE/etc through the
- * ptrace interface, they must ensure that the xsave_hdr.xstate_bv
+ * ptrace interface, they must ensure that the header.xfeatures
  * bytes[512..519] of the memory layout are updated correspondingly.
  * i.e., for example when FP state is modified to a non-init state,
- * xsave_hdr.xstate_bv's bit 0 must be set to '1', when SSE is modified to
- * non-init state, xsave_hdr.xstate_bv's bit 1 must to be set to '1', etc.
+ * header.xfeatures's bit 0 must be set to '1', when SSE is modified to
+ * non-init state, header.xfeatures's bit 1 must be set to '1', etc.
  */
 #define USER_XSTATE_FX_SW_WORDS 6
 #define USER_XSTATE_XCR0_WORD  0
@@ -55,7 +55,7 @@ struct user_xstateregs {
                __u64 fpx_space[58];
                __u64 xstate_fx_sw[USER_XSTATE_FX_SW_WORDS];
        } i387;
-       struct user_xsave_hdr xsave_hdr;
+       struct user_xstate_header header;
        struct user_ymmh_regs ymmh;
        /* further processor state extensions go here */
 };
index f58a9c7a3c86658d6094be935fff23b50f785cc5..48d34d28f5a60543bc72471e2a1931fcf13dc04e 100644 (file)
@@ -171,38 +171,17 @@ struct x86_platform_ops {
 };
 
 struct pci_dev;
-struct msi_msg;
 
 struct x86_msi_ops {
        int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type);
-       void (*compose_msi_msg)(struct pci_dev *dev, unsigned int irq,
-                               unsigned int dest, struct msi_msg *msg,
-                              u8 hpet_id);
        void (*teardown_msi_irq)(unsigned int irq);
        void (*teardown_msi_irqs)(struct pci_dev *dev);
        void (*restore_msi_irqs)(struct pci_dev *dev);
-       int  (*setup_hpet_msi)(unsigned int irq, unsigned int id);
 };
 
-struct IO_APIC_route_entry;
-struct io_apic_irq_attr;
-struct irq_data;
-struct cpumask;
-
 struct x86_io_apic_ops {
-       void            (*init)   (void);
        unsigned int    (*read)   (unsigned int apic, unsigned int reg);
-       void            (*write)  (unsigned int apic, unsigned int reg, unsigned int value);
-       void            (*modify) (unsigned int apic, unsigned int reg, unsigned int value);
        void            (*disable)(void);
-       void            (*print_entries)(unsigned int apic, unsigned int nr_entries);
-       int             (*set_affinity)(struct irq_data *data,
-                                       const struct cpumask *mask,
-                                       bool force);
-       int             (*setup_entry)(int irq, struct IO_APIC_route_entry *entry,
-                                      unsigned int destination, int vector,
-                                      struct io_apic_irq_attr *attr);
-       void            (*eoi_ioapic_pin)(int apic, int pin, int vector);
 };
 
 extern struct x86_init_ops x86_init;
diff --git a/arch/x86/include/asm/xcr.h b/arch/x86/include/asm/xcr.h
deleted file mode 100644 (file)
index f2cba4e..0000000
+++ /dev/null
@@ -1,49 +0,0 @@
-/* -*- linux-c -*- ------------------------------------------------------- *
- *
- *   Copyright 2008 rPath, Inc. - All Rights Reserved
- *
- *   This file is part of the Linux kernel, and is made available under
- *   the terms of the GNU General Public License version 2 or (at your
- *   option) any later version; incorporated herein by reference.
- *
- * ----------------------------------------------------------------------- */
-
-/*
- * asm-x86/xcr.h
- *
- * Definitions for the eXtended Control Register instructions
- */
-
-#ifndef _ASM_X86_XCR_H
-#define _ASM_X86_XCR_H
-
-#define XCR_XFEATURE_ENABLED_MASK      0x00000000
-
-#ifdef __KERNEL__
-# ifndef __ASSEMBLY__
-
-#include <linux/types.h>
-
-static inline u64 xgetbv(u32 index)
-{
-       u32 eax, edx;
-
-       asm volatile(".byte 0x0f,0x01,0xd0" /* xgetbv */
-                    : "=a" (eax), "=d" (edx)
-                    : "c" (index));
-       return eax + ((u64)edx << 32);
-}
-
-static inline void xsetbv(u32 index, u64 value)
-{
-       u32 eax = value;
-       u32 edx = value >> 32;
-
-       asm volatile(".byte 0x0f,0x01,0xd1" /* xsetbv */
-                    : : "a" (eax), "d" (edx), "c" (index));
-}
-
-# endif /* __ASSEMBLY__ */
-#endif /* __KERNEL__ */
-
-#endif /* _ASM_X86_XCR_H */
index d8829751b3f895e9fd19fa6bd597758650f417b8..1f5c5161ead682664dc30fc5dda802de2de0bc4b 100644 (file)
@@ -36,7 +36,7 @@
  * no advantages to be gotten from x86-64 here anyways.
  */
 
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 #ifdef CONFIG_X86_32
 /* reduce register pressure */
index ce05722e3c68bce4d72a1bcdc9e798b5014581cf..5a08bc8bff33934e10b4b9afe8e3236ac8c5ce93 100644 (file)
@@ -26,7 +26,7 @@
 #define XO3(x, y)      "       pxor   8*("#x")(%4), %%mm"#y"   ;\n"
 #define XO4(x, y)      "       pxor   8*("#x")(%5), %%mm"#y"   ;\n"
 
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 static void
 xor_pII_mmx_2(unsigned long bytes, unsigned long *p1, unsigned long *p2)
index 492b29802f571b0363a22fe686f708c8235c317a..7c0a517ec7511a667166c216df8357087ff3e7b0 100644 (file)
@@ -18,7 +18,7 @@
 #ifdef CONFIG_AS_AVX
 
 #include <linux/compiler.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 #define BLOCK4(i) \
                BLOCK(32 * i, 0) \
diff --git a/arch/x86/include/asm/xsave.h b/arch/x86/include/asm/xsave.h
deleted file mode 100644 (file)
index c9a6d68..0000000
+++ /dev/null
@@ -1,257 +0,0 @@
-#ifndef __ASM_X86_XSAVE_H
-#define __ASM_X86_XSAVE_H
-
-#include <linux/types.h>
-#include <asm/processor.h>
-
-#define XSTATE_CPUID           0x0000000d
-
-#define XSTATE_FP              0x1
-#define XSTATE_SSE             0x2
-#define XSTATE_YMM             0x4
-#define XSTATE_BNDREGS         0x8
-#define XSTATE_BNDCSR          0x10
-#define XSTATE_OPMASK          0x20
-#define XSTATE_ZMM_Hi256       0x40
-#define XSTATE_Hi16_ZMM                0x80
-
-#define XSTATE_FPSSE   (XSTATE_FP | XSTATE_SSE)
-#define XSTATE_AVX512  (XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
-/* Bit 63 of XCR0 is reserved for future expansion */
-#define XSTATE_EXTEND_MASK     (~(XSTATE_FPSSE | (1ULL << 63)))
-
-#define FXSAVE_SIZE    512
-
-#define XSAVE_HDR_SIZE     64
-#define XSAVE_HDR_OFFSET    FXSAVE_SIZE
-
-#define XSAVE_YMM_SIZE     256
-#define XSAVE_YMM_OFFSET    (XSAVE_HDR_SIZE + XSAVE_HDR_OFFSET)
-
-/* Supported features which support lazy state saving */
-#define XSTATE_LAZY    (XSTATE_FP | XSTATE_SSE | XSTATE_YMM                  \
-                       | XSTATE_OPMASK | XSTATE_ZMM_Hi256 | XSTATE_Hi16_ZMM)
-
-/* Supported features which require eager state saving */
-#define XSTATE_EAGER   (XSTATE_BNDREGS | XSTATE_BNDCSR)
-
-/* All currently supported features */
-#define XCNTXT_MASK    (XSTATE_LAZY | XSTATE_EAGER)
-
-#ifdef CONFIG_X86_64
-#define REX_PREFIX     "0x48, "
-#else
-#define REX_PREFIX
-#endif
-
-extern unsigned int xstate_size;
-extern u64 pcntxt_mask;
-extern u64 xstate_fx_sw_bytes[USER_XSTATE_FX_SW_WORDS];
-extern struct xsave_struct *init_xstate_buf;
-
-extern void xsave_init(void);
-extern void update_regset_xstate_info(unsigned int size, u64 xstate_mask);
-extern int init_fpu(struct task_struct *child);
-
-/* These macros all use (%edi)/(%rdi) as the single memory argument. */
-#define XSAVE          ".byte " REX_PREFIX "0x0f,0xae,0x27"
-#define XSAVEOPT       ".byte " REX_PREFIX "0x0f,0xae,0x37"
-#define XSAVES         ".byte " REX_PREFIX "0x0f,0xc7,0x2f"
-#define XRSTOR         ".byte " REX_PREFIX "0x0f,0xae,0x2f"
-#define XRSTORS                ".byte " REX_PREFIX "0x0f,0xc7,0x1f"
-
-#define xstate_fault   ".section .fixup,\"ax\"\n"      \
-                       "3:  movl $-1,%[err]\n"         \
-                       "    jmp  2b\n"                 \
-                       ".previous\n"                   \
-                       _ASM_EXTABLE(1b, 3b)            \
-                       : [err] "=r" (err)
-
-/*
- * This function is called only during boot time when x86 caps are not set
- * up and alternative can not be used yet.
- */
-static inline int xsave_state_booting(struct xsave_struct *fx, u64 mask)
-{
-       u32 lmask = mask;
-       u32 hmask = mask >> 32;
-       int err = 0;
-
-       WARN_ON(system_state != SYSTEM_BOOTING);
-
-       if (boot_cpu_has(X86_FEATURE_XSAVES))
-               asm volatile("1:"XSAVES"\n\t"
-                       "2:\n\t"
-                            xstate_fault
-                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-                       :   "memory");
-       else
-               asm volatile("1:"XSAVE"\n\t"
-                       "2:\n\t"
-                            xstate_fault
-                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-                       :   "memory");
-       return err;
-}
-
-/*
- * This function is called only during boot, when the x86 capability bits
- * are not yet set up and alternatives cannot be used.
- */
-static inline int xrstor_state_booting(struct xsave_struct *fx, u64 mask)
-{
-       u32 lmask = mask;
-       u32 hmask = mask >> 32;
-       int err = 0;
-
-       WARN_ON(system_state != SYSTEM_BOOTING);
-
-       if (boot_cpu_has(X86_FEATURE_XSAVES))
-               asm volatile("1:"XRSTORS"\n\t"
-                       "2:\n\t"
-                            xstate_fault
-                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-                       :   "memory");
-       else
-               asm volatile("1:"XRSTOR"\n\t"
-                       "2:\n\t"
-                            xstate_fault
-                       : "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-                       :   "memory");
-       return err;
-}
-
-/*
- * Save processor xstate to xsave area.
- */
-static inline int xsave_state(struct xsave_struct *fx, u64 mask)
-{
-       u32 lmask = mask;
-       u32 hmask = mask >> 32;
-       int err = 0;
-
-       /*
-        * If XSAVES is enabled, it replaces XSAVEOPT because it supports
-        * the compacted format and supervisor states in addition to
-        * XSAVEOPT's modified optimization.
-        *
-        * Otherwise, if XSAVEOPT is enabled, it replaces XSAVE because
-        * XSAVEOPT supports the modified optimization, which XSAVE
-        * does not.
-        *
-        * If neither XSAVES nor XSAVEOPT is enabled, use XSAVE.
-        */
-       alternative_input_2(
-               "1:"XSAVE,
-               XSAVEOPT,
-               X86_FEATURE_XSAVEOPT,
-               XSAVES,
-               X86_FEATURE_XSAVES,
-               [fx] "D" (fx), "a" (lmask), "d" (hmask) :
-               "memory");
-       asm volatile("2:\n\t"
-                    xstate_fault
-                    : "0" (0)
-                    : "memory");
-
-       return err;
-}
-
-/*
- * Restore processor xstate from xsave area.
- */
-static inline int xrstor_state(struct xsave_struct *fx, u64 mask)
-{
-       int err = 0;
-       u32 lmask = mask;
-       u32 hmask = mask >> 32;
-
-       /*
-        * Use xrstors to restore context if it is enabled. xrstors supports
-        * compacted format of xsave area which is not supported by xrstor.
-        */
-       alternative_input(
-               "1: " XRSTOR,
-               XRSTORS,
-               X86_FEATURE_XSAVES,
-               "D" (fx), "m" (*fx), "a" (lmask), "d" (hmask)
-               : "memory");
-
-       asm volatile("2:\n"
-                    xstate_fault
-                    : "0" (0)
-                    : "memory");
-
-       return err;
-}
-
-/*
- * Save xstate context for old process during context switch.
- */
-static inline void fpu_xsave(struct fpu *fpu)
-{
-       xsave_state(&fpu->state->xsave, -1);
-}
-
-/*
- * Restore xstate context for new process during context switch.
- */
-static inline int fpu_xrstor_checking(struct xsave_struct *fx)
-{
-       return xrstor_state(fx, -1);
-}
-
-/*
- * Save xstate to the user space xsave area.
- *
- * We don't use the modified optimization because xrstor/xrstors might
- * track a different application.
- *
- * We don't use the compacted xsave area format, for backward
- * compatibility with old applications that don't understand it.
- */
-static inline int xsave_user(struct xsave_struct __user *buf)
-{
-       int err;
-
-       /*
-        * Clear the xsave header first, so that reserved fields are
-        * initialized to zero.
-        */
-       err = __clear_user(&buf->xsave_hdr, sizeof(buf->xsave_hdr));
-       if (unlikely(err))
-               return -EFAULT;
-
-       __asm__ __volatile__(ASM_STAC "\n"
-                            "1:"XSAVE"\n"
-                            "2: " ASM_CLAC "\n"
-                            xstate_fault
-                            : "D" (buf), "a" (-1), "d" (-1), "0" (0)
-                            : "memory");
-       return err;
-}
-
-/*
- * Restore xstate from user space xsave area.
- */
-static inline int xrestore_user(struct xsave_struct __user *buf, u64 mask)
-{
-       int err = 0;
-       struct xsave_struct *xstate = ((__force struct xsave_struct *)buf);
-       u32 lmask = mask;
-       u32 hmask = mask >> 32;
-
-       __asm__ __volatile__(ASM_STAC "\n"
-                            "1:"XRSTOR"\n"
-                            "2: " ASM_CLAC "\n"
-                            xstate_fault
-                            : "D" (xstate), "a" (lmask), "d" (hmask), "0" (0)
-                            : "memory");       /* memory required? */
-       return err;
-}
-
-void *get_xsave_addr(struct xsave_struct *xsave, int xstate);
-void setup_xstate_comp(void);
-
-#endif
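
The deleted header's XSTATE_* bits mirror the architectural XCR0 component bits, so they are observable from user space as well. A hedged user-space sketch (assumes the CPU and OS report OSXSAVE, so XGETBV is legal; otherwise this faults):

#include <stdint.h>
#include <stdio.h>

/* Read XCR0 and test the architectural component bits the deleted
 * header named (XSTATE_FP=0x1, XSTATE_SSE=0x2, XSTATE_YMM=0x4).
 * If the assembler lacks the mnemonic, ".byte 0x0f,0x01,0xd0" is the
 * same instruction. */
static uint64_t read_xcr0(void)
{
	uint32_t lo, hi;

	asm volatile("xgetbv" : "=a" (lo), "=d" (hi) : "c" (0));
	return ((uint64_t)hi << 32) | lo;
}

int main(void)
{
	uint64_t xcr0 = read_xcr0();

	printf("FP:%d SSE:%d YMM:%d\n",
	       !!(xcr0 & 0x1), !!(xcr0 & 0x2), !!(xcr0 & 0x4));
	return 0;
}
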
diff --git a/arch/x86/include/uapi/asm/msr-index.h b/arch/x86/include/uapi/asm/msr-index.h
deleted file mode 100644 (file)
index c469490..0000000
+++ /dev/null
@@ -1,662 +0,0 @@
-#ifndef _ASM_X86_MSR_INDEX_H
-#define _ASM_X86_MSR_INDEX_H
-
-/* CPU model specific register (MSR) numbers */
-
-/* x86-64 specific MSRs */
-#define MSR_EFER               0xc0000080 /* extended feature register */
-#define MSR_STAR               0xc0000081 /* legacy mode SYSCALL target */
-#define MSR_LSTAR              0xc0000082 /* long mode SYSCALL target */
-#define MSR_CSTAR              0xc0000083 /* compat mode SYSCALL target */
-#define MSR_SYSCALL_MASK       0xc0000084 /* EFLAGS mask for syscall */
-#define MSR_FS_BASE            0xc0000100 /* 64bit FS base */
-#define MSR_GS_BASE            0xc0000101 /* 64bit GS base */
-#define MSR_KERNEL_GS_BASE     0xc0000102 /* SwapGS GS shadow */
-#define MSR_TSC_AUX            0xc0000103 /* Auxiliary TSC */
-
-/* EFER bits: */
-#define _EFER_SCE              0  /* SYSCALL/SYSRET */
-#define _EFER_LME              8  /* Long mode enable */
-#define _EFER_LMA              10 /* Long mode active (read-only) */
-#define _EFER_NX               11 /* No execute enable */
-#define _EFER_SVME             12 /* Enable virtualization */
-#define _EFER_LMSLE            13 /* Long Mode Segment Limit Enable */
-#define _EFER_FFXSR            14 /* Enable Fast FXSAVE/FXRSTOR */
-
-#define EFER_SCE               (1<<_EFER_SCE)
-#define EFER_LME               (1<<_EFER_LME)
-#define EFER_LMA               (1<<_EFER_LMA)
-#define EFER_NX                        (1<<_EFER_NX)
-#define EFER_SVME              (1<<_EFER_SVME)
-#define EFER_LMSLE             (1<<_EFER_LMSLE)
-#define EFER_FFXSR             (1<<_EFER_FFXSR)
-
-/* Intel MSRs. Some also available on other CPUs */
-#define MSR_IA32_PERFCTR0              0x000000c1
-#define MSR_IA32_PERFCTR1              0x000000c2
-#define MSR_FSB_FREQ                   0x000000cd
-#define MSR_NHM_PLATFORM_INFO          0x000000ce
-
-#define MSR_NHM_SNB_PKG_CST_CFG_CTL    0x000000e2
-#define NHM_C3_AUTO_DEMOTE             (1UL << 25)
-#define NHM_C1_AUTO_DEMOTE             (1UL << 26)
-#define ATM_LNC_C6_AUTO_DEMOTE         (1UL << 25)
-#define SNB_C1_AUTO_UNDEMOTE           (1UL << 27)
-#define SNB_C3_AUTO_UNDEMOTE           (1UL << 28)
-
-#define MSR_PLATFORM_INFO              0x000000ce
-#define MSR_MTRRcap                    0x000000fe
-#define MSR_IA32_BBL_CR_CTL            0x00000119
-#define MSR_IA32_BBL_CR_CTL3           0x0000011e
-
-#define MSR_IA32_SYSENTER_CS           0x00000174
-#define MSR_IA32_SYSENTER_ESP          0x00000175
-#define MSR_IA32_SYSENTER_EIP          0x00000176
-
-#define MSR_IA32_MCG_CAP               0x00000179
-#define MSR_IA32_MCG_STATUS            0x0000017a
-#define MSR_IA32_MCG_CTL               0x0000017b
-
-#define MSR_OFFCORE_RSP_0              0x000001a6
-#define MSR_OFFCORE_RSP_1              0x000001a7
-#define MSR_NHM_TURBO_RATIO_LIMIT      0x000001ad
-#define MSR_IVT_TURBO_RATIO_LIMIT      0x000001ae
-#define MSR_TURBO_RATIO_LIMIT          0x000001ad
-#define MSR_TURBO_RATIO_LIMIT1         0x000001ae
-#define MSR_TURBO_RATIO_LIMIT2         0x000001af
-
-#define MSR_LBR_SELECT                 0x000001c8
-#define MSR_LBR_TOS                    0x000001c9
-#define MSR_LBR_NHM_FROM               0x00000680
-#define MSR_LBR_NHM_TO                 0x000006c0
-#define MSR_LBR_CORE_FROM              0x00000040
-#define MSR_LBR_CORE_TO                        0x00000060
-
-#define MSR_IA32_PEBS_ENABLE           0x000003f1
-#define MSR_IA32_DS_AREA               0x00000600
-#define MSR_IA32_PERF_CAPABILITIES     0x00000345
-#define MSR_PEBS_LD_LAT_THRESHOLD      0x000003f6
-
-#define MSR_IA32_RTIT_CTL              0x00000570
-#define RTIT_CTL_TRACEEN               BIT(0)
-#define RTIT_CTL_OS                    BIT(2)
-#define RTIT_CTL_USR                   BIT(3)
-#define RTIT_CTL_CR3EN                 BIT(7)
-#define RTIT_CTL_TOPA                  BIT(8)
-#define RTIT_CTL_TSC_EN                        BIT(10)
-#define RTIT_CTL_DISRETC               BIT(11)
-#define RTIT_CTL_BRANCH_EN             BIT(13)
-#define MSR_IA32_RTIT_STATUS           0x00000571
-#define RTIT_STATUS_CONTEXTEN          BIT(1)
-#define RTIT_STATUS_TRIGGEREN          BIT(2)
-#define RTIT_STATUS_ERROR              BIT(4)
-#define RTIT_STATUS_STOPPED            BIT(5)
-#define MSR_IA32_RTIT_CR3_MATCH                0x00000572
-#define MSR_IA32_RTIT_OUTPUT_BASE      0x00000560
-#define MSR_IA32_RTIT_OUTPUT_MASK      0x00000561
-
-#define MSR_MTRRfix64K_00000           0x00000250
-#define MSR_MTRRfix16K_80000           0x00000258
-#define MSR_MTRRfix16K_A0000           0x00000259
-#define MSR_MTRRfix4K_C0000            0x00000268
-#define MSR_MTRRfix4K_C8000            0x00000269
-#define MSR_MTRRfix4K_D0000            0x0000026a
-#define MSR_MTRRfix4K_D8000            0x0000026b
-#define MSR_MTRRfix4K_E0000            0x0000026c
-#define MSR_MTRRfix4K_E8000            0x0000026d
-#define MSR_MTRRfix4K_F0000            0x0000026e
-#define MSR_MTRRfix4K_F8000            0x0000026f
-#define MSR_MTRRdefType                        0x000002ff
-
-#define MSR_IA32_CR_PAT                        0x00000277
-
-#define MSR_IA32_DEBUGCTLMSR           0x000001d9
-#define MSR_IA32_LASTBRANCHFROMIP      0x000001db
-#define MSR_IA32_LASTBRANCHTOIP                0x000001dc
-#define MSR_IA32_LASTINTFROMIP         0x000001dd
-#define MSR_IA32_LASTINTTOIP           0x000001de
-
-/* DEBUGCTLMSR bits (others vary by model): */
-#define DEBUGCTLMSR_LBR                        (1UL <<  0) /* last branch recording */
-#define DEBUGCTLMSR_BTF                        (1UL <<  1) /* single-step on branches */
-#define DEBUGCTLMSR_TR                 (1UL <<  6)
-#define DEBUGCTLMSR_BTS                        (1UL <<  7)
-#define DEBUGCTLMSR_BTINT              (1UL <<  8)
-#define DEBUGCTLMSR_BTS_OFF_OS         (1UL <<  9)
-#define DEBUGCTLMSR_BTS_OFF_USR                (1UL << 10)
-#define DEBUGCTLMSR_FREEZE_LBRS_ON_PMI (1UL << 11)
-
-#define MSR_IA32_POWER_CTL             0x000001fc
-
-#define MSR_IA32_MC0_CTL               0x00000400
-#define MSR_IA32_MC0_STATUS            0x00000401
-#define MSR_IA32_MC0_ADDR              0x00000402
-#define MSR_IA32_MC0_MISC              0x00000403
-
-/* C-state Residency Counters */
-#define MSR_PKG_C3_RESIDENCY           0x000003f8
-#define MSR_PKG_C6_RESIDENCY           0x000003f9
-#define MSR_PKG_C7_RESIDENCY           0x000003fa
-#define MSR_CORE_C3_RESIDENCY          0x000003fc
-#define MSR_CORE_C6_RESIDENCY          0x000003fd
-#define MSR_CORE_C7_RESIDENCY          0x000003fe
-#define MSR_PKG_C2_RESIDENCY           0x0000060d
-#define MSR_PKG_C8_RESIDENCY           0x00000630
-#define MSR_PKG_C9_RESIDENCY           0x00000631
-#define MSR_PKG_C10_RESIDENCY          0x00000632
-
-/* Run Time Average Power Limiting (RAPL) Interface */
-
-#define MSR_RAPL_POWER_UNIT            0x00000606
-
-#define MSR_PKG_POWER_LIMIT            0x00000610
-#define MSR_PKG_ENERGY_STATUS          0x00000611
-#define MSR_PKG_PERF_STATUS            0x00000613
-#define MSR_PKG_POWER_INFO             0x00000614
-
-#define MSR_DRAM_POWER_LIMIT           0x00000618
-#define MSR_DRAM_ENERGY_STATUS         0x00000619
-#define MSR_DRAM_PERF_STATUS           0x0000061b
-#define MSR_DRAM_POWER_INFO            0x0000061c
-
-#define MSR_PP0_POWER_LIMIT            0x00000638
-#define MSR_PP0_ENERGY_STATUS          0x00000639
-#define MSR_PP0_POLICY                 0x0000063a
-#define MSR_PP0_PERF_STATUS            0x0000063b
-
-#define MSR_PP1_POWER_LIMIT            0x00000640
-#define MSR_PP1_ENERGY_STATUS          0x00000641
-#define MSR_PP1_POLICY                 0x00000642
-
-#define MSR_PKG_WEIGHTED_CORE_C0_RES   0x00000658
-#define MSR_PKG_ANY_CORE_C0_RES                0x00000659
-#define MSR_PKG_ANY_GFXE_C0_RES                0x0000065A
-#define MSR_PKG_BOTH_CORE_GFXE_C0_RES  0x0000065B
-
-#define MSR_CORE_C1_RES                        0x00000660
-
-#define MSR_CC6_DEMOTION_POLICY_CONFIG 0x00000668
-#define MSR_MC6_DEMOTION_POLICY_CONFIG 0x00000669
-
-#define MSR_CORE_PERF_LIMIT_REASONS    0x00000690
-#define MSR_GFX_PERF_LIMIT_REASONS     0x000006B0
-#define MSR_RING_PERF_LIMIT_REASONS    0x000006B1
-
-/* Hardware P state interface */
-#define MSR_PPERF                      0x0000064e
-#define MSR_PERF_LIMIT_REASONS         0x0000064f
-#define MSR_PM_ENABLE                  0x00000770
-#define MSR_HWP_CAPABILITIES           0x00000771
-#define MSR_HWP_REQUEST_PKG            0x00000772
-#define MSR_HWP_INTERRUPT              0x00000773
-#define MSR_HWP_REQUEST                0x00000774
-#define MSR_HWP_STATUS                 0x00000777
-
-/* CPUID.6.EAX */
-#define HWP_BASE_BIT                   (1<<7)
-#define HWP_NOTIFICATIONS_BIT          (1<<8)
-#define HWP_ACTIVITY_WINDOW_BIT                (1<<9)
-#define HWP_ENERGY_PERF_PREFERENCE_BIT (1<<10)
-#define HWP_PACKAGE_LEVEL_REQUEST_BIT  (1<<11)
-
-/* IA32_HWP_CAPABILITIES */
-#define HWP_HIGHEST_PERF(x)            (x & 0xff)
-#define HWP_GUARANTEED_PERF(x)         ((x & (0xff << 8)) >>8)
-#define HWP_MOSTEFFICIENT_PERF(x)      ((x & (0xff << 16)) >>16)
-#define HWP_LOWEST_PERF(x)             ((x & (0xff << 24)) >>24)
-
-/* IA32_HWP_REQUEST */
-#define HWP_MIN_PERF(x)                (x & 0xff)
-#define HWP_MAX_PERF(x)                ((x & 0xff) << 8)
-#define HWP_DESIRED_PERF(x)            ((x & 0xff) << 16)
-#define HWP_ENERGY_PERF_PREFERENCE(x)  ((x & 0xff) << 24)
-#define HWP_ACTIVITY_WINDOW(x)         ((x & 0x3ff) << 32)
-#define HWP_PACKAGE_CONTROL(x)         ((x & 0x1) << 42)
-
-/* IA32_HWP_STATUS */
-#define HWP_GUARANTEED_CHANGE(x)       (x & 0x1)
-#define HWP_EXCURSION_TO_MINIMUM(x)    (x & 0x4)
-
-/* IA32_HWP_INTERRUPT */
-#define HWP_CHANGE_TO_GUARANTEED_INT(x)        (x & 0x1)
-#define HWP_EXCURSION_TO_MINIMUM_INT(x)        (x & 0x2)
-
-#define MSR_AMD64_MC0_MASK             0xc0010044
-
-#define MSR_IA32_MCx_CTL(x)            (MSR_IA32_MC0_CTL + 4*(x))
-#define MSR_IA32_MCx_STATUS(x)         (MSR_IA32_MC0_STATUS + 4*(x))
-#define MSR_IA32_MCx_ADDR(x)           (MSR_IA32_MC0_ADDR + 4*(x))
-#define MSR_IA32_MCx_MISC(x)           (MSR_IA32_MC0_MISC + 4*(x))
-
-#define MSR_AMD64_MCx_MASK(x)          (MSR_AMD64_MC0_MASK + (x))
-
-/* These are consecutive and not in the normal block of four MCE bank registers */
-#define MSR_IA32_MC0_CTL2              0x00000280
-#define MSR_IA32_MCx_CTL2(x)           (MSR_IA32_MC0_CTL2 + (x))
-
-#define MSR_P6_PERFCTR0                        0x000000c1
-#define MSR_P6_PERFCTR1                        0x000000c2
-#define MSR_P6_EVNTSEL0                        0x00000186
-#define MSR_P6_EVNTSEL1                        0x00000187
-
-#define MSR_KNC_PERFCTR0               0x00000020
-#define MSR_KNC_PERFCTR1               0x00000021
-#define MSR_KNC_EVNTSEL0               0x00000028
-#define MSR_KNC_EVNTSEL1               0x00000029
-
-/* Alternative perfctr range with full access. */
-#define MSR_IA32_PMC0                  0x000004c1
-
-/* AMD64 MSRs. Incomplete; see the architecture manual for the full
-   list. */
-
-#define MSR_AMD64_PATCH_LEVEL          0x0000008b
-#define MSR_AMD64_TSC_RATIO            0xc0000104
-#define MSR_AMD64_NB_CFG               0xc001001f
-#define MSR_AMD64_PATCH_LOADER         0xc0010020
-#define MSR_AMD64_OSVW_ID_LENGTH       0xc0010140
-#define MSR_AMD64_OSVW_STATUS          0xc0010141
-#define MSR_AMD64_LS_CFG               0xc0011020
-#define MSR_AMD64_DC_CFG               0xc0011022
-#define MSR_AMD64_BU_CFG2              0xc001102a
-#define MSR_AMD64_IBSFETCHCTL          0xc0011030
-#define MSR_AMD64_IBSFETCHLINAD                0xc0011031
-#define MSR_AMD64_IBSFETCHPHYSAD       0xc0011032
-#define MSR_AMD64_IBSFETCH_REG_COUNT   3
-#define MSR_AMD64_IBSFETCH_REG_MASK    ((1UL<<MSR_AMD64_IBSFETCH_REG_COUNT)-1)
-#define MSR_AMD64_IBSOPCTL             0xc0011033
-#define MSR_AMD64_IBSOPRIP             0xc0011034
-#define MSR_AMD64_IBSOPDATA            0xc0011035
-#define MSR_AMD64_IBSOPDATA2           0xc0011036
-#define MSR_AMD64_IBSOPDATA3           0xc0011037
-#define MSR_AMD64_IBSDCLINAD           0xc0011038
-#define MSR_AMD64_IBSDCPHYSAD          0xc0011039
-#define MSR_AMD64_IBSOP_REG_COUNT      7
-#define MSR_AMD64_IBSOP_REG_MASK       ((1UL<<MSR_AMD64_IBSOP_REG_COUNT)-1)
-#define MSR_AMD64_IBSCTL               0xc001103a
-#define MSR_AMD64_IBSBRTARGET          0xc001103b
-#define MSR_AMD64_IBSOPDATA4           0xc001103d
-#define MSR_AMD64_IBS_REG_COUNT_MAX    8 /* includes MSR_AMD64_IBSBRTARGET */
-
-/* Fam 16h MSRs */
-#define MSR_F16H_L2I_PERF_CTL          0xc0010230
-#define MSR_F16H_L2I_PERF_CTR          0xc0010231
-#define MSR_F16H_DR1_ADDR_MASK         0xc0011019
-#define MSR_F16H_DR2_ADDR_MASK         0xc001101a
-#define MSR_F16H_DR3_ADDR_MASK         0xc001101b
-#define MSR_F16H_DR0_ADDR_MASK         0xc0011027
-
-/* Fam 15h MSRs */
-#define MSR_F15H_PERF_CTL              0xc0010200
-#define MSR_F15H_PERF_CTR              0xc0010201
-#define MSR_F15H_NB_PERF_CTL           0xc0010240
-#define MSR_F15H_NB_PERF_CTR           0xc0010241
-
-/* Fam 10h MSRs */
-#define MSR_FAM10H_MMIO_CONF_BASE      0xc0010058
-#define FAM10H_MMIO_CONF_ENABLE                (1<<0)
-#define FAM10H_MMIO_CONF_BUSRANGE_MASK 0xf
-#define FAM10H_MMIO_CONF_BUSRANGE_SHIFT 2
-#define FAM10H_MMIO_CONF_BASE_MASK     0xfffffffULL
-#define FAM10H_MMIO_CONF_BASE_SHIFT    20
-#define MSR_FAM10H_NODE_ID             0xc001100c
-
-/* K8 MSRs */
-#define MSR_K8_TOP_MEM1                        0xc001001a
-#define MSR_K8_TOP_MEM2                        0xc001001d
-#define MSR_K8_SYSCFG                  0xc0010010
-#define MSR_K8_INT_PENDING_MSG         0xc0010055
-/* C1E active bits in int pending message */
-#define K8_INTP_C1E_ACTIVE_MASK                0x18000000
-#define MSR_K8_TSEG_ADDR               0xc0010112
-#define K8_MTRRFIXRANGE_DRAM_ENABLE    0x00040000 /* MtrrFixDramEn bit    */
-#define K8_MTRRFIXRANGE_DRAM_MODIFY    0x00080000 /* MtrrFixDramModEn bit */
-#define K8_MTRR_RDMEM_WRMEM_MASK       0x18181818 /* Mask: RdMem|WrMem    */
-
-/* K7 MSRs */
-#define MSR_K7_EVNTSEL0                        0xc0010000
-#define MSR_K7_PERFCTR0                        0xc0010004
-#define MSR_K7_EVNTSEL1                        0xc0010001
-#define MSR_K7_PERFCTR1                        0xc0010005
-#define MSR_K7_EVNTSEL2                        0xc0010002
-#define MSR_K7_PERFCTR2                        0xc0010006
-#define MSR_K7_EVNTSEL3                        0xc0010003
-#define MSR_K7_PERFCTR3                        0xc0010007
-#define MSR_K7_CLK_CTL                 0xc001001b
-#define MSR_K7_HWCR                    0xc0010015
-#define MSR_K7_FID_VID_CTL             0xc0010041
-#define MSR_K7_FID_VID_STATUS          0xc0010042
-
-/* K6 MSRs */
-#define MSR_K6_WHCR                    0xc0000082
-#define MSR_K6_UWCCR                   0xc0000085
-#define MSR_K6_EPMR                    0xc0000086
-#define MSR_K6_PSOR                    0xc0000087
-#define MSR_K6_PFIR                    0xc0000088
-
-/* Centaur-Hauls/IDT defined MSRs. */
-#define MSR_IDT_FCR1                   0x00000107
-#define MSR_IDT_FCR2                   0x00000108
-#define MSR_IDT_FCR3                   0x00000109
-#define MSR_IDT_FCR4                   0x0000010a
-
-#define MSR_IDT_MCR0                   0x00000110
-#define MSR_IDT_MCR1                   0x00000111
-#define MSR_IDT_MCR2                   0x00000112
-#define MSR_IDT_MCR3                   0x00000113
-#define MSR_IDT_MCR4                   0x00000114
-#define MSR_IDT_MCR5                   0x00000115
-#define MSR_IDT_MCR6                   0x00000116
-#define MSR_IDT_MCR7                   0x00000117
-#define MSR_IDT_MCR_CTRL               0x00000120
-
-/* VIA Cyrix defined MSRs*/
-#define MSR_VIA_FCR                    0x00001107
-#define MSR_VIA_LONGHAUL               0x0000110a
-#define MSR_VIA_RNG                    0x0000110b
-#define MSR_VIA_BCR2                   0x00001147
-
-/* Transmeta defined MSRs */
-#define MSR_TMTA_LONGRUN_CTRL          0x80868010
-#define MSR_TMTA_LONGRUN_FLAGS         0x80868011
-#define MSR_TMTA_LRTI_READOUT          0x80868018
-#define MSR_TMTA_LRTI_VOLT_MHZ         0x8086801a
-
-/* Intel defined MSRs. */
-#define MSR_IA32_P5_MC_ADDR            0x00000000
-#define MSR_IA32_P5_MC_TYPE            0x00000001
-#define MSR_IA32_TSC                   0x00000010
-#define MSR_IA32_PLATFORM_ID           0x00000017
-#define MSR_IA32_EBL_CR_POWERON                0x0000002a
-#define MSR_EBC_FREQUENCY_ID           0x0000002c
-#define MSR_SMI_COUNT                  0x00000034
-#define MSR_IA32_FEATURE_CONTROL        0x0000003a
-#define MSR_IA32_TSC_ADJUST             0x0000003b
-#define MSR_IA32_BNDCFGS               0x00000d90
-
-#define MSR_IA32_XSS                   0x00000da0
-
-#define FEATURE_CONTROL_LOCKED                         (1<<0)
-#define FEATURE_CONTROL_VMXON_ENABLED_INSIDE_SMX       (1<<1)
-#define FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX      (1<<2)
-
-#define MSR_IA32_APICBASE              0x0000001b
-#define MSR_IA32_APICBASE_BSP          (1<<8)
-#define MSR_IA32_APICBASE_ENABLE       (1<<11)
-#define MSR_IA32_APICBASE_BASE         (0xfffff<<12)
-
-#define MSR_IA32_TSCDEADLINE           0x000006e0
-
-#define MSR_IA32_UCODE_WRITE           0x00000079
-#define MSR_IA32_UCODE_REV             0x0000008b
-
-#define MSR_IA32_SMM_MONITOR_CTL       0x0000009b
-#define MSR_IA32_SMBASE                        0x0000009e
-
-#define MSR_IA32_PERF_STATUS           0x00000198
-#define MSR_IA32_PERF_CTL              0x00000199
-#define INTEL_PERF_CTL_MASK            0xffff
-#define MSR_AMD_PSTATE_DEF_BASE                0xc0010064
-#define MSR_AMD_PERF_STATUS            0xc0010063
-#define MSR_AMD_PERF_CTL               0xc0010062
-
-#define MSR_IA32_MPERF                 0x000000e7
-#define MSR_IA32_APERF                 0x000000e8
-
-#define MSR_IA32_THERM_CONTROL         0x0000019a
-#define MSR_IA32_THERM_INTERRUPT       0x0000019b
-
-#define THERM_INT_HIGH_ENABLE          (1 << 0)
-#define THERM_INT_LOW_ENABLE           (1 << 1)
-#define THERM_INT_PLN_ENABLE           (1 << 24)
-
-#define MSR_IA32_THERM_STATUS          0x0000019c
-
-#define THERM_STATUS_PROCHOT           (1 << 0)
-#define THERM_STATUS_POWER_LIMIT       (1 << 10)
-
-#define MSR_THERM2_CTL                 0x0000019d
-
-#define MSR_THERM2_CTL_TM_SELECT       (1ULL << 16)
-
-#define MSR_IA32_MISC_ENABLE           0x000001a0
-
-#define MSR_IA32_TEMPERATURE_TARGET    0x000001a2
-
-#define MSR_MISC_PWR_MGMT              0x000001aa
-
-#define MSR_IA32_ENERGY_PERF_BIAS      0x000001b0
-#define ENERGY_PERF_BIAS_PERFORMANCE   0
-#define ENERGY_PERF_BIAS_NORMAL                6
-#define ENERGY_PERF_BIAS_POWERSAVE     15
-
-#define MSR_IA32_PACKAGE_THERM_STATUS          0x000001b1
-
-#define PACKAGE_THERM_STATUS_PROCHOT           (1 << 0)
-#define PACKAGE_THERM_STATUS_POWER_LIMIT       (1 << 10)
-
-#define MSR_IA32_PACKAGE_THERM_INTERRUPT       0x000001b2
-
-#define PACKAGE_THERM_INT_HIGH_ENABLE          (1 << 0)
-#define PACKAGE_THERM_INT_LOW_ENABLE           (1 << 1)
-#define PACKAGE_THERM_INT_PLN_ENABLE           (1 << 24)
-
-/* Thermal Thresholds Support */
-#define THERM_INT_THRESHOLD0_ENABLE    (1 << 15)
-#define THERM_SHIFT_THRESHOLD0        8
-#define THERM_MASK_THRESHOLD0          (0x7f << THERM_SHIFT_THRESHOLD0)
-#define THERM_INT_THRESHOLD1_ENABLE    (1 << 23)
-#define THERM_SHIFT_THRESHOLD1        16
-#define THERM_MASK_THRESHOLD1          (0x7f << THERM_SHIFT_THRESHOLD1)
-#define THERM_STATUS_THRESHOLD0        (1 << 6)
-#define THERM_LOG_THRESHOLD0           (1 << 7)
-#define THERM_STATUS_THRESHOLD1        (1 << 8)
-#define THERM_LOG_THRESHOLD1           (1 << 9)
-
-/* MISC_ENABLE bits: architectural */
-#define MSR_IA32_MISC_ENABLE_FAST_STRING_BIT           0
-#define MSR_IA32_MISC_ENABLE_FAST_STRING               (1ULL << MSR_IA32_MISC_ENABLE_FAST_STRING_BIT)
-#define MSR_IA32_MISC_ENABLE_TCC_BIT                   1
-#define MSR_IA32_MISC_ENABLE_TCC                       (1ULL << MSR_IA32_MISC_ENABLE_TCC_BIT)
-#define MSR_IA32_MISC_ENABLE_EMON_BIT                  7
-#define MSR_IA32_MISC_ENABLE_EMON                      (1ULL << MSR_IA32_MISC_ENABLE_EMON_BIT)
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT           11
-#define MSR_IA32_MISC_ENABLE_BTS_UNAVAIL               (1ULL << MSR_IA32_MISC_ENABLE_BTS_UNAVAIL_BIT)
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT          12
-#define MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL              (1ULL << MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL_BIT)
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT    16
-#define MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP                (1ULL << MSR_IA32_MISC_ENABLE_ENHANCED_SPEEDSTEP_BIT)
-#define MSR_IA32_MISC_ENABLE_MWAIT_BIT                 18
-#define MSR_IA32_MISC_ENABLE_MWAIT                     (1ULL << MSR_IA32_MISC_ENABLE_MWAIT_BIT)
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT           22
-#define MSR_IA32_MISC_ENABLE_LIMIT_CPUID               (1ULL << MSR_IA32_MISC_ENABLE_LIMIT_CPUID_BIT)
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT          23
-#define MSR_IA32_MISC_ENABLE_XTPR_DISABLE              (1ULL << MSR_IA32_MISC_ENABLE_XTPR_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT            34
-#define MSR_IA32_MISC_ENABLE_XD_DISABLE                        (1ULL << MSR_IA32_MISC_ENABLE_XD_DISABLE_BIT)
-
-/* MISC_ENABLE bits: model-specific, meaning may vary from core to core */
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT            2
-#define MSR_IA32_MISC_ENABLE_X87_COMPAT                        (1ULL << MSR_IA32_MISC_ENABLE_X87_COMPAT_BIT)
-#define MSR_IA32_MISC_ENABLE_TM1_BIT                   3
-#define MSR_IA32_MISC_ENABLE_TM1                       (1ULL << MSR_IA32_MISC_ENABLE_TM1_BIT)
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT    4
-#define MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE                (1ULL << MSR_IA32_MISC_ENABLE_SPLIT_LOCK_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT       6
-#define MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_L3CACHE_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT         8
-#define MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK             (1ULL << MSR_IA32_MISC_ENABLE_SUPPRESS_LOCK_BIT)
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT      9
-#define MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_PREFETCH_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_FERR_BIT                  10
-#define MSR_IA32_MISC_ENABLE_FERR                      (1ULL << MSR_IA32_MISC_ENABLE_FERR_BIT)
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT                10
-#define MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX            (1ULL << MSR_IA32_MISC_ENABLE_FERR_MULTIPLEX_BIT)
-#define MSR_IA32_MISC_ENABLE_TM2_BIT                   13
-#define MSR_IA32_MISC_ENABLE_TM2                       (1ULL << MSR_IA32_MISC_ENABLE_TM2_BIT)
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT      19
-#define MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_ADJ_PREF_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT                20
-#define MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK            (1ULL << MSR_IA32_MISC_ENABLE_SPEEDSTEP_LOCK_BIT)
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT           24
-#define MSR_IA32_MISC_ENABLE_L1D_CONTEXT               (1ULL << MSR_IA32_MISC_ENABLE_L1D_CONTEXT_BIT)
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT      37
-#define MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE          (1ULL << MSR_IA32_MISC_ENABLE_DCU_PREF_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT         38
-#define MSR_IA32_MISC_ENABLE_TURBO_DISABLE             (1ULL << MSR_IA32_MISC_ENABLE_TURBO_DISABLE_BIT)
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT       39
-#define MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE           (1ULL << MSR_IA32_MISC_ENABLE_IP_PREF_DISABLE_BIT)
-
-#define MSR_IA32_TSC_DEADLINE          0x000006E0
-
-/* P4/Xeon+ specific */
-#define MSR_IA32_MCG_EAX               0x00000180
-#define MSR_IA32_MCG_EBX               0x00000181
-#define MSR_IA32_MCG_ECX               0x00000182
-#define MSR_IA32_MCG_EDX               0x00000183
-#define MSR_IA32_MCG_ESI               0x00000184
-#define MSR_IA32_MCG_EDI               0x00000185
-#define MSR_IA32_MCG_EBP               0x00000186
-#define MSR_IA32_MCG_ESP               0x00000187
-#define MSR_IA32_MCG_EFLAGS            0x00000188
-#define MSR_IA32_MCG_EIP               0x00000189
-#define MSR_IA32_MCG_RESERVED          0x0000018a
-
-/* Pentium IV performance counter MSRs */
-#define MSR_P4_BPU_PERFCTR0            0x00000300
-#define MSR_P4_BPU_PERFCTR1            0x00000301
-#define MSR_P4_BPU_PERFCTR2            0x00000302
-#define MSR_P4_BPU_PERFCTR3            0x00000303
-#define MSR_P4_MS_PERFCTR0             0x00000304
-#define MSR_P4_MS_PERFCTR1             0x00000305
-#define MSR_P4_MS_PERFCTR2             0x00000306
-#define MSR_P4_MS_PERFCTR3             0x00000307
-#define MSR_P4_FLAME_PERFCTR0          0x00000308
-#define MSR_P4_FLAME_PERFCTR1          0x00000309
-#define MSR_P4_FLAME_PERFCTR2          0x0000030a
-#define MSR_P4_FLAME_PERFCTR3          0x0000030b
-#define MSR_P4_IQ_PERFCTR0             0x0000030c
-#define MSR_P4_IQ_PERFCTR1             0x0000030d
-#define MSR_P4_IQ_PERFCTR2             0x0000030e
-#define MSR_P4_IQ_PERFCTR3             0x0000030f
-#define MSR_P4_IQ_PERFCTR4             0x00000310
-#define MSR_P4_IQ_PERFCTR5             0x00000311
-#define MSR_P4_BPU_CCCR0               0x00000360
-#define MSR_P4_BPU_CCCR1               0x00000361
-#define MSR_P4_BPU_CCCR2               0x00000362
-#define MSR_P4_BPU_CCCR3               0x00000363
-#define MSR_P4_MS_CCCR0                        0x00000364
-#define MSR_P4_MS_CCCR1                        0x00000365
-#define MSR_P4_MS_CCCR2                        0x00000366
-#define MSR_P4_MS_CCCR3                        0x00000367
-#define MSR_P4_FLAME_CCCR0             0x00000368
-#define MSR_P4_FLAME_CCCR1             0x00000369
-#define MSR_P4_FLAME_CCCR2             0x0000036a
-#define MSR_P4_FLAME_CCCR3             0x0000036b
-#define MSR_P4_IQ_CCCR0                        0x0000036c
-#define MSR_P4_IQ_CCCR1                        0x0000036d
-#define MSR_P4_IQ_CCCR2                        0x0000036e
-#define MSR_P4_IQ_CCCR3                        0x0000036f
-#define MSR_P4_IQ_CCCR4                        0x00000370
-#define MSR_P4_IQ_CCCR5                        0x00000371
-#define MSR_P4_ALF_ESCR0               0x000003ca
-#define MSR_P4_ALF_ESCR1               0x000003cb
-#define MSR_P4_BPU_ESCR0               0x000003b2
-#define MSR_P4_BPU_ESCR1               0x000003b3
-#define MSR_P4_BSU_ESCR0               0x000003a0
-#define MSR_P4_BSU_ESCR1               0x000003a1
-#define MSR_P4_CRU_ESCR0               0x000003b8
-#define MSR_P4_CRU_ESCR1               0x000003b9
-#define MSR_P4_CRU_ESCR2               0x000003cc
-#define MSR_P4_CRU_ESCR3               0x000003cd
-#define MSR_P4_CRU_ESCR4               0x000003e0
-#define MSR_P4_CRU_ESCR5               0x000003e1
-#define MSR_P4_DAC_ESCR0               0x000003a8
-#define MSR_P4_DAC_ESCR1               0x000003a9
-#define MSR_P4_FIRM_ESCR0              0x000003a4
-#define MSR_P4_FIRM_ESCR1              0x000003a5
-#define MSR_P4_FLAME_ESCR0             0x000003a6
-#define MSR_P4_FLAME_ESCR1             0x000003a7
-#define MSR_P4_FSB_ESCR0               0x000003a2
-#define MSR_P4_FSB_ESCR1               0x000003a3
-#define MSR_P4_IQ_ESCR0                        0x000003ba
-#define MSR_P4_IQ_ESCR1                        0x000003bb
-#define MSR_P4_IS_ESCR0                        0x000003b4
-#define MSR_P4_IS_ESCR1                        0x000003b5
-#define MSR_P4_ITLB_ESCR0              0x000003b6
-#define MSR_P4_ITLB_ESCR1              0x000003b7
-#define MSR_P4_IX_ESCR0                        0x000003c8
-#define MSR_P4_IX_ESCR1                        0x000003c9
-#define MSR_P4_MOB_ESCR0               0x000003aa
-#define MSR_P4_MOB_ESCR1               0x000003ab
-#define MSR_P4_MS_ESCR0                        0x000003c0
-#define MSR_P4_MS_ESCR1                        0x000003c1
-#define MSR_P4_PMH_ESCR0               0x000003ac
-#define MSR_P4_PMH_ESCR1               0x000003ad
-#define MSR_P4_RAT_ESCR0               0x000003bc
-#define MSR_P4_RAT_ESCR1               0x000003bd
-#define MSR_P4_SAAT_ESCR0              0x000003ae
-#define MSR_P4_SAAT_ESCR1              0x000003af
-#define MSR_P4_SSU_ESCR0               0x000003be
-#define MSR_P4_SSU_ESCR1               0x000003bf /* guess: not in manual */
-
-#define MSR_P4_TBPU_ESCR0              0x000003c2
-#define MSR_P4_TBPU_ESCR1              0x000003c3
-#define MSR_P4_TC_ESCR0                        0x000003c4
-#define MSR_P4_TC_ESCR1                        0x000003c5
-#define MSR_P4_U2L_ESCR0               0x000003b0
-#define MSR_P4_U2L_ESCR1               0x000003b1
-
-#define MSR_P4_PEBS_MATRIX_VERT                0x000003f2
-
-/* Intel Core-based CPU performance counters */
-#define MSR_CORE_PERF_FIXED_CTR0       0x00000309
-#define MSR_CORE_PERF_FIXED_CTR1       0x0000030a
-#define MSR_CORE_PERF_FIXED_CTR2       0x0000030b
-#define MSR_CORE_PERF_FIXED_CTR_CTRL   0x0000038d
-#define MSR_CORE_PERF_GLOBAL_STATUS    0x0000038e
-#define MSR_CORE_PERF_GLOBAL_CTRL      0x0000038f
-#define MSR_CORE_PERF_GLOBAL_OVF_CTRL  0x00000390
-
-/* Geode defined MSRs */
-#define MSR_GEODE_BUSCONT_CONF0                0x00001900
-
-/* Intel VT MSRs */
-#define MSR_IA32_VMX_BASIC              0x00000480
-#define MSR_IA32_VMX_PINBASED_CTLS      0x00000481
-#define MSR_IA32_VMX_PROCBASED_CTLS     0x00000482
-#define MSR_IA32_VMX_EXIT_CTLS          0x00000483
-#define MSR_IA32_VMX_ENTRY_CTLS         0x00000484
-#define MSR_IA32_VMX_MISC               0x00000485
-#define MSR_IA32_VMX_CR0_FIXED0         0x00000486
-#define MSR_IA32_VMX_CR0_FIXED1         0x00000487
-#define MSR_IA32_VMX_CR4_FIXED0         0x00000488
-#define MSR_IA32_VMX_CR4_FIXED1         0x00000489
-#define MSR_IA32_VMX_VMCS_ENUM          0x0000048a
-#define MSR_IA32_VMX_PROCBASED_CTLS2    0x0000048b
-#define MSR_IA32_VMX_EPT_VPID_CAP       0x0000048c
-#define MSR_IA32_VMX_TRUE_PINBASED_CTLS  0x0000048d
-#define MSR_IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048e
-#define MSR_IA32_VMX_TRUE_EXIT_CTLS      0x0000048f
-#define MSR_IA32_VMX_TRUE_ENTRY_CTLS     0x00000490
-#define MSR_IA32_VMX_VMFUNC             0x00000491
-
-/* VMX_BASIC bits and bitmasks */
-#define VMX_BASIC_VMCS_SIZE_SHIFT      32
-#define VMX_BASIC_TRUE_CTLS            (1ULL << 55)
-#define VMX_BASIC_64           0x0001000000000000LLU
-#define VMX_BASIC_MEM_TYPE_SHIFT       50
-#define VMX_BASIC_MEM_TYPE_MASK        0x003c000000000000LLU
-#define VMX_BASIC_MEM_TYPE_WB  6LLU
-#define VMX_BASIC_INOUT                0x0040000000000000LLU
-
-/* MSR_IA32_VMX_MISC bits */
-#define MSR_IA32_VMX_MISC_VMWRITE_SHADOW_RO_FIELDS (1ULL << 29)
-#define MSR_IA32_VMX_MISC_PREEMPTION_TIMER_SCALE   0x1F
-/* AMD-V MSRs */
-
-#define MSR_VM_CR                       0xc0010114
-#define MSR_VM_IGNNE                    0xc0010115
-#define MSR_VM_HSAVE_PA                 0xc0010117
-
-#endif /* _ASM_X86_MSR_INDEX_H */
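
The MSR numbers from the deleted header (now kept in arch/x86/include/asm/msr-index.h) remain reachable from user space through the msr driver. A small sketch reading EFER on CPU 0, assuming the msr module is loaded and the process has the needed privileges:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#define MSR_EFER 0xc0000080	/* same value the header defined */

int main(void)
{
	uint64_t efer;
	int fd = open("/dev/cpu/0/msr", O_RDONLY);

	/* The msr device uses the MSR number as the file offset. */
	if (fd < 0 || pread(fd, &efer, sizeof(efer), MSR_EFER) != sizeof(efer)) {
		perror("msr");
		return 1;
	}
	printf("EFER=%#llx NX=%d LMA=%d\n", (unsigned long long)efer,
	       !!(efer & (1 << 11)), !!(efer & (1 << 10)));
	return 0;
}
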
index 155e51048fa4067581ba8fbf8bafa108e193f8ec..c41f4fe25483ae7eed13eb9ee3e65a7aa9f6edbe 100644 (file)
@@ -1,8 +1,6 @@
 #ifndef _UAPI_ASM_X86_MSR_H
 #define _UAPI_ASM_X86_MSR_H
 
-#include <asm/msr-index.h>
-
 #ifndef __ASSEMBLY__
 
 #include <linux/types.h>
index d0acb658c8f43950807dbf39a57e84c73bf34f06..7528dcf59691652571283d186a52f50abe4270a6 100644 (file)
@@ -103,7 +103,7 @@ struct mtrr_state_type {
 #define MTRRIOC_GET_PAGE_ENTRY   _IOWR(MTRR_IOCTL_BASE, 8, struct mtrr_gentry)
 #define MTRRIOC_KILL_PAGE_ENTRY  _IOW(MTRR_IOCTL_BASE,  9, struct mtrr_sentry)
 
-/*  These are the region types  */
+/* MTRR memory types, as defined in the Intel SDM */
 #define MTRR_TYPE_UNCACHABLE 0
 #define MTRR_TYPE_WRCOMB     1
 /*#define MTRR_TYPE_         2*/
@@ -113,5 +113,11 @@ struct mtrr_state_type {
 #define MTRR_TYPE_WRBACK     6
 #define MTRR_NUM_TYPES       7
 
+/*
+ * Invalid MTRR memory type.  mtrr_type_lookup() returns this value when
+ * MTRRs are disabled.  Note, this value is allocated from the reserved
+ * values (0x7-0xff) of the MTRR memory types.
+ */
+#define MTRR_TYPE_INVALID    0xff
 
 #endif /* _UAPI_ASM_X86_MTRR_H */
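
A hedged sketch of a consumer of the new constant; the three-argument mtrr_type_lookup() signature used here is an assumption based on the comment, not part of this hunk:

#include <linux/types.h>
#include <asm/mtrr.h>	/* assumption: declares mtrr_type_lookup() */

/* The point of MTRR_TYPE_INVALID: it means "no MTRR information"
 * (MTRRs disabled), never an actual cache type. */
static bool range_is_writeback(u64 start, u64 end)
{
	u8 uniform;
	u8 type = mtrr_type_lookup(start, end, &uniform);	/* assumed signature */

	if (type == MTRR_TYPE_INVALID)
		return false;	/* MTRRs disabled */

	return type == MTRR_TYPE_WRBACK;
}
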
index 16dc4e8a2cd34845042445915f9e9d74d90546c6..0e8a973de9ee8aec0c555a5e9e8b23348e2cc10b 100644 (file)
@@ -25,7 +25,7 @@ struct _fpx_sw_bytes {
        __u32 extended_size;    /* total size of the layout referred by
                                 * fpstate pointer in the sigcontext.
                                 */
-       __u64 xstate_bv;
+       __u64 xfeatures;
                                /* feature bit mask (including fp/sse/extended
                                 * state) that is present in the memory
                                 * layout.
@@ -209,8 +209,8 @@ struct sigcontext {
 
 #endif /* !__i386__ */
 
-struct _xsave_hdr {
-       __u64 xstate_bv;
+struct _header {
+       __u64 xfeatures;
        __u64 reserved1[2];
        __u64 reserved2[5];
 };
@@ -228,7 +228,7 @@ struct _ymmh_state {
  */
 struct _xstate {
        struct _fpstate fpstate;
-       struct _xsave_hdr xstate_hdr;
+       struct _header xstate_hdr;
        struct _ymmh_state ymmh;
        /* new processor state extensions go here */
 };
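
The rename is user-visible through the extended signal frame. A rough user-space sketch of where the renamed field lands, assuming the kernel uapi <asm/sigcontext.h> layout is in scope (it can clash with glibc's own definitions) and that the kernel actually wrote an extended frame; neither is guaranteed by this hunk:

#include <signal.h>
#include <stdio.h>
#include <ucontext.h>
#include <asm/sigcontext.h>	/* assumption: uapi structs usable here */

/* Sketch, x86-64 layout assumption: the fpregs pointer in the saved
 * context points at the _fpstate that begins a struct _xstate, whose
 * header now carries "xfeatures" instead of "xstate_bv".
 * (printf is not async-signal-safe; illustration only.) */
static void handler(int sig, siginfo_t *si, void *uc_void)
{
	ucontext_t *uc = uc_void;
	struct _xstate *xs = (void *)uc->uc_mcontext.fpregs;

	if (xs)
		printf("xfeatures=%#llx\n",
		       (unsigned long long)xs->xstate_hdr.xfeatures);
}
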
index 9bcd0b56ca1775aa82a9dee3a47614461bb7a881..0f15af41bd80b764c80f90f0153a96e136a2106e 100644 (file)
@@ -22,7 +22,7 @@ KASAN_SANITIZE_dumpstack_$(BITS).o := n
 
 CFLAGS_irq.o := -I$(src)/../include/asm/trace
 
-obj-y                  := process_$(BITS).o signal.o entry_$(BITS).o
+obj-y                  := process_$(BITS).o signal.o
 obj-y                  += traps.o irq.o irq_$(BITS).o dumpstack_$(BITS).o
 obj-y                  += time.o ioport.o ldt.o dumpstack.o nmi.o
 obj-y                  += setup.o x86_init.o i8259.o irqinit.o jump_label.o
@@ -31,9 +31,6 @@ obj-y                 += probe_roms.o
 obj-$(CONFIG_X86_32)   += i386_ksyms_32.o
 obj-$(CONFIG_X86_64)   += sys_x86_64.o x8664_ksyms_64.o
 obj-$(CONFIG_X86_64)   += mcount_64.o
-obj-y                  += syscall_$(BITS).o vsyscall_gtod.o
-obj-$(CONFIG_IA32_EMULATION)   += syscall_32.o
-obj-$(CONFIG_X86_VSYSCALL_EMULATION)   += vsyscall_64.o vsyscall_emu_64.o
 obj-$(CONFIG_X86_ESPFIX64)     += espfix_64.o
 obj-$(CONFIG_SYSFS)    += ksysfs.o
 obj-y                  += bootflag.o e820.o
@@ -44,7 +41,7 @@ obj-y                 += pci-iommu_table.o
 obj-y                  += resource.o
 
 obj-y                          += process.o
-obj-y                          += i387.o xsave.o
+obj-y                          += fpu/
 obj-y                          += ptrace.o
 obj-$(CONFIG_X86_32)           += tls.o
 obj-$(CONFIG_IA32_EMULATION)   += tls.o
index dbe76a14c3c9909e50ad51ff14e2eefd31cdf101..e49ee24da85e17b247b8decc355dd15393e8f1ad 100644 (file)
 #include <linux/module.h>
 #include <linux/dmi.h>
 #include <linux/irq.h>
-#include <linux/irqdomain.h>
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 #include <linux/ioport.h>
 #include <linux/pci.h>
 
+#include <asm/irqdomain.h>
 #include <asm/pci_x86.h>
 #include <asm/pgtable.h>
 #include <asm/io_apic.h>
@@ -400,57 +400,13 @@ static int mp_config_acpi_gsi(struct device *dev, u32 gsi, int trigger,
        return 0;
 }
 
-static int mp_register_gsi(struct device *dev, u32 gsi, int trigger,
-                          int polarity)
-{
-       int irq, node;
-
-       if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-               return gsi;
-
-       trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
-       polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
-       node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
-       if (mp_set_gsi_attr(gsi, trigger, polarity, node)) {
-               pr_warn("Failed to set pin attr for GSI%d\n", gsi);
-               return -1;
-       }
-
-       irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC);
-       if (irq < 0)
-               return irq;
-
-       /* Don't set up the ACPI SCI because it's already set up */
-       if (enable_update_mptable && acpi_gbl_FADT.sci_interrupt != gsi)
-               mp_config_acpi_gsi(dev, gsi, trigger, polarity);
-
-       return irq;
-}
-
-static void mp_unregister_gsi(u32 gsi)
-{
-       int irq;
-
-       if (acpi_irq_model != ACPI_IRQ_MODEL_IOAPIC)
-               return;
-
-       irq = mp_map_gsi_to_irq(gsi, 0);
-       if (irq > 0)
-               mp_unmap_irq(irq);
-}
-
-static struct irq_domain_ops acpi_irqdomain_ops = {
-       .map = mp_irqdomain_map,
-       .unmap = mp_irqdomain_unmap,
-};
-
 static int __init
 acpi_parse_ioapic(struct acpi_subtable_header * header, const unsigned long end)
 {
        struct acpi_madt_io_apic *ioapic = NULL;
        struct ioapic_domain_cfg cfg = {
                .type = IOAPIC_DOMAIN_DYNAMIC,
-               .ops = &acpi_irqdomain_ops,
+               .ops = &mp_ioapic_irqdomain_ops,
        };
 
        ioapic = (struct acpi_madt_io_apic *)header;
@@ -652,7 +608,7 @@ static int acpi_register_gsi_pic(struct device *dev, u32 gsi,
         * Make sure all (legacy) PCI IRQs are set as level-triggered.
         */
        if (trigger == ACPI_LEVEL_SENSITIVE)
-               eisa_set_level_irq(gsi);
+               elcr_set_level_irq(gsi);
 #endif
 
        return gsi;
@@ -663,10 +619,21 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
                                    int trigger, int polarity)
 {
        int irq = gsi;
-
 #ifdef CONFIG_X86_IO_APIC
+       int node;
+       struct irq_alloc_info info;
+
+       node = dev ? dev_to_node(dev) : NUMA_NO_NODE;
+       trigger = trigger == ACPI_EDGE_SENSITIVE ? 0 : 1;
+       polarity = polarity == ACPI_ACTIVE_HIGH ? 0 : 1;
+       ioapic_set_alloc_attr(&info, node, trigger, polarity);
+
        mutex_lock(&acpi_ioapic_lock);
-       irq = mp_register_gsi(dev, gsi, trigger, polarity);
+       irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info);
+       /* Don't set up the ACPI SCI because it's already set up */
+       if (irq >= 0 && enable_update_mptable &&
+           acpi_gbl_FADT.sci_interrupt != gsi)
+               mp_config_acpi_gsi(dev, gsi, trigger, polarity);
        mutex_unlock(&acpi_ioapic_lock);
 #endif
 
@@ -676,8 +643,12 @@ static int acpi_register_gsi_ioapic(struct device *dev, u32 gsi,
 static void acpi_unregister_gsi_ioapic(u32 gsi)
 {
 #ifdef CONFIG_X86_IO_APIC
+       int irq;
+
        mutex_lock(&acpi_ioapic_lock);
-       mp_unregister_gsi(gsi);
+       irq = mp_map_gsi_to_irq(gsi, 0, NULL);
+       if (irq > 0)
+               mp_unmap_irq(irq);
        mutex_unlock(&acpi_ioapic_lock);
 #endif
 }
@@ -786,7 +757,7 @@ int acpi_register_ioapic(acpi_handle handle, u64 phys_addr, u32 gsi_base)
        u64 addr;
        struct ioapic_domain_cfg cfg = {
                .type = IOAPIC_DOMAIN_DYNAMIC,
-               .ops = &acpi_irqdomain_ops,
+               .ops = &mp_ioapic_irqdomain_ops,
        };
 
        ioapic_id = acpi_get_ioapic_id(handle, gsi_base, &addr);
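
The irqdomain rework in this file is internal to the ioapic path; callers keep using the unchanged acpi_register_gsi() entry point, which now funnels node/trigger/polarity through struct irq_alloc_info. A driver-side sketch (the example_* name is illustrative):

#include <linux/acpi.h>

/* Mapping a GSI to a Linux IRQ looks the same as before the rework. */
static int example_map_gsi(struct device *dev, u32 gsi)
{
	int irq = acpi_register_gsi(dev, gsi, ACPI_LEVEL_SENSITIVE,
				    ACPI_ACTIVE_LOW);

	if (irq < 0)
		return irq;

	/* request_irq(irq, handler, IRQF_SHARED, "example", dev) as usual */
	return irq;
}
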
index ae693b51ed8ed589660570a77cfb5654af924388..8c35df4681041eee92f14020de8f795a8b7d710c 100644 (file)
@@ -62,7 +62,7 @@ ENTRY(do_suspend_lowlevel)
        pushfq
        popq    pt_regs_flags(%rax)
 
-       movq    $resume_point, saved_rip(%rip)
+       movq    $.Lresume_point, saved_rip(%rip)
 
        movq    %rsp, saved_rsp
        movq    %rbp, saved_rbp
@@ -75,10 +75,10 @@ ENTRY(do_suspend_lowlevel)
        xorl    %eax, %eax
        call    x86_acpi_enter_sleep_state
        /* in case something went wrong, restore the machine status and go on */
-       jmp     resume_point
+       jmp     .Lresume_point
 
        .align 4
-resume_point:
+.Lresume_point:
        /* We don't restore %rax, it must be 0 anyway */
        movq    $saved_context, %rax
        movq    saved_context_cr4(%rax), %rbx
index aef65319316065eab845f35141682c3550f18a22..c42827eb86cf0c52c36389d0c26dc937776b03bf 100644 (file)
 #include <asm/io.h>
 #include <asm/fixmap.h>
 
+int __read_mostly alternatives_patched;
+
+EXPORT_SYMBOL_GPL(alternatives_patched);
+
 #define MAX_PATCH_LEN (255-1)
 
 static int __initdata_or_module debug_alternative;
@@ -227,6 +231,15 @@ void __init arch_init_ideal_nops(void)
 #endif
                }
                break;
+
+       case X86_VENDOR_AMD:
+               if (boot_cpu_data.x86 > 0xf) {
+                       ideal_nops = p6_nops;
+                       return;
+               }
+
+               /* fall through */
+
        default:
 #ifdef CONFIG_X86_64
                ideal_nops = k8_nops;
@@ -627,6 +640,7 @@ void __init alternative_instructions(void)
        apply_paravirt(__parainstructions, __parainstructions_end);
 
        restart_nmi();
+       alternatives_patched = 1;
 }
 
 /**
index 5caed1dd7ccf89e6595fc4a7db7d1d5b7f92f0a6..29fa475ec51823e61a9e94f34142e37341a1208b 100644 (file)
@@ -89,9 +89,7 @@ int amd_cache_northbridges(void)
                        next_northbridge(link, amd_nb_link_ids);
        }
 
-       /* GART present only on Fam15h upto model 0fh */
-       if (boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
-           (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10))
+       if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;
 
        /*
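
The open-coded family test moves behind amd_gart_present(). A hedged reconstruction of what that helper presumably checks, based solely on the deleted lines and their comment; the real helper may differ in detail:

#include <linux/types.h>
#include <asm/processor.h>

/* GART exists on K8 (Fam 0xf), Fam 0x10, and Fam 0x15 up to model 0x0f. */
static bool amd_gart_present(void)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
		return false;

	return boot_cpu_data.x86 == 0xf || boot_cpu_data.x86 == 0x10 ||
	       (boot_cpu_data.x86 == 0x15 && boot_cpu_data.x86_model < 0x10);
}
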
index 6a7c23ff21d3de8ccc906b41bceec57caaea414e..ede92c3364d3277fc3ea378695030eea47ca9fc8 100644 (file)
@@ -171,10 +171,6 @@ static int __init apbt_clockevent_register(void)
 
 static void apbt_setup_irq(struct apbt_dev *adev)
 {
-       /* timer0 irq has been setup early */
-       if (adev->irq == 0)
-               return;
-
        irq_modify_status(adev->irq, 0, IRQ_MOVE_PCNTXT);
        irq_set_affinity(adev->irq, cpumask_of(adev->cpu));
 }
index 76164e173a24f7251e3e684e66aa8afd7792f1e1..6e85f713641dda77bd2d9dfeb99fa10cdbe0cb0d 100644 (file)
@@ -262,6 +262,9 @@ void __init early_gart_iommu_check(void)
        u64 aper_base = 0, last_aper_base = 0;
        int aper_enabled = 0, last_aper_enabled = 0, last_valid = 0;
 
+       if (!amd_gart_present())
+               return;
+
        if (!early_pci_allowed())
                return;
 
@@ -355,6 +358,9 @@ int __init gart_iommu_hole_init(void)
        int fix, slot, valid_agp = 0;
        int i, node;
 
+       if (!amd_gart_present())
+               return -ENODEV;
+
        if (gart_iommu_aperture_disabled || !fix_aperture ||
            !early_pci_allowed())
                return -ENODEV;
@@ -452,7 +458,7 @@ out:
                   force_iommu ||
                   valid_agp ||
                   fallback_aper_force) {
-               pr_info("Your BIOS doesn't leave a aperture memory hole\n");
+               pr_info("Your BIOS doesn't leave an aperture memory hole\n");
                pr_info("Please enable the IOMMU option in the BIOS setup\n");
                pr_info("This costs you %dMB of RAM\n",
                        32 << fallback_aper_order);
index 816f36e979ad03c6b052b1ec5c1ca34b6dc566e4..ae50d3454d7874e98f1e71e3402fb786dacb67c7 100644 (file)
@@ -3,6 +3,8 @@
  *
  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *     Moved from arch/x86/kernel/apic/io_apic.c.
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ *     Added support for hierarchical irqdomains
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/device.h>
 #include <linux/pci.h>
 #include <linux/htirq.h>
+#include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
 #include <asm/apic.h>
 #include <asm/hypertransport.h>
 
+static struct irq_domain *htirq_domain;
+
 /*
  * Hypertransport interrupt support
  */
-static void target_ht_irq(unsigned int irq, unsigned int dest, u8 vector)
-{
-       struct ht_irq_msg msg;
-
-       fetch_ht_irq_msg(irq, &msg);
-
-       msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK | HT_IRQ_LOW_DEST_ID_MASK);
-       msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
-
-       msg.address_lo |= HT_IRQ_LOW_VECTOR(vector) | HT_IRQ_LOW_DEST_ID(dest);
-       msg.address_hi |= HT_IRQ_HIGH_DEST_ID(dest);
-
-       write_ht_irq_msg(irq, &msg);
-}
-
 static int
 ht_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       unsigned int dest;
+       struct irq_data *parent = data->parent_data;
        int ret;
 
-       ret = apic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
-
-       target_ht_irq(data->irq, dest, cfg->vector);
-       return IRQ_SET_MASK_OK_NOCOPY;
+       ret = parent->chip->irq_set_affinity(parent, mask, force);
+       if (ret >= 0) {
+               struct ht_irq_msg msg;
+               struct irq_cfg *cfg = irqd_cfg(data);
+
+               fetch_ht_irq_msg(data->irq, &msg);
+               msg.address_lo &= ~(HT_IRQ_LOW_VECTOR_MASK |
+                                   HT_IRQ_LOW_DEST_ID_MASK);
+               msg.address_lo |= HT_IRQ_LOW_VECTOR(cfg->vector) |
+                                 HT_IRQ_LOW_DEST_ID(cfg->dest_apicid);
+               msg.address_hi &= ~(HT_IRQ_HIGH_DEST_ID_MASK);
+               msg.address_hi |= HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
+               write_ht_irq_msg(data->irq, &msg);
+       }
+
+       return ret;
 }
 
 static struct irq_chip ht_irq_chip = {
        .name                   = "PCI-HT",
        .irq_mask               = mask_ht_irq,
        .irq_unmask             = unmask_ht_irq,
-       .irq_ack                = apic_ack_edge,
+       .irq_ack                = irq_chip_ack_parent,
        .irq_set_affinity       = ht_set_affinity,
-       .irq_retrigger          = apic_retrigger_irq,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
+static int htirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                             unsigned int nr_irqs, void *arg)
 {
-       struct irq_cfg *cfg;
-       struct ht_irq_msg msg;
-       unsigned dest;
-       int err;
+       struct ht_irq_cfg *ht_cfg;
+       struct irq_alloc_info *info = arg;
+       struct pci_dev *dev;
+       irq_hw_number_t hwirq;
+       int ret;
 
-       if (disable_apic)
-               return -ENXIO;
+       if (nr_irqs > 1 || !info)
+               return -EINVAL;
 
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, apic->target_cpus());
-       if (err)
-               return err;
+       dev = info->ht_dev;
+       hwirq = (info->ht_idx & 0xFF) |
+               PCI_DEVID(dev->bus->number, dev->devfn) << 8 |
+               (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 24;
+       if (irq_find_mapping(domain, hwirq) > 0)
+               return -EEXIST;
 
-       err = apic->cpu_mask_to_apicid_and(cfg->domain,
-                                          apic->target_cpus(), &dest);
-       if (err)
-               return err;
+       ht_cfg = kmalloc(sizeof(*ht_cfg), GFP_KERNEL);
+       if (!ht_cfg)
+               return -ENOMEM;
 
-       msg.address_hi = HT_IRQ_HIGH_DEST_ID(dest);
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
+       if (ret < 0) {
+               kfree(ht_cfg);
+               return ret;
+       }
+
+       /* Initialize msg to a value that will never match the first write. */
+       ht_cfg->msg.address_lo = 0xffffffff;
+       ht_cfg->msg.address_hi = 0xffffffff;
+       ht_cfg->dev = info->ht_dev;
+       ht_cfg->update = info->ht_update;
+       ht_cfg->pos = info->ht_pos;
+       ht_cfg->idx = 0x10 + (info->ht_idx * 2);
+       irq_domain_set_info(domain, virq, hwirq, &ht_irq_chip, ht_cfg,
+                           handle_edge_irq, ht_cfg, "edge");
+
+       return 0;
+}
+
+static void htirq_domain_free(struct irq_domain *domain, unsigned int virq,
+                             unsigned int nr_irqs)
+{
+       struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+
+       BUG_ON(nr_irqs != 1);
+       kfree(irq_data->chip_data);
+       irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
 
+static void htirq_domain_activate(struct irq_domain *domain,
+                                 struct irq_data *irq_data)
+{
+       struct ht_irq_msg msg;
+       struct irq_cfg *cfg = irqd_cfg(irq_data);
+
+       msg.address_hi = HT_IRQ_HIGH_DEST_ID(cfg->dest_apicid);
        msg.address_lo =
                HT_IRQ_LOW_BASE |
-               HT_IRQ_LOW_DEST_ID(dest) |
+               HT_IRQ_LOW_DEST_ID(cfg->dest_apicid) |
                HT_IRQ_LOW_VECTOR(cfg->vector) |
                ((apic->irq_dest_mode == 0) ?
                        HT_IRQ_LOW_DM_PHYSICAL :
@@ -95,13 +131,56 @@ int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev)
                        HT_IRQ_LOW_MT_FIXED :
                        HT_IRQ_LOW_MT_ARBITRATED) |
                HT_IRQ_LOW_IRQ_MASKED;
+       write_ht_irq_msg(irq_data->irq, &msg);
+}
 
-       write_ht_irq_msg(irq, &msg);
+static void htirq_domain_deactivate(struct irq_domain *domain,
+                                   struct irq_data *irq_data)
+{
+       struct ht_irq_msg msg;
 
-       irq_set_chip_and_handler_name(irq, &ht_irq_chip,
-                                     handle_edge_irq, "edge");
+       memset(&msg, 0, sizeof(msg));
+       write_ht_irq_msg(irq_data->irq, &msg);
+}
 
-       dev_dbg(&dev->dev, "irq %d for HT\n", irq);
+static const struct irq_domain_ops htirq_domain_ops = {
+       .alloc          = htirq_domain_alloc,
+       .free           = htirq_domain_free,
+       .activate       = htirq_domain_activate,
+       .deactivate     = htirq_domain_deactivate,
+};
 
-       return 0;
+void arch_init_htirq_domain(struct irq_domain *parent)
+{
+       if (disable_apic)
+               return;
+
+       htirq_domain = irq_domain_add_tree(NULL, &htirq_domain_ops, NULL);
+       if (!htirq_domain)
+               pr_warn("failed to initialize irqdomain for HTIRQ.\n");
+       else
+               htirq_domain->parent = parent;
+}
+
+int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
+                     ht_irq_update_t *update)
+{
+       struct irq_alloc_info info;
+
+       if (!htirq_domain)
+               return -ENOSYS;
+
+       init_irq_alloc_info(&info, NULL);
+       info.ht_idx = idx;
+       info.ht_pos = pos;
+       info.ht_dev = dev;
+       info.ht_update = update;
+
+       return irq_domain_alloc_irqs(htirq_domain, 1, dev_to_node(&dev->dev),
+                                    &info);
+}
+
+void arch_teardown_ht_irq(unsigned int irq)
+{
+       irq_domain_free_irqs(irq, 1);
 }
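
htirq_domain_alloc() packs a stable hwirq from the HT index, the PCI device ID and the PCI segment. A hedged decoding sketch mirroring that layout, for illustration only:

#include <linux/irqdomain.h>
#include <linux/pci.h>

/* Packing per htirq_domain_alloc(): bits 7:0 = ht_idx,
 * bits 23:8 = PCI_DEVID(bus, devfn), bits 24+ = PCI segment. */
static void decode_ht_hwirq(irq_hw_number_t hwirq)
{
	unsigned int idx   = hwirq & 0xff;
	unsigned int devfn = (hwirq >> 8) & 0xff;
	unsigned int bus   = (hwirq >> 16) & 0xff;
	unsigned int seg   = hwirq >> 24;

	pr_debug("HT idx %u on %04x:%02x:%02x.%u\n",
		 idx, seg, bus, PCI_SLOT(devfn), PCI_FUNC(devfn));
}
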
index f4dc2462a1ac410803cd94ff4944ebf23c636fa9..845dc0df2002472275a39e421502cdb0768c54a1 100644 (file)
  *                                     and Rolf G. Tews
  *                                     for testing these extensively
  *     Paul Diefenbaugh        :       Added full ACPI support
+ *
+ * Historical information worth preserving:
+ *
+ * - SiS APIC rmw bug:
+ *
+ *     We used to have a workaround for a bug in SiS chips which
+ *     required rewriting the index register for a read-modify-write
+ *     operation because the chip lost the index information that had
+ *     already been set up for the read. We cache the data now, so
+ *     that workaround has been removed.
  */
 
 #include <linux/mm.h>
 #include <linux/acpi.h>
 #include <linux/module.h>
 #include <linux/syscore_ops.h>
-#include <linux/irqdomain.h>
 #include <linux/freezer.h>
 #include <linux/kthread.h>
 #include <linux/jiffies.h>     /* time_after() */
 #include <linux/slab.h>
 #include <linux/bootmem.h>
 
+#include <asm/irqdomain.h>
 #include <asm/idle.h>
 #include <asm/io.h>
 #include <asm/smp.h>
 #define        for_each_ioapic_pin(idx, pin)   \
        for_each_ioapic((idx))          \
                for_each_pin((idx), (pin))
-
 #define for_each_irq_pin(entry, head) \
        list_for_each_entry(entry, &head, list)
 
-/*
- *      Is the SiS APIC rmw bug present ?
- *      -1 = don't know, 0 = no, 1 = yes
- */
-int sis_apic_bug = -1;
-
 static DEFINE_RAW_SPINLOCK(ioapic_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
 
-struct mp_pin_info {
+struct irq_pin_list {
+       struct list_head list;
+       int apic, pin;
+};
+
+struct mp_chip_data {
+       struct list_head irq_2_pin;
+       struct IO_APIC_route_entry entry;
        int trigger;
        int polarity;
-       int node;
-       int set;
        u32 count;
+       bool isa_irq;
+};
+
+struct mp_ioapic_gsi {
+       u32 gsi_base;
+       u32 gsi_end;
 };
 
 static struct ioapic {
@@ -101,7 +115,6 @@ static struct ioapic {
        struct mp_ioapic_gsi  gsi_config;
        struct ioapic_domain_cfg irqdomain_cfg;
        struct irq_domain *irqdomain;
-       struct mp_pin_info *pin_info;
        struct resource *iomem_res;
 } ioapics[MAX_IO_APICS];
 
@@ -117,7 +130,7 @@ unsigned int mpc_ioapic_addr(int ioapic_idx)
        return ioapics[ioapic_idx].mp_config.apicaddr;
 }
 
-struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
+static inline struct mp_ioapic_gsi *mp_ioapic_gsi_routing(int ioapic_idx)
 {
        return &ioapics[ioapic_idx].gsi_config;
 }
@@ -129,11 +142,16 @@ static inline int mp_ioapic_pin_count(int ioapic)
        return gsi_cfg->gsi_end - gsi_cfg->gsi_base + 1;
 }
 
-u32 mp_pin_to_gsi(int ioapic, int pin)
+static inline u32 mp_pin_to_gsi(int ioapic, int pin)
 {
        return mp_ioapic_gsi_routing(ioapic)->gsi_base + pin;
 }
 
+static inline bool mp_is_legacy_irq(int irq)
+{
+       return irq >= 0 && irq < nr_legacy_irqs();
+}
+
 /*
  * Initialize all legacy IRQs and all pins on the first IOAPIC
  * if we have legacy interrupt controller. Kernel boot option "pirq="
@@ -144,12 +162,7 @@ static inline int mp_init_irq_at_boot(int ioapic, int irq)
        if (!nr_legacy_irqs())
                return 0;
 
-       return ioapic == 0 || (irq >= 0 && irq < nr_legacy_irqs());
-}
-
-static inline struct mp_pin_info *mp_pin_info(int ioapic_idx, int pin)
-{
-       return ioapics[ioapic_idx].pin_info + pin;
+       return ioapic == 0 || mp_is_legacy_irq(irq);
 }
 
 static inline struct irq_domain *mp_ioapic_irqdomain(int ioapic)
@@ -216,16 +229,6 @@ void mp_save_irq(struct mpc_intsrc *m)
                panic("Max # of irq sources exceeded!!\n");
 }
 
-struct irq_pin_list {
-       struct list_head list;
-       int apic, pin;
-};
-
-static struct irq_pin_list *alloc_irq_pin_list(int node)
-{
-       return kzalloc_node(sizeof(struct irq_pin_list), GFP_KERNEL, node);
-}
-
 static void alloc_ioapic_saved_registers(int idx)
 {
        size_t size;
@@ -247,8 +250,7 @@ static void free_ioapic_saved_registers(int idx)
 
 int __init arch_early_ioapic_init(void)
 {
-       struct irq_cfg *cfg;
-       int i, node = cpu_to_node(0);
+       int i;
 
        if (!nr_legacy_irqs())
                io_apic_irqs = ~0UL;
@@ -256,16 +258,6 @@ int __init arch_early_ioapic_init(void)
        for_each_ioapic(i)
                alloc_ioapic_saved_registers(i);
 
-       /*
-        * For legacy IRQ's, start with assigning irq0 to irq15 to
-        * IRQ0_VECTOR to IRQ15_VECTOR for all cpu's.
-        */
-       for (i = 0; i < nr_legacy_irqs(); i++) {
-               cfg = alloc_irq_and_cfg_at(i, node);
-               cfg->vector = IRQ0_VECTOR + i;
-               cpumask_setall(cfg->domain);
-       }
-
        return 0;
 }
 
@@ -283,7 +275,7 @@ static __attribute_const__ struct io_apic __iomem *io_apic_base(int idx)
                + (mpc_ioapic_addr(idx) & ~PAGE_MASK);
 }
 
-void io_apic_eoi(unsigned int apic, unsigned int vector)
+static inline void io_apic_eoi(unsigned int apic, unsigned int vector)
 {
        struct io_apic __iomem *io_apic = io_apic_base(apic);
        writel(vector, &io_apic->eoi);
@@ -296,7 +288,8 @@ unsigned int native_io_apic_read(unsigned int apic, unsigned int reg)
        return readl(&io_apic->data);
 }
 
-void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int value)
+static void io_apic_write(unsigned int apic, unsigned int reg,
+                         unsigned int value)
 {
        struct io_apic __iomem *io_apic = io_apic_base(apic);
 
@@ -304,21 +297,6 @@ void native_io_apic_write(unsigned int apic, unsigned int reg, unsigned int valu
        writel(value, &io_apic->data);
 }
 
-/*
- * Re-write a value: to be used for read-modify-write
- * cycles where the read already set up the index register.
- *
- * Older SiS APIC requires we rewrite the index register
- */
-void native_io_apic_modify(unsigned int apic, unsigned int reg, unsigned int value)
-{
-       struct io_apic __iomem *io_apic = io_apic_base(apic);
-
-       if (sis_apic_bug)
-               writel(reg, &io_apic->index);
-       writel(value, &io_apic->data);
-}
-
 union entry_union {
        struct { u32 w1, w2; };
        struct IO_APIC_route_entry entry;
@@ -378,7 +356,7 @@ static void ioapic_write_entry(int apic, int pin, struct IO_APIC_route_entry e)
 static void ioapic_mask_entry(int apic, int pin)
 {
        unsigned long flags;
-       union entry_union eu = { .entry.mask = 1 };
+       union entry_union eu = { .entry.mask = IOAPIC_MASKED };
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
        io_apic_write(apic, 0x10 + 2*pin, eu.w1);
@@ -391,16 +369,17 @@ static void ioapic_mask_entry(int apic, int pin)
  * shared ISA-space IRQs, so we have to support them. We are super
  * fast in the common case, and fast for shared ISA-space IRQs.
  */
-static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+static int __add_pin_to_irq_node(struct mp_chip_data *data,
+                                int node, int apic, int pin)
 {
        struct irq_pin_list *entry;
 
        /* don't allow duplicates */
-       for_each_irq_pin(entry, cfg->irq_2_pin)
+       for_each_irq_pin(entry, data->irq_2_pin)
                if (entry->apic == apic && entry->pin == pin)
                        return 0;
 
-       entry = alloc_irq_pin_list(node);
+       entry = kzalloc_node(sizeof(struct irq_pin_list), GFP_ATOMIC, node);
        if (!entry) {
                pr_err("can not alloc irq_pin_list (%d,%d,%d)\n",
                       node, apic, pin);
@@ -408,16 +387,16 @@ static int __add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pi
        }
        entry->apic = apic;
        entry->pin = pin;
+       list_add_tail(&entry->list, &data->irq_2_pin);
 
-       list_add_tail(&entry->list, &cfg->irq_2_pin);
        return 0;
 }
 
-static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
+static void __remove_pin_from_irq(struct mp_chip_data *data, int apic, int pin)
 {
        struct irq_pin_list *tmp, *entry;
 
-       list_for_each_entry_safe(entry, tmp, &cfg->irq_2_pin, list)
+       list_for_each_entry_safe(entry, tmp, &data->irq_2_pin, list)
                if (entry->apic == apic && entry->pin == pin) {
                        list_del(&entry->list);
                        kfree(entry);
@@ -425,22 +404,23 @@ static void __remove_pin_from_irq(struct irq_cfg *cfg, int apic, int pin)
                }
 }
 
-static void add_pin_to_irq_node(struct irq_cfg *cfg, int node, int apic, int pin)
+static void add_pin_to_irq_node(struct mp_chip_data *data,
+                               int node, int apic, int pin)
 {
-       if (__add_pin_to_irq_node(cfg, node, apic, pin))
+       if (__add_pin_to_irq_node(data, node, apic, pin))
                panic("IO-APIC: failed to add irq-pin. Can not proceed\n");
 }
 
 /*
  * Reroute an IRQ to a different pin.
  */
-static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
+static void __init replace_pin_at_irq_node(struct mp_chip_data *data, int node,
                                           int oldapic, int oldpin,
                                           int newapic, int newpin)
 {
        struct irq_pin_list *entry;
 
-       for_each_irq_pin(entry, cfg->irq_2_pin) {
+       for_each_irq_pin(entry, data->irq_2_pin) {
                if (entry->apic == oldapic && entry->pin == oldpin) {
                        entry->apic = newapic;
                        entry->pin = newpin;
@@ -450,32 +430,26 @@ static void __init replace_pin_at_irq_node(struct irq_cfg *cfg, int node,
        }
 
        /* old apic/pin didn't exist, so just add new ones */
-       add_pin_to_irq_node(cfg, node, newapic, newpin);
-}
-
-static void __io_apic_modify_irq(struct irq_pin_list *entry,
-                                int mask_and, int mask_or,
-                                void (*final)(struct irq_pin_list *entry))
-{
-       unsigned int reg, pin;
-
-       pin = entry->pin;
-       reg = io_apic_read(entry->apic, 0x10 + pin * 2);
-       reg &= mask_and;
-       reg |= mask_or;
-       io_apic_modify(entry->apic, 0x10 + pin * 2, reg);
-       if (final)
-               final(entry);
+       add_pin_to_irq_node(data, node, newapic, newpin);
 }
 
-static void io_apic_modify_irq(struct irq_cfg *cfg,
+static void io_apic_modify_irq(struct mp_chip_data *data,
                               int mask_and, int mask_or,
                               void (*final)(struct irq_pin_list *entry))
 {
+       union entry_union eu;
        struct irq_pin_list *entry;
 
-       for_each_irq_pin(entry, cfg->irq_2_pin)
-               __io_apic_modify_irq(entry, mask_and, mask_or, final);
+       eu.entry = data->entry;
+       eu.w1 &= mask_and;
+       eu.w1 |= mask_or;
+       data->entry = eu.entry;
+
+       for_each_irq_pin(entry, data->irq_2_pin) {
+               io_apic_write(entry->apic, 0x10 + 2 * entry->pin, eu.w1);
+               if (final)
+                       final(entry);
+       }
 }
 
 static void io_apic_sync(struct irq_pin_list *entry)
@@ -490,39 +464,31 @@ static void io_apic_sync(struct irq_pin_list *entry)
        readl(&io_apic->data);
 }
 
-static void mask_ioapic(struct irq_cfg *cfg)
+static void mask_ioapic_irq(struct irq_data *irq_data)
 {
+       struct mp_chip_data *data = irq_data->chip_data;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
-       io_apic_modify_irq(cfg, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
+       io_apic_modify_irq(data, ~0, IO_APIC_REDIR_MASKED, &io_apic_sync);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void mask_ioapic_irq(struct irq_data *data)
+static void __unmask_ioapic(struct mp_chip_data *data)
 {
-       mask_ioapic(irqd_cfg(data));
+       io_apic_modify_irq(data, ~IO_APIC_REDIR_MASKED, 0, NULL);
 }
 
-static void __unmask_ioapic(struct irq_cfg *cfg)
-{
-       io_apic_modify_irq(cfg, ~IO_APIC_REDIR_MASKED, 0, NULL);
-}
-
-static void unmask_ioapic(struct irq_cfg *cfg)
+static void unmask_ioapic_irq(struct irq_data *irq_data)
 {
+       struct mp_chip_data *data = irq_data->chip_data;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
-       __unmask_ioapic(cfg);
+       __unmask_ioapic(data);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
-static void unmask_ioapic_irq(struct irq_data *data)
-{
-       unmask_ioapic(irqd_cfg(data));
-}
-
 /*
  * IO-APIC versions below 0x20 don't support EOI register.
  * For the record, here is the information about various versions:
@@ -539,7 +505,7 @@ static void unmask_ioapic_irq(struct irq_data *data)
  * Otherwise, we simulate the EOI message manually by changing the trigger
  * mode to edge and then back to level, with RTE being masked during this.
  */
-void native_eoi_ioapic_pin(int apic, int pin, int vector)
+static void __eoi_ioapic_pin(int apic, int pin, int vector)
 {
        if (mpc_ioapic_ver(apic) >= 0x20) {
                io_apic_eoi(apic, vector);
@@ -551,7 +517,7 @@ void native_eoi_ioapic_pin(int apic, int pin, int vector)
                /*
                 * Mask the entry and change the trigger mode to edge.
                 */
-               entry1.mask = 1;
+               entry1.mask = IOAPIC_MASKED;
                entry1.trigger = IOAPIC_EDGE;
 
                __ioapic_write_entry(apic, pin, entry1);
@@ -563,15 +529,14 @@ void native_eoi_ioapic_pin(int apic, int pin, int vector)
        }
 }
 
-void eoi_ioapic_irq(unsigned int irq, struct irq_cfg *cfg)
+void eoi_ioapic_pin(int vector, struct mp_chip_data *data)
 {
-       struct irq_pin_list *entry;
        unsigned long flags;
+       struct irq_pin_list *entry;
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
-       for_each_irq_pin(entry, cfg->irq_2_pin)
-               x86_io_apic_ops.eoi_ioapic_pin(entry->apic, entry->pin,
-                                              cfg->vector);
+       for_each_irq_pin(entry, data->irq_2_pin)
+               __eoi_ioapic_pin(entry->apic, entry->pin, vector);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 }
 
@@ -588,8 +553,8 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
         * Make sure the entry is masked and re-read the contents to check
         * if it is a level triggered pin and if the remote-IRR is set.
         */
-       if (!entry.mask) {
-               entry.mask = 1;
+       if (entry.mask == IOAPIC_UNMASKED) {
+               entry.mask = IOAPIC_MASKED;
                ioapic_write_entry(apic, pin, entry);
                entry = ioapic_read_entry(apic, pin);
        }
@@ -602,13 +567,12 @@ static void clear_IO_APIC_pin(unsigned int apic, unsigned int pin)
                 * doesn't clear the remote-IRR if the trigger mode is not
                 * set to level.
                 */
-               if (!entry.trigger) {
+               if (entry.trigger == IOAPIC_EDGE) {
                        entry.trigger = IOAPIC_LEVEL;
                        ioapic_write_entry(apic, pin, entry);
                }
-
                raw_spin_lock_irqsave(&ioapic_lock, flags);
-               x86_io_apic_ops.eoi_ioapic_pin(apic, pin, entry.vector);
+               __eoi_ioapic_pin(apic, pin, entry.vector);
                raw_spin_unlock_irqrestore(&ioapic_lock, flags);
        }
 
@@ -706,8 +670,8 @@ void mask_ioapic_entries(void)
                        struct IO_APIC_route_entry entry;
 
                        entry = ioapics[apic].saved_registers[pin];
-                       if (!entry.mask) {
-                               entry.mask = 1;
+                       if (entry.mask == IOAPIC_UNMASKED) {
+                               entry.mask = IOAPIC_MASKED;
                                ioapic_write_entry(apic, pin, entry);
                        }
                }
@@ -809,11 +773,11 @@ static int EISA_ELCR(unsigned int irq)
 
 #endif
 
-/* ISA interrupts are always polarity zero edge triggered,
+/* ISA interrupts are always active high edge triggered,
  * when listed as conforming in the MP table. */
 
-#define default_ISA_trigger(idx)       (0)
-#define default_ISA_polarity(idx)      (0)
+#define default_ISA_trigger(idx)       (IOAPIC_EDGE)
+#define default_ISA_polarity(idx)      (IOAPIC_POL_HIGH)
 
 /* EISA interrupts are always polarity zero and can be edge or level
  * trigger depending on the ELCR value.  If an interrupt is listed as
@@ -823,53 +787,55 @@ static int EISA_ELCR(unsigned int irq)
 #define default_EISA_trigger(idx)      (EISA_ELCR(mp_irqs[idx].srcbusirq))
 #define default_EISA_polarity(idx)     default_ISA_polarity(idx)
 
-/* PCI interrupts are always polarity one level triggered,
+/* PCI interrupts are always active low level triggered,
  * when listed as conforming in the MP table. */
 
-#define default_PCI_trigger(idx)       (1)
-#define default_PCI_polarity(idx)      (1)
+#define default_PCI_trigger(idx)       (IOAPIC_LEVEL)
+#define default_PCI_polarity(idx)      (IOAPIC_POL_LOW)
 
 static int irq_polarity(int idx)
 {
        int bus = mp_irqs[idx].srcbus;
-       int polarity;
 
        /*
         * Determine IRQ line polarity (high active or low active):
         */
-       switch (mp_irqs[idx].irqflag & 3)
-       {
-               case 0: /* conforms, ie. bus-type dependent polarity */
-                       if (test_bit(bus, mp_bus_not_pci))
-                               polarity = default_ISA_polarity(idx);
-                       else
-                               polarity = default_PCI_polarity(idx);
-                       break;
-               case 1: /* high active */
-               {
-                       polarity = 0;
-                       break;
-               }
-               case 2: /* reserved */
-               {
-                       pr_warn("broken BIOS!!\n");
-                       polarity = 1;
-                       break;
-               }
-               case 3: /* low active */
-               {
-                       polarity = 1;
-                       break;
-               }
-               default: /* invalid */
-               {
-                       pr_warn("broken BIOS!!\n");
-                       polarity = 1;
-                       break;
-               }
+       switch (mp_irqs[idx].irqflag & 0x03) {
+       case 0:
+               /* conforms to spec, ie. bus-type dependent polarity */
+               if (test_bit(bus, mp_bus_not_pci))
+                       return default_ISA_polarity(idx);
+               else
+                       return default_PCI_polarity(idx);
+       case 1:
+               return IOAPIC_POL_HIGH;
+       case 2:
+               pr_warn("IOAPIC: Invalid polarity: 2, defaulting to low\n");
+       case 3:
+       default: /* Pointless default required due to gcc stupidity */
+               return IOAPIC_POL_LOW;
+       }
+}
+
+#ifdef CONFIG_EISA
+static int eisa_irq_trigger(int idx, int bus, int trigger)
+{
+       switch (mp_bus_id_to_type[bus]) {
+       case MP_BUS_PCI:
+       case MP_BUS_ISA:
+               return trigger;
+       case MP_BUS_EISA:
+               return default_EISA_trigger(idx);
        }
-       return polarity;
+       pr_warn("IOAPIC: Invalid srcbus: %d defaulting to level\n", bus);
+       return IOAPIC_LEVEL;
 }
+#else
+static inline int eisa_irq_trigger(int idx, int bus, int trigger)
+{
+       return trigger;
+}
+#endif
 
 static int irq_trigger(int idx)
 {
@@ -879,153 +845,227 @@ static int irq_trigger(int idx)
        /*
         * Determine IRQ trigger mode (edge or level sensitive):
         */
-       switch ((mp_irqs[idx].irqflag>>2) & 3)
-       {
-               case 0: /* conforms, ie. bus-type dependent */
-                       if (test_bit(bus, mp_bus_not_pci))
-                               trigger = default_ISA_trigger(idx);
-                       else
-                               trigger = default_PCI_trigger(idx);
-#ifdef CONFIG_EISA
-                       switch (mp_bus_id_to_type[bus]) {
-                               case MP_BUS_ISA: /* ISA pin */
-                               {
-                                       /* set before the switch */
-                                       break;
-                               }
-                               case MP_BUS_EISA: /* EISA pin */
-                               {
-                                       trigger = default_EISA_trigger(idx);
-                                       break;
-                               }
-                               case MP_BUS_PCI: /* PCI pin */
-                               {
-                                       /* set before the switch */
-                                       break;
-                               }
-                               default:
-                               {
-                                       pr_warn("broken BIOS!!\n");
-                                       trigger = 1;
-                                       break;
-                               }
-                       }
+       switch ((mp_irqs[idx].irqflag >> 2) & 0x03) {
+       case 0:
+               /* conforms to spec, ie. bus-type dependent trigger mode */
+               if (test_bit(bus, mp_bus_not_pci))
+                       trigger = default_ISA_trigger(idx);
+               else
+                       trigger = default_PCI_trigger(idx);
+               /* Take EISA into account */
+               return eisa_irq_trigger(idx, bus, trigger);
+       case 1:
+               return IOAPIC_EDGE;
+       case 2:
+               pr_warn("IOAPIC: Invalid trigger mode 2 defaulting to level\n");
+       case 3:
+       default: /* Pointless default required due to gcc stupidity */
+               return IOAPIC_LEVEL;
+       }
+}
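
As a worked example of the flag decoding above (entry values hypothetical):
an MP table entry with irqflag 0x0d has polarity bits 0x0d & 0x03 == 0x01
and trigger bits (0x0d >> 2) & 0x03 == 0x03, so irq_polarity() returns
IOAPIC_POL_HIGH and irq_trigger() returns IOAPIC_LEVEL, i.e. an active
high, level triggered line.
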
+
+void ioapic_set_alloc_attr(struct irq_alloc_info *info, int node,
+                          int trigger, int polarity)
+{
+       init_irq_alloc_info(info, NULL);
+       info->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
+       info->ioapic_node = node;
+       info->ioapic_trigger = trigger;
+       info->ioapic_polarity = polarity;
+       info->ioapic_valid = 1;
+}
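
A usage sketch for the new allocation attributes, assuming a caller that
already knows its GSI, trigger and polarity (variable names illustrative;
the ACPI GSI registration path is the kind of caller intended):

    struct irq_alloc_info info;
    int irq;

    /* describe an active-low, level-triggered GSI, no node preference */
    ioapic_set_alloc_attr(&info, NUMA_NO_NODE, IOAPIC_LEVEL, IOAPIC_POL_LOW);
    irq = mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC | IOAPIC_MAP_CHECK, &info);
    if (irq < 0)
            pr_err("no usable IOAPIC pin for this GSI\n");
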
+
+#ifndef CONFIG_ACPI
+int acpi_get_override_irq(u32 gsi, int *trigger, int *polarity);
 #endif
-                       break;
-               case 1: /* edge */
-               {
-                       trigger = 0;
-                       break;
-               }
-               case 2: /* reserved */
-               {
-                       pr_warn("broken BIOS!!\n");
-                       trigger = 1;
-                       break;
-               }
-               case 3: /* level */
-               {
-                       trigger = 1;
-                       break;
-               }
-               default: /* invalid */
-               {
-                       pr_warn("broken BIOS!!\n");
-                       trigger = 0;
-                       break;
+
+static void ioapic_copy_alloc_attr(struct irq_alloc_info *dst,
+                                  struct irq_alloc_info *src,
+                                  u32 gsi, int ioapic_idx, int pin)
+{
+       int trigger, polarity;
+
+       copy_irq_alloc_info(dst, src);
+       dst->type = X86_IRQ_ALLOC_TYPE_IOAPIC;
+       dst->ioapic_id = mpc_ioapic_id(ioapic_idx);
+       dst->ioapic_pin = pin;
+       dst->ioapic_valid = 1;
+       if (src && src->ioapic_valid) {
+               dst->ioapic_node = src->ioapic_node;
+               dst->ioapic_trigger = src->ioapic_trigger;
+               dst->ioapic_polarity = src->ioapic_polarity;
+       } else {
+               dst->ioapic_node = NUMA_NO_NODE;
+               if (acpi_get_override_irq(gsi, &trigger, &polarity) >= 0) {
+                       dst->ioapic_trigger = trigger;
+                       dst->ioapic_polarity = polarity;
+               } else {
+                       /*
+                        * PCI interrupts are always active low level
+                        * triggered.
+                        */
+                       dst->ioapic_trigger = IOAPIC_LEVEL;
+                       dst->ioapic_polarity = IOAPIC_POL_LOW;
                }
        }
-       return trigger;
 }
 
-static int alloc_irq_from_domain(struct irq_domain *domain, u32 gsi, int pin)
+static int ioapic_alloc_attr_node(struct irq_alloc_info *info)
+{
+       return (info && info->ioapic_valid) ? info->ioapic_node : NUMA_NO_NODE;
+}
+
+static void mp_register_handler(unsigned int irq, unsigned long trigger)
+{
+       irq_flow_handler_t hdl;
+       bool fasteoi;
+
+       if (trigger) {
+               irq_set_status_flags(irq, IRQ_LEVEL);
+               fasteoi = true;
+       } else {
+               irq_clear_status_flags(irq, IRQ_LEVEL);
+               fasteoi = false;
+       }
+
+       hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
+       __irq_set_handler(irq, hdl, 0, fasteoi ? "fasteoi" : "edge");
+}
+
+static bool mp_check_pin_attr(int irq, struct irq_alloc_info *info)
 {
+       struct mp_chip_data *data = irq_get_chip_data(irq);
+
+       /*
+        * setup_IO_APIC_irqs() programs all legacy IRQs with default trigger
+        * and polarity attributes. So allow the first user to reprogram the
+        * pin with real trigger and polarity attributes.
+        */
+       if (irq < nr_legacy_irqs() && data->count == 1) {
+               if (info->ioapic_trigger != data->trigger)
+                       mp_register_handler(irq, data->trigger);
+               data->entry.trigger = data->trigger = info->ioapic_trigger;
+               data->entry.polarity = data->polarity = info->ioapic_polarity;
+       }
+
+       return data->trigger == info->ioapic_trigger &&
+              data->polarity == info->ioapic_polarity;
+}
+
+static int alloc_irq_from_domain(struct irq_domain *domain, int ioapic, u32 gsi,
+                                struct irq_alloc_info *info)
+{
+       bool legacy = false;
        int irq = -1;
-       int ioapic = (int)(long)domain->host_data;
        int type = ioapics[ioapic].irqdomain_cfg.type;
 
        switch (type) {
        case IOAPIC_DOMAIN_LEGACY:
                /*
-                * Dynamically allocate IRQ number for non-ISA IRQs in the first 16
-                * GSIs on some weird platforms.
+                * Dynamically allocate IRQ number for non-ISA IRQs in the first
+                * 16 GSIs on some weird platforms.
                 */
-               if (gsi < nr_legacy_irqs())
-                       irq = irq_create_mapping(domain, pin);
-               else if (irq_create_strict_mappings(domain, gsi, pin, 1) == 0)
+               if (!ioapic_initialized || gsi >= nr_legacy_irqs())
                        irq = gsi;
+               legacy = mp_is_legacy_irq(irq);
                break;
        case IOAPIC_DOMAIN_STRICT:
-               if (irq_create_strict_mappings(domain, gsi, pin, 1) == 0)
-                       irq = gsi;
+               irq = gsi;
                break;
        case IOAPIC_DOMAIN_DYNAMIC:
-               irq = irq_create_mapping(domain, pin);
                break;
        default:
                WARN(1, "ioapic: unknown irqdomain type %d\n", type);
-               break;
+               return -1;
+       }
+
+       return __irq_domain_alloc_irqs(domain, irq, 1,
+                                      ioapic_alloc_attr_node(info),
+                                      info, legacy);
+}
+
+/*
+ * Need special handling for ISA IRQs because there may be multiple IOAPIC pins
+ * sharing the same ISA IRQ number and irqdomain only supports 1:1 mapping
+ * between IOAPIC pin and IRQ number. A typical IOAPIC has 24 pins: pins 0-15
+ * are used for legacy IRQs and pins 16-23 are used for PCI IRQs (PIRQ A-H).
+ * When ACPI is disabled, only legacy IRQ numbers (IRQ0-15) are available, and
+ * some BIOSes may use MP Interrupt Source records to override IRQ numbers for
+ * PIRQs instead of reprogramming the interrupt routing logic. Thus there may
+ * be multiple pins sharing the same legacy IRQ number when ACPI is disabled;
+ * a sketch of such sharing follows the function below.
+ */
+static int alloc_isa_irq_from_domain(struct irq_domain *domain,
+                                    int irq, int ioapic, int pin,
+                                    struct irq_alloc_info *info)
+{
+       struct mp_chip_data *data;
+       struct irq_data *irq_data = irq_get_irq_data(irq);
+       int node = ioapic_alloc_attr_node(info);
+
+       /*
+        * The legacy ISA IRQ has already been allocated; just add the pin
+        * to the pin list associated with this IRQ and program the IOAPIC
+        * entry.
+        */
+       if (irq_data && irq_data->parent_data) {
+               if (!mp_check_pin_attr(irq, info))
+                       return -EBUSY;
+               if (__add_pin_to_irq_node(irq_data->chip_data, node, ioapic,
+                                         info->ioapic_pin))
+                       return -ENOMEM;
+       } else {
+               irq = __irq_domain_alloc_irqs(domain, irq, 1, node, info, true);
+               if (irq >= 0) {
+                       irq_data = irq_domain_get_irq_data(domain, irq);
+                       data = irq_data->chip_data;
+                       data->isa_irq = true;
+               }
        }
 
-       return irq > 0 ? irq : -1;
+       return irq;
 }
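
A hypothetical wiring that exercises this path, to make the sharing
concrete (apic/pin numbers invented for illustration):

    /*
     * With ACPI disabled, an MP Interrupt Source record may route
     * PIRQ A to ISA IRQ 5 while the ISA bus also uses IRQ 5. Both
     * land on one Linux irq, with two entries on the pin list:
     *
     *     irq 5 -> irq_2_pin: (apic 0, pin 5), (apic 0, pin 16)
     *
     * The first caller allocates the irq through the domain; the
     * second merely passes mp_check_pin_attr() and appends its pin
     * via __add_pin_to_irq_node().
     */
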
 
 static int mp_map_pin_to_irq(u32 gsi, int idx, int ioapic, int pin,
-                            unsigned int flags)
+                            unsigned int flags, struct irq_alloc_info *info)
 {
        int irq;
+       bool legacy = false;
+       struct irq_alloc_info tmp;
+       struct mp_chip_data *data;
        struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
-       struct mp_pin_info *info = mp_pin_info(ioapic, pin);
 
        if (!domain)
-               return -1;
+               return -ENOSYS;
 
-       mutex_lock(&ioapic_mutex);
-
-       /*
-        * Don't use irqdomain to manage ISA IRQs because there may be
-        * multiple IOAPIC pins sharing the same ISA IRQ number and
-        * irqdomain only supports 1:1 mapping between IOAPIC pin and
-        * IRQ number. A typical IOAPIC has 24 pins, pin 0-15 are used
-        * for legacy IRQs and pin 16-23 are used for PCI IRQs (PIRQ A-H).
-        * When ACPI is disabled, only legacy IRQ numbers (IRQ0-15) are
-        * available, and some BIOSes may use MP Interrupt Source records
-        * to override IRQ numbers for PIRQs instead of reprogramming
-        * the interrupt routing logic. Thus there may be multiple pins
-        * sharing the same legacy IRQ number when ACPI is disabled.
-        */
        if (idx >= 0 && test_bit(mp_irqs[idx].srcbus, mp_bus_not_pci)) {
                irq = mp_irqs[idx].srcbusirq;
-               if (flags & IOAPIC_MAP_ALLOC) {
-                       if (info->count == 0 &&
-                           mp_irqdomain_map(domain, irq, pin) != 0)
-                               irq = -1;
+               legacy = mp_is_legacy_irq(irq);
+       }
 
-                       /* special handling for timer IRQ0 */
+       mutex_lock(&ioapic_mutex);
+       if (!(flags & IOAPIC_MAP_ALLOC)) {
+               if (!legacy) {
+                       irq = irq_find_mapping(domain, pin);
                        if (irq == 0)
-                               info->count++;
+                               irq = -ENOENT;
                }
        } else {
-               irq = irq_find_mapping(domain, pin);
-               if (irq <= 0 && (flags & IOAPIC_MAP_ALLOC))
-                       irq = alloc_irq_from_domain(domain, gsi, pin);
-       }
-
-       if (flags & IOAPIC_MAP_ALLOC) {
-               /* special handling for legacy IRQs */
-               if (irq < nr_legacy_irqs() && info->count == 1 &&
-                   mp_irqdomain_map(domain, irq, pin) != 0)
-                       irq = -1;
-
-               if (irq > 0)
-                       info->count++;
-               else if (info->count == 0)
-                       info->set = 0;
+               ioapic_copy_alloc_attr(&tmp, info, gsi, ioapic, pin);
+               if (legacy)
+                       irq = alloc_isa_irq_from_domain(domain, irq,
+                                                       ioapic, pin, &tmp);
+               else if ((irq = irq_find_mapping(domain, pin)) == 0)
+                       irq = alloc_irq_from_domain(domain, ioapic, gsi, &tmp);
+               else if (!mp_check_pin_attr(irq, &tmp))
+                       irq = -EBUSY;
+               if (irq >= 0) {
+                       data = irq_get_chip_data(irq);
+                       data->count++;
+               }
        }
-
        mutex_unlock(&ioapic_mutex);
 
-       return irq > 0 ? irq : -1;
+       return irq;
 }
 
 static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
@@ -1058,10 +1098,10 @@ static int pin_2_irq(int idx, int ioapic, int pin, unsigned int flags)
        }
 #endif
 
-       return  mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags);
+       return  mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, NULL);
 }
 
-int mp_map_gsi_to_irq(u32 gsi, unsigned int flags)
+int mp_map_gsi_to_irq(u32 gsi, unsigned int flags, struct irq_alloc_info *info)
 {
        int ioapic, pin, idx;
 
@@ -1074,31 +1114,24 @@ int mp_map_gsi_to_irq(u32 gsi, unsigned int flags)
        if ((flags & IOAPIC_MAP_CHECK) && idx < 0)
                return -1;
 
-       return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags);
+       return mp_map_pin_to_irq(gsi, idx, ioapic, pin, flags, info);
 }
 
 void mp_unmap_irq(int irq)
 {
-       struct irq_data *data = irq_get_irq_data(irq);
-       struct mp_pin_info *info;
-       int ioapic, pin;
+       struct irq_data *irq_data = irq_get_irq_data(irq);
+       struct mp_chip_data *data;
 
-       if (!data || !data->domain)
+       if (!irq_data || !irq_data->domain)
                return;
 
-       ioapic = (int)(long)data->domain->host_data;
-       pin = (int)data->hwirq;
-       info = mp_pin_info(ioapic, pin);
+       data = irq_data->chip_data;
+       if (!data || data->isa_irq)
+               return;
 
        mutex_lock(&ioapic_mutex);
-       if (--info->count == 0) {
-               info->set = 0;
-               if (irq < nr_legacy_irqs() &&
-                   ioapics[ioapic].irqdomain_cfg.type == IOAPIC_DOMAIN_LEGACY)
-                       mp_irqdomain_unmap(data->domain, irq);
-               else
-                       irq_dispose_mapping(irq);
-       }
+       if (--data->count == 0)
+               irq_domain_free_irqs(irq, 1);
        mutex_unlock(&ioapic_mutex);
 }
 
@@ -1165,7 +1198,7 @@ out:
 }
 EXPORT_SYMBOL(IO_APIC_get_PCI_irq_vector);
 
-static struct irq_chip ioapic_chip;
+static struct irq_chip ioapic_chip, ioapic_ir_chip;
 
 #ifdef CONFIG_X86_32
 static inline int IO_APIC_irq_trigger(int irq)
@@ -1189,96 +1222,6 @@ static inline int IO_APIC_irq_trigger(int irq)
 }
 #endif
 
-static void ioapic_register_intr(unsigned int irq, struct irq_cfg *cfg,
-                                unsigned long trigger)
-{
-       struct irq_chip *chip = &ioapic_chip;
-       irq_flow_handler_t hdl;
-       bool fasteoi;
-
-       if ((trigger == IOAPIC_AUTO && IO_APIC_irq_trigger(irq)) ||
-           trigger == IOAPIC_LEVEL) {
-               irq_set_status_flags(irq, IRQ_LEVEL);
-               fasteoi = true;
-       } else {
-               irq_clear_status_flags(irq, IRQ_LEVEL);
-               fasteoi = false;
-       }
-
-       if (setup_remapped_irq(irq, cfg, chip))
-               fasteoi = trigger != 0;
-
-       hdl = fasteoi ? handle_fasteoi_irq : handle_edge_irq;
-       irq_set_chip_and_handler_name(irq, chip, hdl,
-                                     fasteoi ? "fasteoi" : "edge");
-}
-
-int native_setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
-                             unsigned int destination, int vector,
-                             struct io_apic_irq_attr *attr)
-{
-       memset(entry, 0, sizeof(*entry));
-
-       entry->delivery_mode = apic->irq_delivery_mode;
-       entry->dest_mode     = apic->irq_dest_mode;
-       entry->dest          = destination;
-       entry->vector        = vector;
-       entry->mask          = 0;                       /* enable IRQ */
-       entry->trigger       = attr->trigger;
-       entry->polarity      = attr->polarity;
-
-       /*
-        * Mask level triggered irqs.
-        * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
-        */
-       if (attr->trigger)
-               entry->mask = 1;
-
-       return 0;
-}
-
-static void setup_ioapic_irq(unsigned int irq, struct irq_cfg *cfg,
-                               struct io_apic_irq_attr *attr)
-{
-       struct IO_APIC_route_entry entry;
-       unsigned int dest;
-
-       if (!IO_APIC_IRQ(irq))
-               return;
-
-       if (assign_irq_vector(irq, cfg, apic->target_cpus()))
-               return;
-
-       if (apic->cpu_mask_to_apicid_and(cfg->domain, apic->target_cpus(),
-                                        &dest)) {
-               pr_warn("Failed to obtain apicid for ioapic %d, pin %d\n",
-                       mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
-               clear_irq_vector(irq, cfg);
-
-               return;
-       }
-
-       apic_printk(APIC_VERBOSE,KERN_DEBUG
-                   "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> "
-                   "IRQ %d Mode:%i Active:%i Dest:%d)\n",
-                   attr->ioapic, mpc_ioapic_id(attr->ioapic), attr->ioapic_pin,
-                   cfg->vector, irq, attr->trigger, attr->polarity, dest);
-
-       if (x86_io_apic_ops.setup_entry(irq, &entry, dest, cfg->vector, attr)) {
-               pr_warn("Failed to setup ioapic entry for ioapic  %d, pin %d\n",
-                       mpc_ioapic_id(attr->ioapic), attr->ioapic_pin);
-               clear_irq_vector(irq, cfg);
-
-               return;
-       }
-
-       ioapic_register_intr(irq, cfg, attr->trigger);
-       if (irq < nr_legacy_irqs())
-               legacy_pic->mask(irq);
-
-       ioapic_write_entry(attr->ioapic, attr->ioapic_pin, entry);
-}
-
 static void __init setup_IO_APIC_irqs(void)
 {
        unsigned int ioapic, pin;
@@ -1298,106 +1241,41 @@ static void __init setup_IO_APIC_irqs(void)
        }
 }
 
-/*
- * Set up the timer pin, possibly with the 8259A-master behind.
- */
-static void __init setup_timer_IRQ0_pin(unsigned int ioapic_idx,
-                                       unsigned int pin, int vector)
-{
-       struct IO_APIC_route_entry entry;
-       unsigned int dest;
-
-       memset(&entry, 0, sizeof(entry));
-
-       /*
-        * We use logical delivery to get the timer IRQ
-        * to the first CPU.
-        */
-       if (unlikely(apic->cpu_mask_to_apicid_and(apic->target_cpus(),
-                                                 apic->target_cpus(), &dest)))
-               dest = BAD_APICID;
-
-       entry.dest_mode = apic->irq_dest_mode;
-       entry.mask = 0;                 /* don't mask IRQ for edge */
-       entry.dest = dest;
-       entry.delivery_mode = apic->irq_delivery_mode;
-       entry.polarity = 0;
-       entry.trigger = 0;
-       entry.vector = vector;
-
-       /*
-        * The timer IRQ doesn't have to know that behind the
-        * scene we may have a 8259A-master in AEOI mode ...
-        */
-       irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
-                                     "edge");
-
-       /*
-        * Add it to the IO-APIC irq-routing table:
-        */
-       ioapic_write_entry(ioapic_idx, pin, entry);
-}
-
-void native_io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
+void ioapic_zap_locks(void)
 {
-       int i;
-
-       pr_debug(" NR Dst Mask Trig IRR Pol Stat Dmod Deli Vect:\n");
-
-       for (i = 0; i <= nr_entries; i++) {
-               struct IO_APIC_route_entry entry;
-
-               entry = ioapic_read_entry(apic, i);
-
-               pr_debug(" %02x %02X  ", i, entry.dest);
-               pr_cont("%1d    %1d    %1d   %1d   %1d    "
-                       "%1d    %1d    %02X\n",
-                       entry.mask,
-                       entry.trigger,
-                       entry.irr,
-                       entry.polarity,
-                       entry.delivery_status,
-                       entry.dest_mode,
-                       entry.delivery_mode,
-                       entry.vector);
-       }
+       raw_spin_lock_init(&ioapic_lock);
 }
 
-void intel_ir_io_apic_print_entries(unsigned int apic,
-                                   unsigned int nr_entries)
+static void io_apic_print_entries(unsigned int apic, unsigned int nr_entries)
 {
        int i;
+       char buf[256];
+       struct IO_APIC_route_entry entry;
+       struct IR_IO_APIC_route_entry *ir_entry = (void *)&entry;
 
-       pr_debug(" NR Indx Fmt Mask Trig IRR Pol Stat Indx2 Zero Vect:\n");
-
+       printk(KERN_DEBUG "IOAPIC %d:\n", apic);
        for (i = 0; i <= nr_entries; i++) {
-               struct IR_IO_APIC_route_entry *ir_entry;
-               struct IO_APIC_route_entry entry;
-
                entry = ioapic_read_entry(apic, i);
-
-               ir_entry = (struct IR_IO_APIC_route_entry *)&entry;
-
-               pr_debug(" %02x %04X ", i, ir_entry->index);
-               pr_cont("%1d   %1d    %1d    %1d   %1d   "
-                       "%1d    %1d     %X    %02X\n",
-                       ir_entry->format,
-                       ir_entry->mask,
-                       ir_entry->trigger,
-                       ir_entry->irr,
-                       ir_entry->polarity,
-                       ir_entry->delivery_status,
-                       ir_entry->index2,
-                       ir_entry->zero,
-                       ir_entry->vector);
+               snprintf(buf, sizeof(buf),
+                        " pin%02x, %s, %s, %s, V(%02X), IRR(%1d), S(%1d)",
+                        i,
+                        entry.mask == IOAPIC_MASKED ? "disabled" : "enabled ",
+                        entry.trigger == IOAPIC_LEVEL ? "level" : "edge ",
+                        entry.polarity == IOAPIC_POL_LOW ? "low " : "high",
+                        entry.vector, entry.irr, entry.delivery_status);
+               if (ir_entry->format)
+                       printk(KERN_DEBUG "%s, remapped, I(%04X),  Z(%X)\n",
+                              buf, (ir_entry->index2 << 15) | ir_entry->index,
+                              ir_entry->zero);
+               else
+                       printk(KERN_DEBUG "%s, %s, D(%02X), M(%1d)\n",
+                              buf,
+                              entry.dest_mode == IOAPIC_DEST_MODE_LOGICAL ?
+                              "logical " : "physical",
+                              entry.dest, entry.delivery_mode);
        }
 }
 
-void ioapic_zap_locks(void)
-{
-       raw_spin_lock_init(&ioapic_lock);
-}
-
 static void __init print_IO_APIC(int ioapic_idx)
 {
        union IO_APIC_reg_00 reg_00;
@@ -1451,16 +1329,13 @@ static void __init print_IO_APIC(int ioapic_idx)
        }
 
        printk(KERN_DEBUG ".... IRQ redirection table:\n");
-
-       x86_io_apic_ops.print_entries(ioapic_idx, reg_01.bits.entries);
+       io_apic_print_entries(ioapic_idx, reg_01.bits.entries);
 }
 
 void __init print_IO_APICs(void)
 {
        int ioapic_idx;
-       struct irq_cfg *cfg;
        unsigned int irq;
-       struct irq_chip *chip;
 
        printk(KERN_DEBUG "number of MP IRQ sources: %d.\n", mp_irq_entries);
        for_each_ioapic(ioapic_idx)
@@ -1480,18 +1355,20 @@ void __init print_IO_APICs(void)
        printk(KERN_DEBUG "IRQ to pin mappings:\n");
        for_each_active_irq(irq) {
                struct irq_pin_list *entry;
+               struct irq_chip *chip;
+               struct mp_chip_data *data;
 
                chip = irq_get_chip(irq);
-               if (chip != &ioapic_chip)
+               if (chip != &ioapic_chip && chip != &ioapic_ir_chip)
                        continue;
-
-               cfg = irq_cfg(irq);
-               if (!cfg)
+               data = irq_get_chip_data(irq);
+               if (!data)
                        continue;
-               if (list_empty(&cfg->irq_2_pin))
+               if (list_empty(&data->irq_2_pin))
                        continue;
+
                printk(KERN_DEBUG "IRQ%d ", irq);
-               for_each_irq_pin(entry, cfg->irq_2_pin)
+               for_each_irq_pin(entry, data->irq_2_pin)
                        pr_cont("-> %d:%d", entry->apic, entry->pin);
                pr_cont("\n");
        }
@@ -1564,15 +1441,12 @@ void native_disable_io_apic(void)
                struct IO_APIC_route_entry entry;
 
                memset(&entry, 0, sizeof(entry));
-               entry.mask            = 0; /* Enabled */
-               entry.trigger         = 0; /* Edge */
-               entry.irr             = 0;
-               entry.polarity        = 0; /* High */
-               entry.delivery_status = 0;
-               entry.dest_mode       = 0; /* Physical */
-               entry.delivery_mode   = dest_ExtINT; /* ExtInt */
-               entry.vector          = 0;
-               entry.dest            = read_apic_id();
+               entry.mask              = IOAPIC_UNMASKED;
+               entry.trigger           = IOAPIC_EDGE;
+               entry.polarity          = IOAPIC_POL_HIGH;
+               entry.dest_mode         = IOAPIC_DEST_MODE_PHYSICAL;
+               entry.delivery_mode     = dest_ExtINT;
+               entry.dest              = read_apic_id();
 
                /*
                 * Add it to the IO-APIC irq-routing table:
@@ -1582,7 +1456,6 @@ void native_disable_io_apic(void)
 
        if (cpu_has_apic || apic_from_smp_config())
                disconnect_bsp_APIC(ioapic_i8259.pin != -1);
-
 }
 
 /*
@@ -1792,7 +1665,6 @@ static int __init timer_irq_works(void)
  * This is not complete - we should be able to fake
  * an edge even if it isn't on the 8259A...
  */
-
 static unsigned int startup_ioapic_irq(struct irq_data *data)
 {
        int was_pending = 0, irq = data->irq;
@@ -1804,74 +1676,22 @@ static unsigned int startup_ioapic_irq(struct irq_data *data)
                if (legacy_pic->irq_pending(irq))
                        was_pending = 1;
        }
-       __unmask_ioapic(irqd_cfg(data));
+       __unmask_ioapic(data->chip_data);
        raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
        return was_pending;
 }
 
-/*
- * Level and edge triggered IO-APIC interrupts need different handling,
- * so we use two separate IRQ descriptors. Edge triggered IRQs can be
- * handled with the level-triggered descriptor, but that one has slightly
- * more overhead. Level-triggered interrupts cannot be handled with the
- * edge-triggered handler, without risking IRQ storms and other ugly
- * races.
- */
-
-static void __target_IO_APIC_irq(unsigned int irq, unsigned int dest, struct irq_cfg *cfg)
-{
-       int apic, pin;
-       struct irq_pin_list *entry;
-       u8 vector = cfg->vector;
-
-       for_each_irq_pin(entry, cfg->irq_2_pin) {
-               unsigned int reg;
-
-               apic = entry->apic;
-               pin = entry->pin;
-
-               io_apic_write(apic, 0x11 + pin*2, dest);
-               reg = io_apic_read(apic, 0x10 + pin*2);
-               reg &= ~IO_APIC_REDIR_VECTOR_MASK;
-               reg |= vector;
-               io_apic_modify(apic, 0x10 + pin*2, reg);
-       }
-}
-
-int native_ioapic_set_affinity(struct irq_data *data,
-                              const struct cpumask *mask,
-                              bool force)
-{
-       unsigned int dest, irq = data->irq;
-       unsigned long flags;
-       int ret;
-
-       if (!config_enabled(CONFIG_SMP))
-               return -EPERM;
-
-       raw_spin_lock_irqsave(&ioapic_lock, flags);
-       ret = apic_set_affinity(data, mask, &dest);
-       if (!ret) {
-               /* Only the high 8 bits are valid. */
-               dest = SET_APIC_LOGICAL_ID(dest);
-               __target_IO_APIC_irq(irq, dest, irqd_cfg(data));
-               ret = IRQ_SET_MASK_OK_NOCOPY;
-       }
-       raw_spin_unlock_irqrestore(&ioapic_lock, flags);
-       return ret;
-}
-
 atomic_t irq_mis_count;
 
 #ifdef CONFIG_GENERIC_PENDING_IRQ
-static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
+static bool io_apic_level_ack_pending(struct mp_chip_data *data)
 {
        struct irq_pin_list *entry;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&ioapic_lock, flags);
-       for_each_irq_pin(entry, cfg->irq_2_pin) {
+       for_each_irq_pin(entry, data->irq_2_pin) {
                unsigned int reg;
                int pin;
 
@@ -1888,18 +1708,17 @@ static bool io_apic_level_ack_pending(struct irq_cfg *cfg)
        return false;
 }
 
-static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+static inline bool ioapic_irqd_mask(struct irq_data *data)
 {
        /* If we are moving the irq we need to mask it */
        if (unlikely(irqd_is_setaffinity_pending(data))) {
-               mask_ioapic(cfg);
+               mask_ioapic_irq(data);
                return true;
        }
        return false;
 }
 
-static inline void ioapic_irqd_unmask(struct irq_data *data,
-                                     struct irq_cfg *cfg, bool masked)
+static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
 {
        if (unlikely(masked)) {
                /* Only migrate the irq if the ack has been received.
@@ -1928,31 +1747,30 @@ static inline void ioapic_irqd_unmask(struct irq_data *data,
                 * accurate and is causing problems then it is a hardware bug
                 * and you can go talk to the chipset vendor about it.
                 */
-               if (!io_apic_level_ack_pending(cfg))
+               if (!io_apic_level_ack_pending(data->chip_data))
                        irq_move_masked_irq(data);
-               unmask_ioapic(cfg);
+               unmask_ioapic_irq(data);
        }
 }
 #else
-static inline bool ioapic_irqd_mask(struct irq_data *data, struct irq_cfg *cfg)
+static inline bool ioapic_irqd_mask(struct irq_data *data)
 {
        return false;
 }
-static inline void ioapic_irqd_unmask(struct irq_data *data,
-                                     struct irq_cfg *cfg, bool masked)
+static inline void ioapic_irqd_unmask(struct irq_data *data, bool masked)
 {
 }
 #endif
 
-static void ack_ioapic_level(struct irq_data *data)
+static void ioapic_ack_level(struct irq_data *irq_data)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       int i, irq = data->irq;
+       struct irq_cfg *cfg = irqd_cfg(irq_data);
        unsigned long v;
        bool masked;
+       int i;
 
        irq_complete_move(cfg);
-       masked = ioapic_irqd_mask(data, cfg);
+       masked = ioapic_irqd_mask(irq_data);
 
        /*
         * It appears there is an erratum which affects at least version 0x11
@@ -2004,11 +1822,49 @@ static void ack_ioapic_level(struct irq_data *data)
         */
        if (!(v & (1 << (i & 0x1f)))) {
                atomic_inc(&irq_mis_count);
+               eoi_ioapic_pin(cfg->vector, irq_data->chip_data);
+       }
+
+       ioapic_irqd_unmask(irq_data, masked);
+}
+
+static void ioapic_ir_ack_level(struct irq_data *irq_data)
+{
+       struct mp_chip_data *data = irq_data->chip_data;
+
+       /*
+        * Intr-remapping uses pin number as the virtual vector
+        * in the RTE. Actual vector is programmed in
+        * intr-remapping table entry. Hence for the io-apic
+        * EOI we use the pin number.
+        */
+       ack_APIC_irq();
+       eoi_ioapic_pin(data->entry.vector, data);
+}
 
-               eoi_ioapic_irq(irq, cfg);
+static int ioapic_set_affinity(struct irq_data *irq_data,
+                              const struct cpumask *mask, bool force)
+{
+       struct irq_data *parent = irq_data->parent_data;
+       struct mp_chip_data *data = irq_data->chip_data;
+       struct irq_pin_list *entry;
+       struct irq_cfg *cfg;
+       unsigned long flags;
+       int ret;
+
+       ret = parent->chip->irq_set_affinity(parent, mask, force);
+       raw_spin_lock_irqsave(&ioapic_lock, flags);
+       if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) {
+               cfg = irqd_cfg(irq_data);
+               data->entry.dest = cfg->dest_apicid;
+               data->entry.vector = cfg->vector;
+               for_each_irq_pin(entry, data->irq_2_pin)
+                       __ioapic_write_entry(entry->apic, entry->pin,
+                                            data->entry);
        }
+       raw_spin_unlock_irqrestore(&ioapic_lock, flags);
 
-       ioapic_irqd_unmask(data, cfg, masked);
+       return ret;
 }
 
 static struct irq_chip ioapic_chip __read_mostly = {
@@ -2016,10 +1872,20 @@ static struct irq_chip ioapic_chip __read_mostly = {
        .irq_startup            = startup_ioapic_irq,
        .irq_mask               = mask_ioapic_irq,
        .irq_unmask             = unmask_ioapic_irq,
-       .irq_ack                = apic_ack_edge,
-       .irq_eoi                = ack_ioapic_level,
-       .irq_set_affinity       = native_ioapic_set_affinity,
-       .irq_retrigger          = apic_retrigger_irq,
+       .irq_ack                = irq_chip_ack_parent,
+       .irq_eoi                = ioapic_ack_level,
+       .irq_set_affinity       = ioapic_set_affinity,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+};
+
+static struct irq_chip ioapic_ir_chip __read_mostly = {
+       .name                   = "IR-IO-APIC",
+       .irq_startup            = startup_ioapic_irq,
+       .irq_mask               = mask_ioapic_irq,
+       .irq_unmask             = unmask_ioapic_irq,
+       .irq_ack                = irq_chip_ack_parent,
+       .irq_eoi                = ioapic_ir_ack_level,
+       .irq_set_affinity       = ioapic_set_affinity,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
@@ -2113,12 +1979,12 @@ static inline void __init unlock_ExtINT_logic(void)
 
        memset(&entry1, 0, sizeof(entry1));
 
-       entry1.dest_mode = 0;                   /* physical delivery */
-       entry1.mask = 0;                        /* unmask IRQ now */
+       entry1.dest_mode = IOAPIC_DEST_MODE_PHYSICAL;
+       entry1.mask = IOAPIC_UNMASKED;
        entry1.dest = hard_smp_processor_id();
        entry1.delivery_mode = dest_ExtINT;
        entry1.polarity = entry0.polarity;
-       entry1.trigger = 0;
+       entry1.trigger = IOAPIC_EDGE;
        entry1.vector = 0;
 
        ioapic_write_entry(apic, pin, entry1);
@@ -2152,6 +2018,25 @@ static int __init disable_timer_pin_setup(char *arg)
 }
 early_param("disable_timer_pin_1", disable_timer_pin_setup);
 
+static int mp_alloc_timer_irq(int ioapic, int pin)
+{
+       int irq = -1;
+       struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);
+
+       if (domain) {
+               struct irq_alloc_info info;
+
+               ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 0, 0);
+               info.ioapic_id = mpc_ioapic_id(ioapic);
+               info.ioapic_pin = pin;
+               mutex_lock(&ioapic_mutex);
+               irq = alloc_isa_irq_from_domain(domain, 0, ioapic, pin, &info);
+               mutex_unlock(&ioapic_mutex);
+       }
+
+       return irq;
+}
+
 /*
  * This code may look a bit paranoid, but it's supposed to cooperate with
  * a wide range of boards and BIOS bugs.  Fortunately only the timer IRQ
@@ -2162,7 +2047,9 @@ early_param("disable_timer_pin_1", disable_timer_pin_setup);
  */
 static inline void __init check_timer(void)
 {
-       struct irq_cfg *cfg = irq_cfg(0);
+       struct irq_data *irq_data = irq_get_irq_data(0);
+       struct mp_chip_data *data = irq_data->chip_data;
+       struct irq_cfg *cfg = irqd_cfg(irq_data);
        int node = cpu_to_node(0);
        int apic1, pin1, apic2, pin2;
        unsigned long flags;
@@ -2174,7 +2061,6 @@ static inline void __init check_timer(void)
         * get/set the timer IRQ vector:
         */
        legacy_pic->mask(0);
-       assign_irq_vector(0, cfg, apic->target_cpus());
 
        /*
         * As IRQ0 is to be enabled in the 8259A, the virtual
@@ -2215,23 +2101,21 @@ static inline void __init check_timer(void)
        }
 
        if (pin1 != -1) {
-               /*
-                * Ok, does IRQ0 through the IOAPIC work?
-                */
+               /* Ok, does IRQ0 through the IOAPIC work? */
                if (no_pin1) {
-                       add_pin_to_irq_node(cfg, node, apic1, pin1);
-                       setup_timer_IRQ0_pin(apic1, pin1, cfg->vector);
+                       mp_alloc_timer_irq(apic1, pin1);
                } else {
-                       /* for edge trigger, setup_ioapic_irq already
-                        * leave it unmasked.
+                       /*
+                        * for edge trigger, it's already unmasked, so we
                         * only need to unmask it for level trigger;
                         * do we really have a level triggered timer?
                         */
                        int idx;
                        idx = find_irq_entry(apic1, pin1, mp_INT);
                        if (idx != -1 && irq_trigger(idx))
-                               unmask_ioapic(cfg);
+                               unmask_ioapic_irq(irq_data);
                }
+               irq_domain_activate_irq(irq_data);
                if (timer_irq_works()) {
                        if (disable_timer_pin_1 > 0)
                                clear_IO_APIC_pin(0, pin1);
@@ -2251,8 +2135,8 @@ static inline void __init check_timer(void)
                /*
                 * legacy devices should be connected to IO APIC #0
                 */
-               replace_pin_at_irq_node(cfg, node, apic1, pin1, apic2, pin2);
-               setup_timer_IRQ0_pin(apic2, pin2, cfg->vector);
+               replace_pin_at_irq_node(data, node, apic1, pin1, apic2, pin2);
+               irq_domain_activate_irq(irq_data);
                legacy_pic->unmask(0);
                if (timer_irq_works()) {
                        apic_printk(APIC_QUIET, KERN_INFO "....... works.\n");
@@ -2329,36 +2213,35 @@ out:
 
 static int mp_irqdomain_create(int ioapic)
 {
-       size_t size;
+       struct irq_alloc_info info;
+       struct irq_domain *parent;
        int hwirqs = mp_ioapic_pin_count(ioapic);
        struct ioapic *ip = &ioapics[ioapic];
        struct ioapic_domain_cfg *cfg = &ip->irqdomain_cfg;
        struct mp_ioapic_gsi *gsi_cfg = mp_ioapic_gsi_routing(ioapic);
 
-       size = sizeof(struct mp_pin_info) * mp_ioapic_pin_count(ioapic);
-       ip->pin_info = kzalloc(size, GFP_KERNEL);
-       if (!ip->pin_info)
-               return -ENOMEM;
-
        if (cfg->type == IOAPIC_DOMAIN_INVALID)
                return 0;
 
+       init_irq_alloc_info(&info, NULL);
+       info.type = X86_IRQ_ALLOC_TYPE_IOAPIC;
+       info.ioapic_id = mpc_ioapic_id(ioapic);
+       parent = irq_remapping_get_ir_irq_domain(&info);
+       if (!parent)
+               parent = x86_vector_domain;
+
        ip->irqdomain = irq_domain_add_linear(cfg->dev, hwirqs, cfg->ops,
                                              (void *)(long)ioapic);
-       if(!ip->irqdomain) {
-               kfree(ip->pin_info);
-               ip->pin_info = NULL;
+       if (!ip->irqdomain)
                return -ENOMEM;
-       }
+
+       ip->irqdomain->parent = parent;
 
        if (cfg->type == IOAPIC_DOMAIN_LEGACY ||
            cfg->type == IOAPIC_DOMAIN_STRICT)
                ioapic_dynirq_base = max(ioapic_dynirq_base,
                                         gsi_cfg->gsi_end + 1);
 
-       if (gsi_cfg->gsi_base == 0)
-               irq_set_default_host(ip->irqdomain);
-
        return 0;
 }
 
@@ -2368,8 +2251,6 @@ static void ioapic_destroy_irqdomain(int idx)
                irq_domain_remove(ioapics[idx].irqdomain);
                ioapics[idx].irqdomain = NULL;
        }
-       kfree(ioapics[idx].pin_info);
-       ioapics[idx].pin_info = NULL;
 }
 
 void __init setup_IO_APIC(void)
@@ -2399,20 +2280,6 @@ void __init setup_IO_APIC(void)
        ioapic_initialized = 1;
 }
 
-/*
- *      Called after all the initialization is done. If we didn't find any
- *      APIC bugs then we can allow the modify fast path
- */
-
-static int __init io_apic_bug_finalize(void)
-{
-       if (sis_apic_bug == -1)
-               sis_apic_bug = 0;
-       return 0;
-}
-
-late_initcall(io_apic_bug_finalize);
-
 static void resume_ioapic_id(int ioapic_idx)
 {
        unsigned long flags;
@@ -2451,20 +2318,6 @@ static int __init ioapic_init_ops(void)
 
 device_initcall(ioapic_init_ops);
 
-static int
-io_apic_setup_irq_pin(unsigned int irq, int node, struct io_apic_irq_attr *attr)
-{
-       struct irq_cfg *cfg = alloc_irq_and_cfg_at(irq, node);
-       int ret;
-
-       if (!cfg)
-               return -EINVAL;
-       ret = __add_pin_to_irq_node(cfg, node, attr->ioapic, attr->ioapic_pin);
-       if (!ret)
-               setup_ioapic_irq(irq, cfg, attr);
-       return ret;
-}
-
 static int io_apic_get_redir_entries(int ioapic)
 {
        union IO_APIC_reg_01    reg_01;
@@ -2692,7 +2545,7 @@ void __init setup_ioapic_dest(void)
                else
                        mask = apic->target_cpus();
 
-               x86_io_apic_ops.set_affinity(idata, mask, false);
+               irq_set_affinity(irq, mask);
        }
 
 }
@@ -2737,7 +2590,7 @@ static struct resource * __init ioapic_setup_resources(void)
        return res;
 }
 
-void __init native_io_apic_init_mappings(void)
+void __init io_apic_init_mappings(void)
 {
        unsigned long ioapic_phys, idx = FIX_IO_APIC_BASE_0;
        struct resource *ioapic_res;
@@ -2962,7 +2815,6 @@ int mp_unregister_ioapic(u32 gsi_base)
 {
        int ioapic, pin;
        int found = 0;
-       struct mp_pin_info *pin_info;
 
        for_each_ioapic(ioapic)
                if (ioapics[ioapic].gsi_config.gsi_base == gsi_base) {
@@ -2975,11 +2827,17 @@ int mp_unregister_ioapic(u32 gsi_base)
        }
 
        for_each_pin(ioapic, pin) {
-               pin_info = mp_pin_info(ioapic, pin);
-               if (pin_info->count) {
-                       pr_warn("pin%d on IOAPIC%d is still in use.\n",
-                               pin, ioapic);
-                       return -EBUSY;
+               u32 gsi = mp_pin_to_gsi(ioapic, pin);
+               int irq = mp_map_gsi_to_irq(gsi, 0, NULL);
+               struct mp_chip_data *data;
+
+               if (irq >= 0) {
+                       data = irq_get_chip_data(irq);
+                       if (data && data->count) {
+                               pr_warn("pin%d on IOAPIC%d is still in use.\n",
+                                       pin, ioapic);
+                               return -EBUSY;
+                       }
                }
        }
 
@@ -3006,108 +2864,141 @@ int mp_ioapic_registered(u32 gsi_base)
        return 0;
 }
 
-static inline void set_io_apic_irq_attr(struct io_apic_irq_attr *irq_attr,
-                                       int ioapic, int ioapic_pin,
-                                       int trigger, int polarity)
+static void mp_irqdomain_get_attr(u32 gsi, struct mp_chip_data *data,
+                                 struct irq_alloc_info *info)
 {
-       irq_attr->ioapic        = ioapic;
-       irq_attr->ioapic_pin    = ioapic_pin;
-       irq_attr->trigger       = trigger;
-       irq_attr->polarity      = polarity;
+       if (info && info->ioapic_valid) {
+               data->trigger = info->ioapic_trigger;
+               data->polarity = info->ioapic_polarity;
+       } else if (acpi_get_override_irq(gsi, &data->trigger,
+                                        &data->polarity) < 0) {
+               /* PCI interrupts are always active low level triggered. */
+               data->trigger = IOAPIC_LEVEL;
+               data->polarity = IOAPIC_POL_LOW;
+       }
 }
 
-int mp_irqdomain_map(struct irq_domain *domain, unsigned int virq,
-                    irq_hw_number_t hwirq)
+static void mp_setup_entry(struct irq_cfg *cfg, struct mp_chip_data *data,
+                          struct IO_APIC_route_entry *entry)
 {
-       int ioapic = (int)(long)domain->host_data;
-       struct mp_pin_info *info = mp_pin_info(ioapic, hwirq);
-       struct io_apic_irq_attr attr;
+       memset(entry, 0, sizeof(*entry));
+       entry->delivery_mode = apic->irq_delivery_mode;
+       entry->dest_mode     = apic->irq_dest_mode;
+       entry->dest          = cfg->dest_apicid;
+       entry->vector        = cfg->vector;
+       entry->trigger       = data->trigger;
+       entry->polarity      = data->polarity;
+       /*
+        * Mask level triggered irqs. Edge triggered irqs are masked
+        * by the irq core code in case they fire.
+        */
+       if (data->trigger == IOAPIC_LEVEL)
+               entry->mask = IOAPIC_MASKED;
+       else
+               entry->mask = IOAPIC_UNMASKED;
+}
 
-       /* Get default attribute if not set by caller yet */
-       if (!info->set) {
-               u32 gsi = mp_pin_to_gsi(ioapic, hwirq);
+int mp_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+                      unsigned int nr_irqs, void *arg)
+{
+       int ret, ioapic, pin;
+       struct irq_cfg *cfg;
+       struct irq_data *irq_data;
+       struct mp_chip_data *data;
+       struct irq_alloc_info *info = arg;
 
-               if (acpi_get_override_irq(gsi, &info->trigger,
-                                         &info->polarity) < 0) {
-                       /*
-                        * PCI interrupts are always polarity one level
-                        * triggered.
-                        */
-                       info->trigger = 1;
-                       info->polarity = 1;
-               }
-               info->node = NUMA_NO_NODE;
+       if (!info || nr_irqs > 1)
+               return -EINVAL;
+       irq_data = irq_domain_get_irq_data(domain, virq);
+       if (!irq_data)
+               return -EINVAL;
 
-               /*
-                * setup_IO_APIC_irqs() programs all legacy IRQs with default
-                * trigger and polarity attributes. Don't set the flag for that
-                * case so the first legacy IRQ user could reprogram the pin
-                * with real trigger and polarity attributes.
-                */
-               if (virq >= nr_legacy_irqs() || info->count)
-                       info->set = 1;
-       }
-       set_io_apic_irq_attr(&attr, ioapic, hwirq, info->trigger,
-                            info->polarity);
+       ioapic = mp_irqdomain_ioapic_idx(domain);
+       pin = info->ioapic_pin;
+       if (irq_find_mapping(domain, (irq_hw_number_t)pin) > 0)
+               return -EEXIST;
 
-       return io_apic_setup_irq_pin(virq, info->node, &attr);
-}
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               return -ENOMEM;
 
-void mp_irqdomain_unmap(struct irq_domain *domain, unsigned int virq)
-{
-       struct irq_data *data = irq_get_irq_data(virq);
-       struct irq_cfg *cfg = irq_cfg(virq);
-       int ioapic = (int)(long)domain->host_data;
-       int pin = (int)data->hwirq;
+       info->ioapic_entry = &data->entry;
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, info);
+       if (ret < 0) {
+               kfree(data);
+               return ret;
+       }
+
+       INIT_LIST_HEAD(&data->irq_2_pin);
+       irq_data->hwirq = info->ioapic_pin;
+       irq_data->chip = (domain->parent == x86_vector_domain) ?
+                         &ioapic_chip : &ioapic_ir_chip;
+       irq_data->chip_data = data;
+       mp_irqdomain_get_attr(mp_pin_to_gsi(ioapic, pin), data, info);
+
+       cfg = irqd_cfg(irq_data);
+       add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
+       if (info->ioapic_entry)
+               mp_setup_entry(cfg, data, info->ioapic_entry);
+       mp_register_handler(virq, data->trigger);
+       if (virq < nr_legacy_irqs())
+               legacy_pic->mask(virq);
+
+       apic_printk(APIC_VERBOSE, KERN_DEBUG
+                   "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
+                   ioapic, mpc_ioapic_id(ioapic), pin, cfg->vector,
+                   virq, data->trigger, data->polarity, cfg->dest_apicid);
 
-       ioapic_mask_entry(ioapic, pin);
-       __remove_pin_from_irq(cfg, ioapic, pin);
-       WARN_ON(!list_empty(&cfg->irq_2_pin));
-       arch_teardown_hwirq(virq);
+       return 0;
 }
 
-int mp_set_gsi_attr(u32 gsi, int trigger, int polarity, int node)
+void mp_irqdomain_free(struct irq_domain *domain, unsigned int virq,
+                      unsigned int nr_irqs)
 {
-       int ret = 0;
-       int ioapic, pin;
-       struct mp_pin_info *info;
+       struct irq_data *irq_data;
+       struct mp_chip_data *data;
 
-       ioapic = mp_find_ioapic(gsi);
-       if (ioapic < 0)
-               return -ENODEV;
-
-       pin = mp_find_ioapic_pin(ioapic, gsi);
-       info = mp_pin_info(ioapic, pin);
-       trigger = trigger ? 1 : 0;
-       polarity = polarity ? 1 : 0;
-
-       mutex_lock(&ioapic_mutex);
-       if (!info->set) {
-               info->trigger = trigger;
-               info->polarity = polarity;
-               info->node = node;
-               info->set = 1;
-       } else if (info->trigger != trigger || info->polarity != polarity) {
-               ret = -EBUSY;
+       BUG_ON(nr_irqs != 1);
+       irq_data = irq_domain_get_irq_data(domain, virq);
+       if (irq_data && irq_data->chip_data) {
+               data = irq_data->chip_data;
+               __remove_pin_from_irq(data, mp_irqdomain_ioapic_idx(domain),
+                                     (int)irq_data->hwirq);
+               WARN_ON(!list_empty(&data->irq_2_pin));
+               kfree(irq_data->chip_data);
        }
-       mutex_unlock(&ioapic_mutex);
-
-       return ret;
+       irq_domain_free_irqs_top(domain, virq, nr_irqs);
 }
 
-/* Enable IOAPIC early just for system timer */
-void __init pre_init_apic_IRQ0(void)
+void mp_irqdomain_activate(struct irq_domain *domain,
+                          struct irq_data *irq_data)
 {
-       struct io_apic_irq_attr attr = { 0, 0, 0, 0 };
+       unsigned long flags;
+       struct irq_pin_list *entry;
+       struct mp_chip_data *data = irq_data->chip_data;
 
-       printk(KERN_INFO "Early APIC setup for system timer0\n");
-#ifndef CONFIG_SMP
-       physid_set_mask_of_physid(boot_cpu_physical_apicid,
-                                        &phys_cpu_present_map);
-#endif
-       setup_local_APIC();
+       raw_spin_lock_irqsave(&ioapic_lock, flags);
+       for_each_irq_pin(entry, data->irq_2_pin)
+               __ioapic_write_entry(entry->apic, entry->pin, data->entry);
+       raw_spin_unlock_irqrestore(&ioapic_lock, flags);
+}
 
-       io_apic_setup_irq_pin(0, 0, &attr);
-       irq_set_chip_and_handler_name(0, &ioapic_chip, handle_edge_irq,
-                                     "edge");
+void mp_irqdomain_deactivate(struct irq_domain *domain,
+                            struct irq_data *irq_data)
+{
+       /* Not called for an IRQ that has multiple IOAPIC pins associated. */
+       ioapic_mask_entry(mp_irqdomain_ioapic_idx(domain),
+                         (int)irq_data->hwirq);
+}
+
+int mp_irqdomain_ioapic_idx(struct irq_domain *domain)
+{
+       return (int)(long)domain->host_data;
 }
+
+const struct irq_domain_ops mp_ioapic_irqdomain_ops = {
+       .alloc          = mp_irqdomain_alloc,
+       .free           = mp_irqdomain_free,
+       .activate       = mp_irqdomain_activate,
+       .deactivate     = mp_irqdomain_deactivate,
+};
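
With mp_ioapic_irqdomain_ops wired up, an IOAPIC pin is no longer programmed
through the old map/unmap callbacks; callers go through the generic allocator
and the request cascades down to x86_vector_domain. A minimal sketch of such a
caller for a non-legacy pin, mirroring mp_alloc_timer_irq() above (the function
name example_alloc_pin_irq is hypothetical, not part of the patch):

static int example_alloc_pin_irq(int ioapic, int pin, int node)
{
        struct irq_alloc_info info;
        struct irq_domain *domain = mp_ioapic_irqdomain(ioapic);

        if (!domain)
                return -ENOSYS;

        init_irq_alloc_info(&info, NULL);
        info.type = X86_IRQ_ALLOC_TYPE_IOAPIC;
        info.ioapic_id = mpc_ioapic_id(ioapic);
        info.ioapic_pin = pin;
        /* mp_irqdomain_alloc() runs first; it then delegates to the
         * parent x86_vector_domain for a CPU vector via
         * irq_domain_alloc_irqs_parent(). */
        return irq_domain_alloc_irqs(domain, 1, node, &info);
}
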
index d6ba2d660dc52f59945825ef80a66821d5a971a5..1a9d735e09c62f508d3226faafda2406eb75c843 100644
@@ -3,6 +3,8 @@
  *
  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *     Moved from arch/x86/kernel/apic/io_apic.c.
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ *     Convert to hierarchical irqdomain
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/dmar.h>
 #include <linux/hpet.h>
 #include <linux/msi.h>
+#include <asm/irqdomain.h>
 #include <asm/msidef.h>
 #include <asm/hpet.h>
 #include <asm/hw_irq.h>
 #include <asm/apic.h>
 #include <asm/irq_remapping.h>
 
-void native_compose_msi_msg(struct pci_dev *pdev,
-                           unsigned int irq, unsigned int dest,
-                           struct msi_msg *msg, u8 hpet_id)
+static struct irq_domain *msi_default_domain;
+
+static void irq_msi_compose_msg(struct irq_data *data, struct msi_msg *msg)
 {
-       struct irq_cfg *cfg = irq_cfg(irq);
+       struct irq_cfg *cfg = irqd_cfg(data);
 
        msg->address_hi = MSI_ADDR_BASE_HI;
 
        if (x2apic_enabled())
-               msg->address_hi |= MSI_ADDR_EXT_DEST_ID(dest);
+               msg->address_hi |= MSI_ADDR_EXT_DEST_ID(cfg->dest_apicid);
 
        msg->address_lo =
                MSI_ADDR_BASE_LO |
@@ -39,7 +42,7 @@ void native_compose_msi_msg(struct pci_dev *pdev,
                ((apic->irq_delivery_mode != dest_LowestPrio) ?
                        MSI_ADDR_REDIRECTION_CPU :
                        MSI_ADDR_REDIRECTION_LOWPRI) |
-               MSI_ADDR_DEST_ID(dest);
+               MSI_ADDR_DEST_ID(cfg->dest_apicid);
 
        msg->data =
                MSI_DATA_TRIGGER_EDGE |
@@ -50,180 +53,201 @@ void native_compose_msi_msg(struct pci_dev *pdev,
                MSI_DATA_VECTOR(cfg->vector);
 }
 
-static int msi_compose_msg(struct pci_dev *pdev, unsigned int irq,
-                          struct msi_msg *msg, u8 hpet_id)
+/*
+ * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
+ * which implement the MSI or MSI-X Capability Structure.
+ */
+static struct irq_chip pci_msi_controller = {
+       .name                   = "PCI-MSI",
+       .irq_unmask             = pci_msi_unmask_irq,
+       .irq_mask               = pci_msi_mask_irq,
+       .irq_ack                = irq_chip_ack_parent,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_compose_msi_msg    = irq_msi_compose_msg,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+};
+
+int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
 {
-       struct irq_cfg *cfg;
-       int err;
-       unsigned dest;
+       struct irq_domain *domain;
+       struct irq_alloc_info info;
 
-       if (disable_apic)
-               return -ENXIO;
+       init_irq_alloc_info(&info, NULL);
+       info.type = X86_IRQ_ALLOC_TYPE_MSI;
+       info.msi_dev = dev;
 
-       cfg = irq_cfg(irq);
-       err = assign_irq_vector(irq, cfg, apic->target_cpus());
-       if (err)
-               return err;
+       domain = irq_remapping_get_irq_domain(&info);
+       if (domain == NULL)
+               domain = msi_default_domain;
+       if (domain == NULL)
+               return -ENOSYS;
 
-       err = apic->cpu_mask_to_apicid_and(cfg->domain,
-                                          apic->target_cpus(), &dest);
-       if (err)
-               return err;
+       return pci_msi_domain_alloc_irqs(domain, dev, nvec, type);
+}
 
-       x86_msi.compose_msi_msg(pdev, irq, dest, msg, hpet_id);
+void native_teardown_msi_irq(unsigned int irq)
+{
+       irq_domain_free_irqs(irq, 1);
+}
 
-       return 0;
+static irq_hw_number_t pci_msi_get_hwirq(struct msi_domain_info *info,
+                                        msi_alloc_info_t *arg)
+{
+       return arg->msi_hwirq;
 }
 
-static int
-msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force)
+static int pci_msi_prepare(struct irq_domain *domain, struct device *dev,
+                          int nvec, msi_alloc_info_t *arg)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       struct msi_msg msg;
-       unsigned int dest;
-       int ret;
+       struct pci_dev *pdev = to_pci_dev(dev);
+       struct msi_desc *desc = first_pci_msi_entry(pdev);
+
+       init_irq_alloc_info(arg, NULL);
+       arg->msi_dev = pdev;
+       if (desc->msi_attrib.is_msix) {
+               arg->type = X86_IRQ_ALLOC_TYPE_MSIX;
+       } else {
+               arg->type = X86_IRQ_ALLOC_TYPE_MSI;
+               arg->flags |= X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+       }
 
-       ret = apic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
+       return 0;
+}
 
-       __get_cached_msi_msg(data->msi_desc, &msg);
+static void pci_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
+{
+       arg->msi_hwirq = pci_msi_domain_calc_hwirq(arg->msi_dev, desc);
+}
+
+static struct msi_domain_ops pci_msi_domain_ops = {
+       .get_hwirq      = pci_msi_get_hwirq,
+       .msi_prepare    = pci_msi_prepare,
+       .set_desc       = pci_msi_set_desc,
+};
 
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+static struct msi_domain_info pci_msi_domain_info = {
+       .flags          = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+                         MSI_FLAG_PCI_MSIX,
+       .ops            = &pci_msi_domain_ops,
+       .chip           = &pci_msi_controller,
+       .handler        = handle_edge_irq,
+       .handler_name   = "edge",
+};
 
-       __pci_write_msi_msg(data->msi_desc, &msg);
+void arch_init_msi_domain(struct irq_domain *parent)
+{
+       if (disable_apic)
+               return;
 
-       return IRQ_SET_MASK_OK_NOCOPY;
+       msi_default_domain = pci_msi_create_irq_domain(NULL,
+                                       &pci_msi_domain_info, parent);
+       if (!msi_default_domain)
+               pr_warn("failed to initialize irqdomain for MSI/MSI-X.\n");
 }
 
-/*
- * IRQ Chip for MSI PCI/PCI-X/PCI-Express Devices,
- * which implement the MSI or MSI-X Capability Structure.
- */
-static struct irq_chip msi_chip = {
-       .name                   = "PCI-MSI",
+#ifdef CONFIG_IRQ_REMAP
+static struct irq_chip pci_msi_ir_controller = {
+       .name                   = "IR-PCI-MSI",
        .irq_unmask             = pci_msi_unmask_irq,
        .irq_mask               = pci_msi_mask_irq,
-       .irq_ack                = apic_ack_edge,
-       .irq_set_affinity       = msi_set_affinity,
-       .irq_retrigger          = apic_retrigger_irq,
+       .irq_ack                = irq_chip_ack_parent,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_set_vcpu_affinity  = irq_chip_set_vcpu_affinity_parent,
        .flags                  = IRQCHIP_SKIP_SET_WAKE,
 };
 
-int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc,
-                 unsigned int irq_base, unsigned int irq_offset)
-{
-       struct irq_chip *chip = &msi_chip;
-       struct msi_msg msg;
-       unsigned int irq = irq_base + irq_offset;
-       int ret;
-
-       ret = msi_compose_msg(dev, irq, &msg, -1);
-       if (ret < 0)
-               return ret;
-
-       irq_set_msi_desc_off(irq_base, irq_offset, msidesc);
-
-       /*
-        * MSI-X message is written per-IRQ, the offset is always 0.
-        * MSI message denotes a contiguous group of IRQs, written for 0th IRQ.
-        */
-       if (!irq_offset)
-               pci_write_msi_msg(irq, &msg);
+static struct msi_domain_info pci_msi_ir_domain_info = {
+       .flags          = MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS |
+                         MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX,
+       .ops            = &pci_msi_domain_ops,
+       .chip           = &pci_msi_ir_controller,
+       .handler        = handle_edge_irq,
+       .handler_name   = "edge",
+};
 
-       setup_remapped_irq(irq, irq_cfg(irq), chip);
+struct irq_domain *arch_create_msi_irq_domain(struct irq_domain *parent)
+{
+       return pci_msi_create_irq_domain(NULL, &pci_msi_ir_domain_info, parent);
+}
+#endif
 
-       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
+#ifdef CONFIG_DMAR_TABLE
+static void dmar_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
+{
+       dmar_msi_write(data->irq, msg);
+}
 
-       dev_dbg(&dev->dev, "irq %d for MSI/MSI-X\n", irq);
+static struct irq_chip dmar_msi_controller = {
+       .name                   = "DMAR-MSI",
+       .irq_unmask             = dmar_msi_unmask,
+       .irq_mask               = dmar_msi_mask,
+       .irq_ack                = irq_chip_ack_parent,
+       .irq_set_affinity       = msi_domain_set_affinity,
+       .irq_retrigger          = irq_chip_retrigger_hierarchy,
+       .irq_compose_msi_msg    = irq_msi_compose_msg,
+       .irq_write_msi_msg      = dmar_msi_write_msg,
+       .flags                  = IRQCHIP_SKIP_SET_WAKE,
+};
 
-       return 0;
+static irq_hw_number_t dmar_msi_get_hwirq(struct msi_domain_info *info,
+                                         msi_alloc_info_t *arg)
+{
+       return arg->dmar_id;
 }
 
-int native_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
+static int dmar_msi_init(struct irq_domain *domain,
+                        struct msi_domain_info *info, unsigned int virq,
+                        irq_hw_number_t hwirq, msi_alloc_info_t *arg)
 {
-       struct msi_desc *msidesc;
-       unsigned int irq;
-       int node, ret;
+       irq_domain_set_info(domain, virq, arg->dmar_id, info->chip, NULL,
+                           handle_edge_irq, arg->dmar_data, "edge");
 
-       /* Multiple MSI vectors only supported with interrupt remapping */
-       if (type == PCI_CAP_ID_MSI && nvec > 1)
-               return 1;
+       return 0;
+}
 
-       node = dev_to_node(&dev->dev);
+static struct msi_domain_ops dmar_msi_domain_ops = {
+       .get_hwirq      = dmar_msi_get_hwirq,
+       .msi_init       = dmar_msi_init,
+};
 
-       list_for_each_entry(msidesc, &dev->msi_list, list) {
-               irq = irq_alloc_hwirq(node);
-               if (!irq)
-                       return -ENOSPC;
+static struct msi_domain_info dmar_msi_domain_info = {
+       .ops            = &dmar_msi_domain_ops,
+       .chip           = &dmar_msi_controller,
+};
 
-               ret = setup_msi_irq(dev, msidesc, irq, 0);
-               if (ret < 0) {
-                       irq_free_hwirq(irq);
-                       return ret;
-               }
+static struct irq_domain *dmar_get_irq_domain(void)
+{
+       static struct irq_domain *dmar_domain;
+       static DEFINE_MUTEX(dmar_lock);
 
-       }
-       return 0;
-}
+       mutex_lock(&dmar_lock);
+       if (dmar_domain == NULL)
+               dmar_domain = msi_create_irq_domain(NULL, &dmar_msi_domain_info,
+                                                   x86_vector_domain);
+       mutex_unlock(&dmar_lock);
 
-void native_teardown_msi_irq(unsigned int irq)
-{
-       irq_free_hwirq(irq);
+       return dmar_domain;
 }
 
-#ifdef CONFIG_DMAR_TABLE
-static int
-dmar_msi_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                     bool force)
+int dmar_alloc_hwirq(int id, int node, void *arg)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       unsigned int dest, irq = data->irq;
-       struct msi_msg msg;
-       int ret;
-
-       ret = apic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
+       struct irq_domain *domain = dmar_get_irq_domain();
+       struct irq_alloc_info info;
 
-       dmar_msi_read(irq, &msg);
+       if (!domain)
+               return -1;
 
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
-       msg.address_hi = MSI_ADDR_BASE_HI | MSI_ADDR_EXT_DEST_ID(dest);
+       init_irq_alloc_info(&info, NULL);
+       info.type = X86_IRQ_ALLOC_TYPE_DMAR;
+       info.dmar_id = id;
+       info.dmar_data = arg;
 
-       dmar_msi_write(irq, &msg);
-
-       return IRQ_SET_MASK_OK_NOCOPY;
+       return irq_domain_alloc_irqs(domain, 1, node, &info);
 }
 
-static struct irq_chip dmar_msi_type = {
-       .name                   = "DMAR_MSI",
-       .irq_unmask             = dmar_msi_unmask,
-       .irq_mask               = dmar_msi_mask,
-       .irq_ack                = apic_ack_edge,
-       .irq_set_affinity       = dmar_msi_set_affinity,
-       .irq_retrigger          = apic_retrigger_irq,
-       .flags                  = IRQCHIP_SKIP_SET_WAKE,
-};
-
-int arch_setup_dmar_msi(unsigned int irq)
+void dmar_free_hwirq(int irq)
 {
-       int ret;
-       struct msi_msg msg;
-
-       ret = msi_compose_msg(NULL, irq, &msg, -1);
-       if (ret < 0)
-               return ret;
-       dmar_msi_write(irq, &msg);
-       irq_set_chip_and_handler_name(irq, &dmar_msi_type, handle_edge_irq,
-                                     "edge");
-       return 0;
+       irq_domain_free_irqs(irq, 1);
 }
 #endif
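
The DMAR conversion above replaces arch_setup_dmar_msi() with plain
hierarchical allocation. A hedged sketch of what the intended caller looks
like (the real one lives in drivers/iommu; the function and parameter names
here are placeholders):

static int example_enable_dmar_fault_irq(int seq_id, int node, void *unit)
{
        /* seq_id becomes the hwirq via dmar_msi_get_hwirq(), and unit
         * is handed back as handler data by dmar_msi_init(). */
        int irq = dmar_alloc_hwirq(seq_id, node, unit);

        if (irq < 0)
                return irq;
        /* ...followed by request_irq(irq, <fault handler>, ...) */
        return irq;
}
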
 
@@ -231,56 +255,103 @@ int arch_setup_dmar_msi(unsigned int irq)
  * MSI message composition
  */
 #ifdef CONFIG_HPET_TIMER
+static inline int hpet_dev_id(struct irq_domain *domain)
+{
+       struct msi_domain_info *info = msi_get_domain_info(domain);
+
+       return (int)(long)info->data;
+}
 
-static int hpet_msi_set_affinity(struct irq_data *data,
-                                const struct cpumask *mask, bool force)
+static void hpet_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       struct msi_msg msg;
-       unsigned int dest;
-       int ret;
+       hpet_msi_write(data->handler_data, msg);
+}
 
-       ret = apic_set_affinity(data, mask, &dest);
-       if (ret)
-               return ret;
+static struct irq_chip hpet_msi_controller = {
+       .name = "HPET-MSI",
+       .irq_unmask = hpet_msi_unmask,
+       .irq_mask = hpet_msi_mask,
+       .irq_ack = irq_chip_ack_parent,
+       .irq_set_affinity = msi_domain_set_affinity,
+       .irq_retrigger = irq_chip_retrigger_hierarchy,
+       .irq_compose_msi_msg = irq_msi_compose_msg,
+       .irq_write_msi_msg = hpet_msi_write_msg,
+       .flags = IRQCHIP_SKIP_SET_WAKE,
+};
 
-       hpet_msi_read(data->handler_data, &msg);
+static irq_hw_number_t hpet_msi_get_hwirq(struct msi_domain_info *info,
+                                         msi_alloc_info_t *arg)
+{
+       return arg->hpet_index;
+}
 
-       msg.data &= ~MSI_DATA_VECTOR_MASK;
-       msg.data |= MSI_DATA_VECTOR(cfg->vector);
-       msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK;
-       msg.address_lo |= MSI_ADDR_DEST_ID(dest);
+static int hpet_msi_init(struct irq_domain *domain,
+                        struct msi_domain_info *info, unsigned int virq,
+                        irq_hw_number_t hwirq, msi_alloc_info_t *arg)
+{
+       irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+       irq_domain_set_info(domain, virq, arg->hpet_index, info->chip, NULL,
+                           handle_edge_irq, arg->hpet_data, "edge");
 
-       hpet_msi_write(data->handler_data, &msg);
+       return 0;
+}
 
-       return IRQ_SET_MASK_OK_NOCOPY;
+static void hpet_msi_free(struct irq_domain *domain,
+                         struct msi_domain_info *info, unsigned int virq)
+{
+       irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
 }
 
-static struct irq_chip hpet_msi_type = {
-       .name = "HPET_MSI",
-       .irq_unmask = hpet_msi_unmask,
-       .irq_mask = hpet_msi_mask,
-       .irq_ack = apic_ack_edge,
-       .irq_set_affinity = hpet_msi_set_affinity,
-       .irq_retrigger = apic_retrigger_irq,
-       .flags = IRQCHIP_SKIP_SET_WAKE,
+static struct msi_domain_ops hpet_msi_domain_ops = {
+       .get_hwirq      = hpet_msi_get_hwirq,
+       .msi_init       = hpet_msi_init,
+       .msi_free       = hpet_msi_free,
+};
+
+static struct msi_domain_info hpet_msi_domain_info = {
+       .ops            = &hpet_msi_domain_ops,
+       .chip           = &hpet_msi_controller,
 };
 
-int default_setup_hpet_msi(unsigned int irq, unsigned int id)
+struct irq_domain *hpet_create_irq_domain(int hpet_id)
 {
-       struct irq_chip *chip = &hpet_msi_type;
-       struct msi_msg msg;
-       int ret;
+       struct irq_domain *parent;
+       struct irq_alloc_info info;
+       struct msi_domain_info *domain_info;
+
+       if (x86_vector_domain == NULL)
+               return NULL;
+
+       domain_info = kzalloc(sizeof(*domain_info), GFP_KERNEL);
+       if (!domain_info)
+               return NULL;
+
+       *domain_info = hpet_msi_domain_info;
+       domain_info->data = (void *)(long)hpet_id;
+
+       init_irq_alloc_info(&info, NULL);
+       info.type = X86_IRQ_ALLOC_TYPE_HPET;
+       info.hpet_id = hpet_id;
+       parent = irq_remapping_get_ir_irq_domain(&info);
+       if (parent == NULL)
+               parent = x86_vector_domain;
+       else
+               hpet_msi_controller.name = "IR-HPET-MSI";
+
+       return msi_create_irq_domain(NULL, domain_info, parent);
+}
 
-       ret = msi_compose_msg(NULL, irq, &msg, id);
-       if (ret < 0)
-               return ret;
+int hpet_assign_irq(struct irq_domain *domain, struct hpet_dev *dev,
+                   int dev_num)
+{
+       struct irq_alloc_info info;
 
-       hpet_msi_write(irq_get_handler_data(irq), &msg);
-       irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-       setup_remapped_irq(irq, irq_cfg(irq), chip);
+       init_irq_alloc_info(&info, NULL);
+       info.type = X86_IRQ_ALLOC_TYPE_HPET;
+       info.hpet_data = dev;
+       info.hpet_id = hpet_dev_id(domain);
+       info.hpet_index = dev_num;
 
-       irq_set_chip_and_handler_name(irq, chip, handle_edge_irq, "edge");
-       return 0;
+       return irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, &info);
 }
 #endif
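
Stepping back to irq_msi_compose_msg() at the top of this file: the message
layout it produces is fixed-delivery and edge-triggered, with the destination
APIC ID in the address word and the vector in the data word. A sketch with
made-up values (vector 0x41, physical APIC ID 3), using the <asm/msidef.h>
constants:

static void example_compose(struct msi_msg *msg)
{
        msg->address_hi = MSI_ADDR_BASE_HI;     /* no x2APIC extension */
        msg->address_lo = MSI_ADDR_BASE_LO |    /* 0xfee00000 window */
                          MSI_ADDR_DEST_MODE_PHYSICAL |
                          MSI_ADDR_REDIRECTION_CPU |
                          MSI_ADDR_DEST_ID(3);  /* target APIC ID */
        msg->data = MSI_DATA_TRIGGER_EDGE |
                    MSI_DATA_LEVEL_ASSERT |
                    MSI_DATA_DELIVERY_FIXED |
                    MSI_DATA_VECTOR(0x41);      /* CPU vector */
}
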
index 6cedd79145813cc792c891573ac03b3e18d9e529..28eba2d38b1570c77f3b82543d6ee84686333027 100644
@@ -3,6 +3,8 @@
  *
  * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo
  *     Moved from arch/x86/kernel/apic/io_apic.c.
+ * Jiang Liu <jiang.liu@linux.intel.com>
+ *     Enable support of hierarchical irqdomains
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 as
 #include <linux/interrupt.h>
 #include <linux/init.h>
 #include <linux/compiler.h>
-#include <linux/irqdomain.h>
 #include <linux/slab.h>
+#include <asm/irqdomain.h>
 #include <asm/hw_irq.h>
 #include <asm/apic.h>
 #include <asm/i8259.h>
 #include <asm/desc.h>
 #include <asm/irq_remapping.h>
 
+struct apic_chip_data {
+       struct irq_cfg          cfg;
+       cpumask_var_t           domain;
+       cpumask_var_t           old_domain;
+       u8                      move_in_progress : 1;
+};
+
+struct irq_domain *x86_vector_domain;
 static DEFINE_RAW_SPINLOCK(vector_lock);
+static cpumask_var_t vector_cpumask;
+static struct irq_chip lapic_controller;
+#ifdef CONFIG_X86_IO_APIC
+static struct apic_chip_data *legacy_irq_data[NR_IRQS_LEGACY];
+#endif
 
 void lock_vector_lock(void)
 {
@@ -34,71 +49,59 @@ void unlock_vector_lock(void)
        raw_spin_unlock(&vector_lock);
 }
 
-struct irq_cfg *irq_cfg(unsigned int irq)
+static struct apic_chip_data *apic_chip_data(struct irq_data *irq_data)
 {
-       return irq_get_chip_data(irq);
+       if (!irq_data)
+               return NULL;
+
+       while (irq_data->parent_data)
+               irq_data = irq_data->parent_data;
+
+       return irq_data->chip_data;
 }
 
 struct irq_cfg *irqd_cfg(struct irq_data *irq_data)
 {
-       return irq_data->chip_data;
+       struct apic_chip_data *data = apic_chip_data(irq_data);
+
+       return data ? &data->cfg : NULL;
 }
 
-static struct irq_cfg *alloc_irq_cfg(unsigned int irq, int node)
+struct irq_cfg *irq_cfg(unsigned int irq)
 {
-       struct irq_cfg *cfg;
+       return irqd_cfg(irq_get_irq_data(irq));
+}
 
-       cfg = kzalloc_node(sizeof(*cfg), GFP_KERNEL, node);
-       if (!cfg)
+static struct apic_chip_data *alloc_apic_chip_data(int node)
+{
+       struct apic_chip_data *data;
+
+       data = kzalloc_node(sizeof(*data), GFP_KERNEL, node);
+       if (!data)
                return NULL;
-       if (!zalloc_cpumask_var_node(&cfg->domain, GFP_KERNEL, node))
-               goto out_cfg;
-       if (!zalloc_cpumask_var_node(&cfg->old_domain, GFP_KERNEL, node))
+       if (!zalloc_cpumask_var_node(&data->domain, GFP_KERNEL, node))
+               goto out_data;
+       if (!zalloc_cpumask_var_node(&data->old_domain, GFP_KERNEL, node))
                goto out_domain;
-#ifdef CONFIG_X86_IO_APIC
-       INIT_LIST_HEAD(&cfg->irq_2_pin);
-#endif
-       return cfg;
+       return data;
 out_domain:
-       free_cpumask_var(cfg->domain);
-out_cfg:
-       kfree(cfg);
+       free_cpumask_var(data->domain);
+out_data:
+       kfree(data);
        return NULL;
 }
 
-struct irq_cfg *alloc_irq_and_cfg_at(unsigned int at, int node)
+static void free_apic_chip_data(struct apic_chip_data *data)
 {
-       int res = irq_alloc_desc_at(at, node);
-       struct irq_cfg *cfg;
-
-       if (res < 0) {
-               if (res != -EEXIST)
-                       return NULL;
-               cfg = irq_cfg(at);
-               if (cfg)
-                       return cfg;
+       if (data) {
+               free_cpumask_var(data->domain);
+               free_cpumask_var(data->old_domain);
+               kfree(data);
        }
-
-       cfg = alloc_irq_cfg(at, node);
-       if (cfg)
-               irq_set_chip_data(at, cfg);
-       else
-               irq_free_desc(at);
-       return cfg;
-}
-
-static void free_irq_cfg(unsigned int at, struct irq_cfg *cfg)
-{
-       if (!cfg)
-               return;
-       irq_set_chip_data(at, NULL);
-       free_cpumask_var(cfg->domain);
-       free_cpumask_var(cfg->old_domain);
-       kfree(cfg);
 }
 
-static int
-__assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+static int __assign_irq_vector(int irq, struct apic_chip_data *d,
+                              const struct cpumask *mask)
 {
        /*
         * NOTE! The local APIC isn't very good at handling
@@ -114,36 +117,33 @@ __assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
        static int current_vector = FIRST_EXTERNAL_VECTOR + VECTOR_OFFSET_START;
        static int current_offset = VECTOR_OFFSET_START % 16;
        int cpu, err;
-       cpumask_var_t tmp_mask;
 
-       if (cfg->move_in_progress)
+       if (d->move_in_progress)
                return -EBUSY;
 
-       if (!alloc_cpumask_var(&tmp_mask, GFP_ATOMIC))
-               return -ENOMEM;
-
        /* Only try and allocate irqs on cpus that are present */
        err = -ENOSPC;
-       cpumask_clear(cfg->old_domain);
+       cpumask_clear(d->old_domain);
        cpu = cpumask_first_and(mask, cpu_online_mask);
        while (cpu < nr_cpu_ids) {
                int new_cpu, vector, offset;
 
-               apic->vector_allocation_domain(cpu, tmp_mask, mask);
+               apic->vector_allocation_domain(cpu, vector_cpumask, mask);
 
-               if (cpumask_subset(tmp_mask, cfg->domain)) {
+               if (cpumask_subset(vector_cpumask, d->domain)) {
                        err = 0;
-                       if (cpumask_equal(tmp_mask, cfg->domain))
+                       if (cpumask_equal(vector_cpumask, d->domain))
                                break;
                        /*
                         * New cpumask using the vector is a proper subset of
                         * the current in use mask. So cleanup the vector
                         * allocation for the members that are not used anymore.
                         */
-                       cpumask_andnot(cfg->old_domain, cfg->domain, tmp_mask);
-                       cfg->move_in_progress =
-                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
-                       cpumask_and(cfg->domain, cfg->domain, tmp_mask);
+                       cpumask_andnot(d->old_domain, d->domain,
+                                      vector_cpumask);
+                       d->move_in_progress =
+                          cpumask_intersects(d->old_domain, cpu_online_mask);
+                       cpumask_and(d->domain, d->domain, vector_cpumask);
                        break;
                }
 
@@ -157,16 +157,18 @@ next:
                }
 
                if (unlikely(current_vector == vector)) {
-                       cpumask_or(cfg->old_domain, cfg->old_domain, tmp_mask);
-                       cpumask_andnot(tmp_mask, mask, cfg->old_domain);
-                       cpu = cpumask_first_and(tmp_mask, cpu_online_mask);
+                       cpumask_or(d->old_domain, d->old_domain,
+                                  vector_cpumask);
+                       cpumask_andnot(vector_cpumask, mask, d->old_domain);
+                       cpu = cpumask_first_and(vector_cpumask,
+                                               cpu_online_mask);
                        continue;
                }
 
                if (test_bit(vector, used_vectors))
                        goto next;
 
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask) {
+               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask) {
                        if (per_cpu(vector_irq, new_cpu)[vector] >
                            VECTOR_UNDEFINED)
                                goto next;
@@ -174,55 +176,73 @@ next:
                /* Found one! */
                current_vector = vector;
                current_offset = offset;
-               if (cfg->vector) {
-                       cpumask_copy(cfg->old_domain, cfg->domain);
-                       cfg->move_in_progress =
-                          cpumask_intersects(cfg->old_domain, cpu_online_mask);
+               if (d->cfg.vector) {
+                       cpumask_copy(d->old_domain, d->domain);
+                       d->move_in_progress =
+                          cpumask_intersects(d->old_domain, cpu_online_mask);
                }
-               for_each_cpu_and(new_cpu, tmp_mask, cpu_online_mask)
+               for_each_cpu_and(new_cpu, vector_cpumask, cpu_online_mask)
                        per_cpu(vector_irq, new_cpu)[vector] = irq;
-               cfg->vector = vector;
-               cpumask_copy(cfg->domain, tmp_mask);
+               d->cfg.vector = vector;
+               cpumask_copy(d->domain, vector_cpumask);
                err = 0;
                break;
        }
-       free_cpumask_var(tmp_mask);
+
+       if (!err) {
+               /* cache the destination APIC ID in cfg->dest_apicid */
+               err = apic->cpu_mask_to_apicid_and(mask, d->domain,
+                                                  &d->cfg.dest_apicid);
+       }
 
        return err;
 }
 
-int assign_irq_vector(int irq, struct irq_cfg *cfg, const struct cpumask *mask)
+static int assign_irq_vector(int irq, struct apic_chip_data *data,
+                            const struct cpumask *mask)
 {
        int err;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       err = __assign_irq_vector(irq, cfg, mask);
+       err = __assign_irq_vector(irq, data, mask);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
        return err;
 }
 
-void clear_irq_vector(int irq, struct irq_cfg *cfg)
+static int assign_irq_vector_policy(int irq, int node,
+                                   struct apic_chip_data *data,
+                                   struct irq_alloc_info *info)
+{
+       if (info && info->mask)
+               return assign_irq_vector(irq, data, info->mask);
+       if (node != NUMA_NO_NODE &&
+           assign_irq_vector(irq, data, cpumask_of_node(node)) == 0)
+               return 0;
+       return assign_irq_vector(irq, data, apic->target_cpus());
+}
+
+static void clear_irq_vector(int irq, struct apic_chip_data *data)
 {
        int cpu, vector;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       BUG_ON(!cfg->vector);
+       BUG_ON(!data->cfg.vector);
 
-       vector = cfg->vector;
-       for_each_cpu_and(cpu, cfg->domain, cpu_online_mask)
+       vector = data->cfg.vector;
+       for_each_cpu_and(cpu, data->domain, cpu_online_mask)
                per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
 
-       cfg->vector = 0;
-       cpumask_clear(cfg->domain);
+       data->cfg.vector = 0;
+       cpumask_clear(data->domain);
 
-       if (likely(!cfg->move_in_progress)) {
+       if (likely(!data->move_in_progress)) {
                raw_spin_unlock_irqrestore(&vector_lock, flags);
                return;
        }
 
-       for_each_cpu_and(cpu, cfg->old_domain, cpu_online_mask) {
+       for_each_cpu_and(cpu, data->old_domain, cpu_online_mask) {
                for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS;
                     vector++) {
                        if (per_cpu(vector_irq, cpu)[vector] != irq)
@@ -231,10 +251,95 @@ void clear_irq_vector(int irq, struct irq_cfg *cfg)
                        break;
                }
        }
-       cfg->move_in_progress = 0;
+       data->move_in_progress = 0;
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
+void init_irq_alloc_info(struct irq_alloc_info *info,
+                        const struct cpumask *mask)
+{
+       memset(info, 0, sizeof(*info));
+       info->mask = mask;
+}
+
+void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src)
+{
+       if (src)
+               *dst = *src;
+       else
+               memset(dst, 0, sizeof(*dst));
+}
+
+static void x86_vector_free_irqs(struct irq_domain *domain,
+                                unsigned int virq, unsigned int nr_irqs)
+{
+       struct irq_data *irq_data;
+       int i;
+
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(x86_vector_domain, virq + i);
+               if (irq_data && irq_data->chip_data) {
+                       clear_irq_vector(virq + i, irq_data->chip_data);
+                       free_apic_chip_data(irq_data->chip_data);
+#ifdef CONFIG_X86_IO_APIC
+                       if (virq + i < nr_legacy_irqs())
+                               legacy_irq_data[virq + i] = NULL;
+#endif
+                       irq_domain_reset_irq_data(irq_data);
+               }
+       }
+}
+
+static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq,
+                                unsigned int nr_irqs, void *arg)
+{
+       struct irq_alloc_info *info = arg;
+       struct apic_chip_data *data;
+       struct irq_data *irq_data;
+       int i, err;
+
+       if (disable_apic)
+               return -ENXIO;
+
+       /* Currently the vector allocator can't guarantee contiguous allocations */
+       if ((info->flags & X86_IRQ_ALLOC_CONTIGUOUS_VECTORS) && nr_irqs > 1)
+               return -ENOSYS;
+
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               BUG_ON(!irq_data);
+#ifdef CONFIG_X86_IO_APIC
+               if (virq + i < nr_legacy_irqs() && legacy_irq_data[virq + i])
+                       data = legacy_irq_data[virq + i];
+               else
+#endif
+                       data = alloc_apic_chip_data(irq_data->node);
+               if (!data) {
+                       err = -ENOMEM;
+                       goto error;
+               }
+
+               irq_data->chip = &lapic_controller;
+               irq_data->chip_data = data;
+               irq_data->hwirq = virq + i;
+               err = assign_irq_vector_policy(virq, irq_data->node, data,
+                                              info);
+               if (err)
+                       goto error;
+       }
+
+       return 0;
+
+error:
+       x86_vector_free_irqs(domain, virq, i + 1);
+       return err;
+}
+
+static const struct irq_domain_ops x86_vector_domain_ops = {
+       .alloc  = x86_vector_alloc_irqs,
+       .free   = x86_vector_free_irqs,
+};
+
 int __init arch_probe_nr_irqs(void)
 {
        int nr;
@@ -258,8 +363,43 @@ int __init arch_probe_nr_irqs(void)
        return nr_legacy_irqs();
 }
 
+#ifdef CONFIG_X86_IO_APIC
+static void init_legacy_irqs(void)
+{
+       int i, node = cpu_to_node(0);
+       struct apic_chip_data *data;
+
+       /*
+        * For legacy IRQs, start by assigning irq0 to irq15 to
+        * ISA_IRQ_VECTOR(i) on all CPUs.
+        */
+       for (i = 0; i < nr_legacy_irqs(); i++) {
+               data = legacy_irq_data[i] = alloc_apic_chip_data(node);
+               BUG_ON(!data);
+
+               data->cfg.vector = ISA_IRQ_VECTOR(i);
+               cpumask_setall(data->domain);
+               irq_set_chip_data(i, data);
+       }
+}
+#else
+static void init_legacy_irqs(void) { }
+#endif
+
 int __init arch_early_irq_init(void)
 {
+       init_legacy_irqs();
+
+       x86_vector_domain = irq_domain_add_tree(NULL, &x86_vector_domain_ops,
+                                               NULL);
+       BUG_ON(x86_vector_domain == NULL);
+       irq_set_default_host(x86_vector_domain);
+
+       arch_init_msi_domain(x86_vector_domain);
+       arch_init_htirq_domain(x86_vector_domain);
+
+       BUG_ON(!alloc_cpumask_var(&vector_cpumask, GFP_KERNEL));
+
        return arch_early_ioapic_init();
 }
 
@@ -267,7 +407,7 @@ static void __setup_vector_irq(int cpu)
 {
        /* Initialize vector_irq on a new cpu */
        int irq, vector;
-       struct irq_cfg *cfg;
+       struct apic_chip_data *data;
 
        /*
         * vector_lock will make sure that we don't run into irq vector
@@ -277,13 +417,13 @@ static void __setup_vector_irq(int cpu)
        raw_spin_lock(&vector_lock);
        /* Mark the inuse vectors */
        for_each_active_irq(irq) {
-               cfg = irq_cfg(irq);
-               if (!cfg)
+               data = apic_chip_data(irq_get_irq_data(irq));
+               if (!data)
                        continue;
 
-               if (!cpumask_test_cpu(cpu, cfg->domain))
+               if (!cpumask_test_cpu(cpu, data->domain))
                        continue;
-               vector = cfg->vector;
+               vector = data->cfg.vector;
                per_cpu(vector_irq, cpu)[vector] = irq;
        }
        /* Mark the free vectors */
@@ -292,8 +432,8 @@ static void __setup_vector_irq(int cpu)
                if (irq <= VECTOR_UNDEFINED)
                        continue;
 
-               cfg = irq_cfg(irq);
-               if (!cpumask_test_cpu(cpu, cfg->domain))
+               data = apic_chip_data(irq_get_irq_data(irq));
+               if (!cpumask_test_cpu(cpu, data->domain))
                        per_cpu(vector_irq, cpu)[vector] = VECTOR_UNDEFINED;
        }
        raw_spin_unlock(&vector_lock);
@@ -314,20 +454,20 @@ void setup_vector_irq(int cpu)
         * legacy vector to irq mapping:
         */
        for (irq = 0; irq < nr_legacy_irqs(); irq++)
-               per_cpu(vector_irq, cpu)[IRQ0_VECTOR + irq] = irq;
+               per_cpu(vector_irq, cpu)[ISA_IRQ_VECTOR(irq)] = irq;
 
        __setup_vector_irq(cpu);
 }
 
-int apic_retrigger_irq(struct irq_data *data)
+static int apic_retrigger_irq(struct irq_data *irq_data)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
+       struct apic_chip_data *data = apic_chip_data(irq_data);
        unsigned long flags;
        int cpu;
 
        raw_spin_lock_irqsave(&vector_lock, flags);
-       cpu = cpumask_first_and(cfg->domain, cpu_online_mask);
-       apic->send_IPI_mask(cpumask_of(cpu), cfg->vector);
+       cpu = cpumask_first_and(data->domain, cpu_online_mask);
+       apic->send_IPI_mask(cpumask_of(cpu), data->cfg.vector);
        raw_spin_unlock_irqrestore(&vector_lock, flags);
 
        return 1;
@@ -340,73 +480,76 @@ void apic_ack_edge(struct irq_data *data)
        ack_APIC_irq();
 }
 
-/*
- * Either sets data->affinity to a valid value, and returns
- * ->cpu_mask_to_apicid of that in dest_id, or returns -1 and
- * leaves data->affinity untouched.
- */
-int apic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                     unsigned int *dest_id)
+static int apic_set_affinity(struct irq_data *irq_data,
+                            const struct cpumask *dest, bool force)
 {
-       struct irq_cfg *cfg = irqd_cfg(data);
-       unsigned int irq = data->irq;
-       int err;
+       struct apic_chip_data *data = irq_data->chip_data;
+       int err, irq = irq_data->irq;
 
        if (!config_enabled(CONFIG_SMP))
                return -EPERM;
 
-       if (!cpumask_intersects(mask, cpu_online_mask))
+       if (!cpumask_intersects(dest, cpu_online_mask))
                return -EINVAL;
 
-       err = assign_irq_vector(irq, cfg, mask);
-       if (err)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(mask, cfg->domain, dest_id);
+       err = assign_irq_vector(irq, data, dest);
        if (err) {
-               if (assign_irq_vector(irq, cfg, data->affinity))
+               struct irq_data *top = irq_get_irq_data(irq);
+
+               if (assign_irq_vector(irq, data, top->affinity))
                        pr_err("Failed to recover vector for irq %d\n", irq);
                return err;
        }
 
-       cpumask_copy(data->affinity, mask);
-
-       return 0;
+       return IRQ_SET_MASK_OK;
 }
 
+static struct irq_chip lapic_controller = {
+       .irq_ack                = apic_ack_edge,
+       .irq_set_affinity       = apic_set_affinity,
+       .irq_retrigger          = apic_retrigger_irq,
+};
+
 #ifdef CONFIG_SMP
-void send_cleanup_vector(struct irq_cfg *cfg)
+static void __send_cleanup_vector(struct apic_chip_data *data)
 {
        cpumask_var_t cleanup_mask;
 
        if (unlikely(!alloc_cpumask_var(&cleanup_mask, GFP_ATOMIC))) {
                unsigned int i;
 
-               for_each_cpu_and(i, cfg->old_domain, cpu_online_mask)
+               for_each_cpu_and(i, data->old_domain, cpu_online_mask)
                        apic->send_IPI_mask(cpumask_of(i),
                                            IRQ_MOVE_CLEANUP_VECTOR);
        } else {
-               cpumask_and(cleanup_mask, cfg->old_domain, cpu_online_mask);
+               cpumask_and(cleanup_mask, data->old_domain, cpu_online_mask);
                apic->send_IPI_mask(cleanup_mask, IRQ_MOVE_CLEANUP_VECTOR);
                free_cpumask_var(cleanup_mask);
        }
-       cfg->move_in_progress = 0;
+       data->move_in_progress = 0;
+}
+
+void send_cleanup_vector(struct irq_cfg *cfg)
+{
+       struct apic_chip_data *data;
+
+       data = container_of(cfg, struct apic_chip_data, cfg);
+       if (data->move_in_progress)
+               __send_cleanup_vector(data);
 }
 
 asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
 {
        unsigned vector, me;
 
-       ack_APIC_irq();
-       irq_enter();
-       exit_idle();
+       entering_ack_irq();
 
        me = smp_processor_id();
        for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
                int irq;
                unsigned int irr;
                struct irq_desc *desc;
-               struct irq_cfg *cfg;
+               struct apic_chip_data *data;
 
                irq = __this_cpu_read(vector_irq[vector]);
 
@@ -417,8 +560,8 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                if (!desc)
                        continue;
 
-               cfg = irq_cfg(irq);
-               if (!cfg)
+               data = apic_chip_data(&desc->irq_data);
+               if (!data)
                        continue;
 
                raw_spin_lock(&desc->lock);
@@ -427,10 +570,11 @@ asmlinkage __visible void smp_irq_move_cleanup_interrupt(void)
                 * Check if the irq migration is in progress. If so, we
                 * haven't received the cleanup request yet for this irq.
                 */
-               if (cfg->move_in_progress)
+               if (data->move_in_progress)
                        goto unlock;
 
-               if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
+               if (vector == data->cfg.vector &&
+                   cpumask_test_cpu(me, data->domain))
                        goto unlock;
 
                irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
@@ -450,20 +594,21 @@ unlock:
                raw_spin_unlock(&desc->lock);
        }
 
-       irq_exit();
+       exiting_irq();
 }
 
 static void __irq_complete_move(struct irq_cfg *cfg, unsigned vector)
 {
        unsigned me;
+       struct apic_chip_data *data;
 
-       if (likely(!cfg->move_in_progress))
+       data = container_of(cfg, struct apic_chip_data, cfg);
+       if (likely(!data->move_in_progress))
                return;
 
        me = smp_processor_id();
-
-       if (vector == cfg->vector && cpumask_test_cpu(me, cfg->domain))
-               send_cleanup_vector(cfg);
+       if (vector == data->cfg.vector && cpumask_test_cpu(me, data->domain))
+               __send_cleanup_vector(data);
 }
 
 void irq_complete_move(struct irq_cfg *cfg)
@@ -475,46 +620,11 @@ void irq_force_complete_move(int irq)
 {
        struct irq_cfg *cfg = irq_cfg(irq);
 
-       if (!cfg)
-               return;
-
-       __irq_complete_move(cfg, cfg->vector);
+       if (cfg)
+               __irq_complete_move(cfg, cfg->vector);
 }
 #endif
 
-/*
- * Dynamic irq allocate and deallocation. Should be replaced by irq domains!
- */
-int arch_setup_hwirq(unsigned int irq, int node)
-{
-       struct irq_cfg *cfg;
-       unsigned long flags;
-       int ret;
-
-       cfg = alloc_irq_cfg(irq, node);
-       if (!cfg)
-               return -ENOMEM;
-
-       raw_spin_lock_irqsave(&vector_lock, flags);
-       ret = __assign_irq_vector(irq, cfg, apic->target_cpus());
-       raw_spin_unlock_irqrestore(&vector_lock, flags);
-
-       if (!ret)
-               irq_set_chip_data(irq, cfg);
-       else
-               free_irq_cfg(irq, cfg);
-       return ret;
-}
-
-void arch_teardown_hwirq(unsigned int irq)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-
-       free_remapped_irq(irq);
-       clear_irq_vector(irq, cfg);
-       free_irq_cfg(irq, cfg);
-}
-
 static void __init print_APIC_field(int base)
 {
        int i;
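
One data-model note on this file: struct irq_cfg is now embedded in
apic_chip_data, which is the chip_data of the bottom-most irq_data owned by
x86_vector_domain. Both lookup directions used throughout the patch, as a
sketch:

static struct apic_chip_data *example_lookup(unsigned int irq)
{
        /* Downward: irq_cfg() -> irqd_cfg() walks parent_data to the
         * vector domain's irq_data and returns &data->cfg. */
        struct irq_cfg *cfg = irq_cfg(irq);

        /* Upward: recover the container from the embedded irq_cfg,
         * exactly as send_cleanup_vector() does. */
        return cfg ? container_of(cfg, struct apic_chip_data, cfg) : NULL;
}
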
index 6fae733e9194893dc761ccd06839df81232c7427..3ffd925655e0d4a8338d401d2793a61a5cb3fd6f 100644
@@ -21,11 +21,13 @@ early_param("x2apic_phys", set_x2apic_phys_mode);
 
 static bool x2apic_fadt_phys(void)
 {
+#ifdef CONFIG_ACPI
        if ((acpi_gbl_FADT.header.revision >= FADT2_REVISION_ID) &&
                (acpi_gbl_FADT.flags & ACPI_FADT_APIC_PHYSICAL)) {
                printk(KERN_DEBUG "System requires x2apic physical mode\n");
                return true;
        }
+#endif
        return false;
 }
 
index 9f6b9341950f7895b247b1c320cc0f9cd75cda9d..8e3d22a1af94094b749abca95187b073403d628c 100644 (file)
@@ -41,6 +41,25 @@ void common(void) {
        OFFSET(pbe_orig_address, pbe, orig_address);
        OFFSET(pbe_next, pbe, next);
 
+#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+       BLANK();
+       OFFSET(IA32_SIGCONTEXT_ax, sigcontext_ia32, ax);
+       OFFSET(IA32_SIGCONTEXT_bx, sigcontext_ia32, bx);
+       OFFSET(IA32_SIGCONTEXT_cx, sigcontext_ia32, cx);
+       OFFSET(IA32_SIGCONTEXT_dx, sigcontext_ia32, dx);
+       OFFSET(IA32_SIGCONTEXT_si, sigcontext_ia32, si);
+       OFFSET(IA32_SIGCONTEXT_di, sigcontext_ia32, di);
+       OFFSET(IA32_SIGCONTEXT_bp, sigcontext_ia32, bp);
+       OFFSET(IA32_SIGCONTEXT_sp, sigcontext_ia32, sp);
+       OFFSET(IA32_SIGCONTEXT_ip, sigcontext_ia32, ip);
+
+       BLANK();
+       OFFSET(TI_sysenter_return, thread_info, sysenter_return);
+
+       BLANK();
+       OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
+#endif
+
 #ifdef CONFIG_PARAVIRT
        BLANK();
        OFFSET(PARAVIRT_enabled, pv_info, paravirt_enabled);
@@ -49,7 +68,9 @@ void common(void) {
        OFFSET(PV_IRQ_irq_disable, pv_irq_ops, irq_disable);
        OFFSET(PV_IRQ_irq_enable, pv_irq_ops, irq_enable);
        OFFSET(PV_CPU_iret, pv_cpu_ops, iret);
+#ifdef CONFIG_X86_32
        OFFSET(PV_CPU_irq_enable_sysexit, pv_cpu_ops, irq_enable_sysexit);
+#endif
        OFFSET(PV_CPU_read_cr0, pv_cpu_ops, read_cr0);
        OFFSET(PV_MMU_read_cr2, pv_mmu_ops, read_cr2);
 #endif
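
For orientation: the asm-offsets files never execute. Each OFFSET()/DEFINE() invocation emits a marker into the compiler's assembly output, which the build turns into #define lines in asm-offsets.h for consumption by entry code. A paraphrase of the machinery from include/linux/kbuild.h, shown for context only:

/* Paraphrased from include/linux/kbuild.h for context. */
#define DEFINE(sym, val) \
	asm volatile("\n->" #sym " %0 " #val : : "i" (val))

#define BLANK() asm volatile("\n->" : : )

#define OFFSET(sym, str, mem) \
	DEFINE(sym, offsetof(struct str, mem))
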
index 47703aed74cfb7d013b2d577488d68c7280bf8c6..6ce39025f467fb060ce68c8b8ebfaf8f87258d09 100644 (file)
@@ -17,17 +17,6 @@ void foo(void);
 
 void foo(void)
 {
-       OFFSET(IA32_SIGCONTEXT_ax, sigcontext, ax);
-       OFFSET(IA32_SIGCONTEXT_bx, sigcontext, bx);
-       OFFSET(IA32_SIGCONTEXT_cx, sigcontext, cx);
-       OFFSET(IA32_SIGCONTEXT_dx, sigcontext, dx);
-       OFFSET(IA32_SIGCONTEXT_si, sigcontext, si);
-       OFFSET(IA32_SIGCONTEXT_di, sigcontext, di);
-       OFFSET(IA32_SIGCONTEXT_bp, sigcontext, bp);
-       OFFSET(IA32_SIGCONTEXT_sp, sigcontext, sp);
-       OFFSET(IA32_SIGCONTEXT_ip, sigcontext, ip);
-       BLANK();
-
        OFFSET(CPUINFO_x86, cpuinfo_x86, x86);
        OFFSET(CPUINFO_x86_vendor, cpuinfo_x86, x86_vendor);
        OFFSET(CPUINFO_x86_model, cpuinfo_x86, x86_model);
@@ -37,10 +26,6 @@ void foo(void)
        OFFSET(CPUINFO_x86_vendor_id, cpuinfo_x86, x86_vendor_id);
        BLANK();
 
-       OFFSET(TI_sysenter_return, thread_info, sysenter_return);
-       OFFSET(TI_cpu, thread_info, cpu);
-       BLANK();
-
        OFFSET(PT_EBX, pt_regs, bx);
        OFFSET(PT_ECX, pt_regs, cx);
        OFFSET(PT_EDX, pt_regs, dx);
@@ -60,9 +45,6 @@ void foo(void)
        OFFSET(PT_OLDSS,  pt_regs, ss);
        BLANK();
 
-       OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe, uc.uc_mcontext);
-       BLANK();
-
        OFFSET(saved_context_gdt_desc, saved_context, gdt_desc);
        BLANK();
 
index 5ce6f2da87639c7d373879e43120c9035ba590b9..d8f42f902a0f6a6f8d3d5fbeebad623fc729a906 100644 (file)
@@ -29,27 +29,6 @@ int main(void)
        BLANK();
 #endif
 
-#ifdef CONFIG_IA32_EMULATION
-       OFFSET(TI_sysenter_return, thread_info, sysenter_return);
-       BLANK();
-
-#define ENTRY(entry) OFFSET(IA32_SIGCONTEXT_ ## entry, sigcontext_ia32, entry)
-       ENTRY(ax);
-       ENTRY(bx);
-       ENTRY(cx);
-       ENTRY(dx);
-       ENTRY(si);
-       ENTRY(di);
-       ENTRY(bp);
-       ENTRY(sp);
-       ENTRY(ip);
-       BLANK();
-#undef ENTRY
-
-       OFFSET(IA32_RT_SIGFRAME_sigcontext, rt_sigframe_ia32, uc.uc_mcontext);
-       BLANK();
-#endif
-
 #define ENTRY(entry) OFFSET(pt_regs_ ## entry, pt_regs, entry)
        ENTRY(bx);
        ENTRY(cx);
@@ -87,7 +66,7 @@ int main(void)
        DEFINE(__NR_syscall_max, sizeof(syscalls_64) - 1);
        DEFINE(NR_syscalls, sizeof(syscalls_64));
 
-       DEFINE(__NR_ia32_syscall_max, sizeof(syscalls_ia32) - 1);
+       DEFINE(__NR_syscall_compat_max, sizeof(syscalls_ia32) - 1);
        DEFINE(IA32_NR_syscalls, sizeof(syscalls_ia32));
 
        return 0;
index e4cf63301ff439ed390e50a81a72fef9ea50e23a..dd3a4baffe50cca6595a57755e17c7d284ee999c 100644 (file)
 
 #include "cpu.h"
 
+/*
+ * nodes_per_socket: Stores the number of nodes per socket.
+ * Refer to Fam15h Models 00-0fh BKDG - CPUID Fn8000_001E_ECX
+ * Node Identifiers[10:8]
+ */
+static u32 nodes_per_socket = 1;
+
 static inline int rdmsrl_amd_safe(unsigned msr, unsigned long long *p)
 {
        u32 gprs[8] = { 0 };
@@ -288,10 +295,10 @@ static int nearby_node(int apicid)
  *     Assumption: Number of cores in each internal node is the same.
  * (2) AMD processors supporting compute units
  */
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
 static void amd_get_topology(struct cpuinfo_x86 *c)
 {
-       u32 nodes, cores_per_cu = 1;
+       u32 cores_per_cu = 1;
        u8 node_id;
        int cpu = smp_processor_id();
 
@@ -300,7 +307,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
                u32 eax, ebx, ecx, edx;
 
                cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
-               nodes = ((ecx >> 8) & 7) + 1;
+               nodes_per_socket = ((ecx >> 8) & 7) + 1;
                node_id = ecx & 7;
 
                /* get compute unit information */
@@ -311,18 +318,18 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
                u64 value;
 
                rdmsrl(MSR_FAM10H_NODE_ID, value);
-               nodes = ((value >> 3) & 7) + 1;
+               nodes_per_socket = ((value >> 3) & 7) + 1;
                node_id = value & 7;
        } else
                return;
 
        /* fixup multi-node processor information */
-       if (nodes > 1) {
+       if (nodes_per_socket > 1) {
                u32 cores_per_node;
                u32 cus_per_node;
 
                set_cpu_cap(c, X86_FEATURE_AMD_DCM);
-               cores_per_node = c->x86_max_cores / nodes;
+               cores_per_node = c->x86_max_cores / nodes_per_socket;
                cus_per_node = cores_per_node / cores_per_cu;
 
                /* store NodeID, use llc_shared_map to store sibling info */
@@ -341,7 +348,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
  */
 static void amd_detect_cmp(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        unsigned bits;
        int cpu = smp_processor_id();
 
@@ -366,6 +373,12 @@ u16 amd_get_nb_id(int cpu)
 }
 EXPORT_SYMBOL_GPL(amd_get_nb_id);
 
+u32 amd_get_nodes_per_socket(void)
+{
+       return nodes_per_socket;
+}
+EXPORT_SYMBOL_GPL(amd_get_nodes_per_socket);
+
 static void srat_detect_node(struct cpuinfo_x86 *c)
 {
 #ifdef CONFIG_NUMA
@@ -420,7 +433,7 @@ static void srat_detect_node(struct cpuinfo_x86 *c)
 
 static void early_init_amd_mc(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        unsigned bits, ecx;
 
        /* Multi core CPU? */
@@ -520,8 +533,16 @@ static void early_init_amd(struct cpuinfo_x86 *c)
                        set_cpu_cap(c, X86_FEATURE_K6_MTRR);
 #endif
 #if defined(CONFIG_X86_LOCAL_APIC) && defined(CONFIG_PCI)
-       /* check CPU config space for extended APIC ID */
-       if (cpu_has_apic && c->x86 >= 0xf) {
+       /*
+        * ApicID can always be treated as an 8-bit value for AMD APIC versions
+        * >= 0x10, but even old K8s came out of reset with version 0x10. So, we
+        * can safely set X86_FEATURE_EXTD_APICID unconditionally for families
+        * after 16h.
+        */
+       if (cpu_has_apic && c->x86 > 0x16) {
+               set_cpu_cap(c, X86_FEATURE_EXTD_APICID);
+       } else if (cpu_has_apic && c->x86 >= 0xf) {
+               /* check CPU config space for extended APIC ID */
                unsigned int val;
                val = read_pci_config(0, 24, 0, 0x68);
                if ((val & ((1 << 17) | (1 << 18))) == ((1 << 17) | (1 << 18)))
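
amd_get_nodes_per_socket() is exported so code outside this file can scale per-node resources on multi-node packages. A hypothetical caller, purely illustrative:

/* Hypothetical consumer of the new export (illustrative only). */
u32 nps = amd_get_nodes_per_socket();

if (nps > 1)
	pr_info("multi-node package: %u nodes per socket\n", nps);
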
index 03445346ee0aae247f31ebf2aef6a48be0dfce8a..bd17db15a2c1ef07412c73f064f282a3f55c9313 100644 (file)
 #include <asm/bugs.h>
 #include <asm/processor.h>
 #include <asm/processor-flags.h>
-#include <asm/i387.h>
+#include <asm/fpu/internal.h>
 #include <asm/msr.h>
 #include <asm/paravirt.h>
 #include <asm/alternative.h>
 
-static double __initdata x = 4195835.0;
-static double __initdata y = 3145727.0;
-
-/*
- * This used to check for exceptions..
- * However, it turns out that to support that,
- * the XMM trap handlers basically had to
- * be buggy. So let's have a correct XMM trap
- * handler, and forget about printing out
- * some status at boot.
- *
- * We should really only care about bugs here
- * anyway. Not features.
- */
-static void __init check_fpu(void)
-{
-       s32 fdiv_bug;
-
-       kernel_fpu_begin();
-
-       /*
-        * trap_init() enabled FXSR and company _before_ testing for FP
-        * problems here.
-        *
-        * Test for the divl bug: http://en.wikipedia.org/wiki/Fdiv_bug
-        */
-       __asm__("fninit\n\t"
-               "fldl %1\n\t"
-               "fdivl %2\n\t"
-               "fmull %2\n\t"
-               "fldl %1\n\t"
-               "fsubp %%st,%%st(1)\n\t"
-               "fistpl %0\n\t"
-               "fwait\n\t"
-               "fninit"
-               : "=m" (*&fdiv_bug)
-               : "m" (*&x), "m" (*&y));
-
-       kernel_fpu_end();
-
-       if (fdiv_bug) {
-               set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
-               pr_warn("Hmm, FPU with FDIV bug\n");
-       }
-}
-
 void __init check_bugs(void)
 {
        identify_boot_cpu();
@@ -85,10 +39,5 @@ void __init check_bugs(void)
                '0' + (boot_cpu_data.x86 > 6 ? 6 : boot_cpu_data.x86);
        alternative_instructions();
 
-       /*
-        * kernel_fpu_begin/end() in check_fpu() relies on the patched
-        * alternative instructions.
-        */
-       if (cpu_has_fpu)
-               check_fpu();
+       fpu__init_check_bugs();
 }
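
For reference, the deleted assembly implemented the classic Pentium FDIV probe: with x = 4195835 and y = 3145727, the residue x - (x/y)*y is 0 on a correct FPU and roughly 256 on an affected part. The same test as a user-space sketch (illustrative; the kernel itself cannot use floating point this freely):

/* User-space sketch of the FDIV probe removed above. */
#include <stdio.h>

int main(void)
{
	volatile double x = 4195835.0, y = 3145727.0;
	double r = x - (x / y) * y;	/* 0 unless the FPU is buggy */

	printf("fdiv residue: %g (%s)\n", r, r != 0.0 ? "FDIV bug" : "ok");
	return 0;
}
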
index a62cf04dac8ae99d1310b02b827bca528311d490..9fc5e3d9d9c8390f4c9bb177449979061f20bb14 100644 (file)
@@ -5,6 +5,7 @@
 #include <linux/module.h>
 #include <linux/percpu.h>
 #include <linux/string.h>
+#include <linux/ctype.h>
 #include <linux/delay.h>
 #include <linux/sched.h>
 #include <linux/init.h>
@@ -31,8 +32,7 @@
 #include <asm/setup.h>
 #include <asm/apic.h>
 #include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mtrr.h>
 #include <linux/numa.h>
 #include <asm/asm.h>
@@ -145,32 +145,21 @@ DEFINE_PER_CPU_PAGE_ALIGNED(struct gdt_page, gdt_page) = { .gdt = {
 } };
 EXPORT_PER_CPU_SYMBOL_GPL(gdt_page);
 
-static int __init x86_xsave_setup(char *s)
+static int __init x86_mpx_setup(char *s)
 {
+       /* require an exact match without trailing characters */
        if (strlen(s))
                return 0;
-       setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
-       setup_clear_cpu_cap(X86_FEATURE_AVX);
-       setup_clear_cpu_cap(X86_FEATURE_AVX2);
-       return 1;
-}
-__setup("noxsave", x86_xsave_setup);
 
-static int __init x86_xsaveopt_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-       return 1;
-}
-__setup("noxsaveopt", x86_xsaveopt_setup);
+       /* do not emit a message if the feature is not present */
+       if (!boot_cpu_has(X86_FEATURE_MPX))
+               return 1;
 
-static int __init x86_xsaves_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+       setup_clear_cpu_cap(X86_FEATURE_MPX);
+       pr_info("nompx: Intel Memory Protection Extensions (MPX) disabled\n");
        return 1;
 }
-__setup("noxsaves", x86_xsaves_setup);
+__setup("nompx", x86_mpx_setup);
 
 #ifdef CONFIG_X86_32
 static int cachesize_override = -1;
@@ -183,14 +172,6 @@ static int __init cachesize_setup(char *str)
 }
 __setup("cachesize=", cachesize_setup);
 
-static int __init x86_fxsr_setup(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_FXSR);
-       setup_clear_cpu_cap(X86_FEATURE_XMM);
-       return 1;
-}
-__setup("nofxsr", x86_fxsr_setup);
-
 static int __init x86_sep_setup(char *s)
 {
        setup_clear_cpu_cap(X86_FEATURE_SEP);
@@ -419,7 +400,7 @@ static const struct cpu_dev *cpu_devs[X86_VENDOR_NUM] = {};
 static void get_model_name(struct cpuinfo_x86 *c)
 {
        unsigned int *v;
-       char *p, *q;
+       char *p, *q, *s;
 
        if (c->extended_cpuid_level < 0x80000004)
                return;
@@ -430,19 +411,21 @@ static void get_model_name(struct cpuinfo_x86 *c)
        cpuid(0x80000004, &v[8], &v[9], &v[10], &v[11]);
        c->x86_model_id[48] = 0;
 
-       /*
-        * Intel chips right-justify this string for some dumb reason;
-        * undo that brain damage:
-        */
-       p = q = &c->x86_model_id[0];
+       /* Trim whitespace */
+       p = q = s = &c->x86_model_id[0];
+
        while (*p == ' ')
                p++;
-       if (p != q) {
-               while (*p)
-                       *q++ = *p++;
-               while (q <= &c->x86_model_id[48])
-                       *q++ = '\0';    /* Zero-pad the rest */
+
+       while (*p) {
+               /* Note the last non-whitespace index */
+               if (!isspace(*p))
+                       s = q;
+
+               *q++ = *p++;
        }
+
+       *(s + 1) = '\0';
 }
 
 void cpu_detect_cache_sizes(struct cpuinfo_x86 *c)
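
The rewritten loop above trims both the leading spaces Intel pads the brand string with and any trailing whitespace, in a single pass: s remembers where the last non-space byte was written. The same algorithm as a standalone sketch:

/* Standalone sketch of the one-pass trim used in get_model_name(). */
#include <ctype.h>

static void trim(char *id)
{
	char *p = id, *q = id, *s = id;

	while (*p == ' ')			/* skip leading spaces */
		p++;

	while (*p) {
		if (!isspace((unsigned char)*p))
			s = q;			/* last non-space output */
		*q++ = *p++;			/* compact leftwards */
	}

	*(s + 1) = '\0';			/* cut trailing whitespace */
}
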
@@ -508,7 +491,7 @@ static void cpu_detect_tlb(struct cpuinfo_x86 *c)
 
 void detect_ht(struct cpuinfo_x86 *c)
 {
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        u32 eax, ebx, ecx, edx;
        int index_msb, core_bits;
        static bool printed;
@@ -759,7 +742,7 @@ static void __init early_identify_cpu(struct cpuinfo_x86 *c)
        cpu_detect(c);
        get_cpu_vendor(c);
        get_cpu_cap(c);
-       fpu_detect(c);
+       fpu__init_system(c);
 
        if (this_cpu->c_early_init)
                this_cpu->c_early_init(c);
@@ -844,7 +827,7 @@ static void generic_identify(struct cpuinfo_x86 *c)
        if (c->cpuid_level >= 0x00000001) {
                c->initial_apicid = (cpuid_ebx(1) >> 24) & 0xFF;
 #ifdef CONFIG_X86_32
-# ifdef CONFIG_X86_HT
+# ifdef CONFIG_SMP
                c->apicid = apic->phys_pkg_id(c->initial_apicid, 0);
 # else
                c->apicid = c->initial_apicid;
@@ -1026,7 +1009,7 @@ void enable_sep_cpu(void)
              (unsigned long)tss + offsetofend(struct tss_struct, SYSENTER_stack),
              0);
 
-       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)ia32_sysenter_target, 0);
+       wrmsr(MSR_IA32_SYSENTER_EIP, (unsigned long)entry_SYSENTER_32, 0);
 
 out:
        put_cpu();
@@ -1122,7 +1105,7 @@ void print_cpu_info(struct cpuinfo_x86 *c)
                printk(KERN_CONT "%s ", vendor);
 
        if (c->x86_model_id[0])
-               printk(KERN_CONT "%s", strim(c->x86_model_id));
+               printk(KERN_CONT "%s", c->x86_model_id);
        else
                printk(KERN_CONT "%d86", c->x86);
 
@@ -1155,10 +1138,6 @@ static __init int setup_disablecpuid(char *arg)
 }
 __setup("clearcpuid=", setup_disablecpuid);
 
-DEFINE_PER_CPU(unsigned long, kernel_stack) =
-       (unsigned long)&init_thread_union + THREAD_SIZE;
-EXPORT_PER_CPU_SYMBOL(kernel_stack);
-
 #ifdef CONFIG_X86_64
 struct desc_ptr idt_descr = { NR_VECTORS * 16 - 1, (unsigned long) idt_table };
 struct desc_ptr debug_idt_descr = { NR_VECTORS * 16 - 1,
@@ -1183,8 +1162,6 @@ DEFINE_PER_CPU(unsigned int, irq_count) __visible = -1;
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
 
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
-
 /*
  * Special IST stacks which the CPU switches to when it calls
  * an IST-marked descriptor entry. Up to 7 stacks (hardware
@@ -1208,10 +1185,10 @@ void syscall_init(void)
         * set CS/DS but only a 32bit target. LSTAR sets the 64bit rip.
         */
        wrmsrl(MSR_STAR,  ((u64)__USER32_CS)<<48  | ((u64)__KERNEL_CS)<<32);
-       wrmsrl(MSR_LSTAR, system_call);
+       wrmsrl(MSR_LSTAR, entry_SYSCALL_64);
 
 #ifdef CONFIG_IA32_EMULATION
-       wrmsrl(MSR_CSTAR, ia32_cstar_target);
+       wrmsrl(MSR_CSTAR, entry_SYSCALL_compat);
        /*
         * This only works on Intel CPUs.
         * On AMD CPUs these MSRs are 32-bit, CPU truncates MSR_IA32_SYSENTER_EIP.
@@ -1220,7 +1197,7 @@ void syscall_init(void)
         */
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)__KERNEL_CS);
        wrmsrl_safe(MSR_IA32_SYSENTER_ESP, 0ULL);
-       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)ia32_sysenter_target);
+       wrmsrl_safe(MSR_IA32_SYSENTER_EIP, (u64)entry_SYSENTER_compat);
 #else
        wrmsrl(MSR_CSTAR, ignore_sysret);
        wrmsrl_safe(MSR_IA32_SYSENTER_CS, (u64)GDT_ENTRY_INVALID_SEG);
@@ -1275,7 +1252,6 @@ DEFINE_PER_CPU(struct task_struct *, current_task) = &init_task;
 EXPORT_PER_CPU_SYMBOL(current_task);
 DEFINE_PER_CPU(int, __preempt_count) = INIT_PREEMPT_COUNT;
 EXPORT_PER_CPU_SYMBOL(__preempt_count);
-DEFINE_PER_CPU(struct task_struct *, fpu_owner_task);
 
 /*
  * On x86_32, vm86 modifies tss.sp0, so sp0 isn't a reliable way to find
@@ -1439,7 +1415,7 @@ void cpu_init(void)
        clear_all_debug_regs();
        dbg_restore_debug_regs();
 
-       fpu_init();
+       fpu__init_cpu();
 
        if (is_uv_system())
                uv_cpu_init();
@@ -1495,7 +1471,7 @@ void cpu_init(void)
        clear_all_debug_regs();
        dbg_restore_debug_regs();
 
-       fpu_init();
+       fpu__init_cpu();
 }
 #endif
 
index edcb0e28c336d085d0ee1011793c98ba6f5a13ae..be4febc58b9443372e4df5037e357789de5184fe 100644 (file)
@@ -654,7 +654,7 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
        unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
        unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
        unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        unsigned int cpu = c->cpu_index;
 #endif
 
@@ -773,19 +773,19 @@ unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
 
        if (new_l2) {
                l2 = new_l2;
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
                per_cpu(cpu_llc_id, cpu) = l2_id;
 #endif
        }
 
        if (new_l3) {
                l3 = new_l3;
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
                per_cpu(cpu_llc_id, cpu) = l3_id;
 #endif
        }
 
-#ifdef CONFIG_X86_HT
+#ifdef CONFIG_SMP
        /*
         * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
         * turns means that the only possibility is SMT (as indicated in
index e535533d5ab89313ba51937ad8dd5740413f119e..df919ff103c3ae845e727388765ebc1842df0cbc 100644 (file)
 static DEFINE_MUTEX(mce_chrdev_read_mutex);
 
 #define rcu_dereference_check_mce(p) \
-       rcu_dereference_index_check((p), \
-                             rcu_read_lock_sched_held() || \
-                             lockdep_is_held(&mce_chrdev_read_mutex))
+({ \
+       rcu_lockdep_assert(rcu_read_lock_sched_held() || \
+                          lockdep_is_held(&mce_chrdev_read_mutex), \
+                          "suspicious rcu_dereference_check_mce() usage"); \
+       smp_load_acquire(&(p)); \
+})
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/mce.h>
@@ -708,6 +711,7 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                          struct pt_regs *regs)
 {
        int i, ret = 0;
+       char *tmp;
 
        for (i = 0; i < mca_cfg.banks; i++) {
                m->status = mce_rdmsrl(MSR_IA32_MCx_STATUS(i));
@@ -716,9 +720,11 @@ static int mce_no_way_out(struct mce *m, char **msg, unsigned long *validp,
                        if (quirk_no_way_out)
                                quirk_no_way_out(i, m, regs);
                }
-               if (mce_severity(m, mca_cfg.tolerant, msg, true) >=
-                   MCE_PANIC_SEVERITY)
+
+               if (mce_severity(m, mca_cfg.tolerant, &tmp, true) >= MCE_PANIC_SEVERITY) {
+                       *msg = tmp;
                        ret = 1;
+               }
        }
        return ret;
 }
@@ -1047,6 +1053,7 @@ void do_machine_check(struct pt_regs *regs, long error_code)
        char *msg = "Unknown";
        u64 recover_paddr = ~0ull;
        int flags = MF_ACTION_REQUIRED;
+       int lmce = 0;
 
        prev_state = ist_enter(regs);
 
@@ -1074,11 +1081,20 @@ void do_machine_check(struct pt_regs *regs, long error_code)
                kill_it = 1;
 
        /*
-        * Go through all the banks in exclusion of the other CPUs.
-        * This way we don't report duplicated events on shared banks
-        * because the first one to see it will clear it.
+        * Check if this MCE is signaled to only this logical processor.
         */
-       order = mce_start(&no_way_out);
+       if (m.mcgstatus & MCG_STATUS_LMCES)
+               lmce = 1;
+       else {
+               /*
+                * Go through all the banks in exclusion of the other CPUs.
+                * This way we don't report duplicated events on shared banks
+                * because the first one to see it will clear it.
+                * If this is a Local MCE, then no need to perform rendezvous.
+                */
+               order = mce_start(&no_way_out);
+       }
+
        for (i = 0; i < cfg->banks; i++) {
                __clear_bit(i, toclear);
                if (!test_bit(i, valid_banks))
@@ -1155,8 +1171,18 @@ void do_machine_check(struct pt_regs *regs, long error_code)
         * Do most of the synchronization with other CPUs.
         * When there's any problem use only local no_way_out state.
         */
-       if (mce_end(order) < 0)
-               no_way_out = worst >= MCE_PANIC_SEVERITY;
+       if (!lmce) {
+               if (mce_end(order) < 0)
+                       no_way_out = worst >= MCE_PANIC_SEVERITY;
+       } else {
+               /*
+                * A local MCE skipped the mce_reign() rendezvous.
+                * If we found a fatal error, we need to panic here.
+                */
+                if (worst >= MCE_PANIC_SEVERITY && mca_cfg.tolerant < 3)
+                       mce_panic("Machine check from unknown source",
+                               NULL, NULL);
+       }
 
        /*
         * At insane "tolerant" levels we take no action. Otherwise
@@ -1637,10 +1663,16 @@ static void __mcheck_cpu_init_vendor(struct cpuinfo_x86 *c)
                mce_intel_feature_init(c);
                mce_adjust_timer = cmci_intel_adjust_timer;
                break;
-       case X86_VENDOR_AMD:
+
+       case X86_VENDOR_AMD: {
+               u32 ebx = cpuid_ebx(0x80000007);
+
                mce_amd_feature_init(c);
-               mce_flags.overflow_recov = cpuid_ebx(0x80000007) & 0x1;
+               mce_flags.overflow_recov = !!(ebx & BIT(0));
+               mce_flags.succor         = !!(ebx & BIT(1));
                break;
+               }
+
        default:
                break;
        }
@@ -1884,7 +1916,7 @@ out:
 static unsigned int mce_chrdev_poll(struct file *file, poll_table *wait)
 {
        poll_wait(file, &mce_chrdev_wait, wait);
-       if (rcu_access_index(mcelog.next))
+       if (READ_ONCE(mcelog.next))
                return POLLIN | POLLRDNORM;
        if (!mce_apei_read_done && apei_check_mce())
                return POLLIN | POLLRDNORM;
@@ -1929,8 +1961,8 @@ void register_mce_write_callback(ssize_t (*fn)(struct file *filp,
 }
 EXPORT_SYMBOL_GPL(register_mce_write_callback);
 
-ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
-                        size_t usize, loff_t *off)
+static ssize_t mce_chrdev_write(struct file *filp, const char __user *ubuf,
+                               size_t usize, loff_t *off)
 {
        if (mce_write)
                return mce_write(filp, ubuf, usize, off);
@@ -1976,6 +2008,7 @@ void mce_disable_bank(int bank)
 /*
  * mce=off Disables machine check
  * mce=no_cmci Disables CMCI
+ * mce=no_lmce Disables LMCE
  * mce=dont_log_ce Clears corrected events silently, no log created for CEs.
  * mce=ignore_ce Disables polling and CMCI, corrected events are not cleared.
  * mce=TOLERANCELEVEL[,monarchtimeout] (number, see above)
@@ -1999,6 +2032,8 @@ static int __init mcheck_enable(char *str)
                cfg->disabled = true;
        else if (!strcmp(str, "no_cmci"))
                cfg->cmci_disabled = true;
+       else if (!strcmp(str, "no_lmce"))
+               cfg->lmce_disabled = true;
        else if (!strcmp(str, "dont_log_ce"))
                cfg->dont_log_ce = true;
        else if (!strcmp(str, "ignore_ce"))
@@ -2008,11 +2043,8 @@ static int __init mcheck_enable(char *str)
        else if (!strcmp(str, "bios_cmci_threshold"))
                cfg->bios_cmci_threshold = true;
        else if (isdigit(str[0])) {
-               get_option(&str, &(cfg->tolerant));
-               if (*str == ',') {
-                       ++str;
+               if (get_option(&str, &cfg->tolerant) == 2)
                        get_option(&str, &(cfg->monarch_timeout));
-               }
        } else {
                pr_info("mce argument %s ignored. Please use /sys\n", str);
                return 0;
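
The simplified tolerant/monarchtimeout parsing leans on get_option()'s return convention: 0 for no integer, 1 for an integer with nothing after it, 2 for an integer followed by a comma, with the cursor advanced past the comma. A sketch of how "mce=2,500" flows through it:

/* Sketch: get_option() returns 2 for "int," and advances past
 * the comma, so the second call picks up the timeout. */
char buf[] = "2,500";
char *str = buf;
int tolerant = 0, timeout = 0;

if (get_option(&str, &tolerant) == 2)	/* tolerant = 2, str -> "500" */
	get_option(&str, &timeout);	/* timeout = 500 */
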
index 55ad9b37cae853ce0d50f193dc7eb82a49207fee..e99b15077e9464b9c9f337873ae58101285e3215 100644 (file)
@@ -1,19 +1,13 @@
 /*
- *  (c) 2005-2012 Advanced Micro Devices, Inc.
+ *  (c) 2005-2015 Advanced Micro Devices, Inc.
  *  Your use of this code is subject to the terms and conditions of the
  *  GNU general public license version 2. See "COPYING" or
  *  http://www.gnu.org/licenses/gpl.html
  *
  *  Written by Jacob Shin - AMD, Inc.
- *
  *  Maintained by: Borislav Petkov <bp@alien8.de>
  *
- *  April 2006
- *     - added support for AMD Family 0x10 processors
- *  May 2012
- *     - major scrubbing
- *
- *  All MC4_MISCi registers are shared between multi-cores
+ *  All MC4_MISCi registers are shared between cores on a node.
  */
 #include <linux/interrupt.h>
 #include <linux/notifier.h>
@@ -32,6 +26,7 @@
 #include <asm/idle.h>
 #include <asm/mce.h>
 #include <asm/msr.h>
+#include <asm/trace/irq_vectors.h>
 
 #define NR_BLOCKS         9
 #define THRESHOLD_MAX     0xFFF
 #define MASK_BLKPTR_LO    0xFF000000
 #define MCG_XBLK_ADDR     0xC0000400
 
+/* Deferred error settings */
+#define MSR_CU_DEF_ERR         0xC0000410
+#define MASK_DEF_LVTOFF                0x000000F0
+#define MASK_DEF_INT_TYPE      0x00000006
+#define DEF_LVT_OFF            0x2
+#define DEF_INT_TYPE_APIC      0x2
+
 static const char * const th_names[] = {
        "load_store",
        "insn_fetch",
@@ -60,6 +62,13 @@ static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
 static DEFINE_PER_CPU(unsigned char, bank_map);        /* see which banks are on */
 
 static void amd_threshold_interrupt(void);
+static void amd_deferred_error_interrupt(void);
+
+static void default_deferred_error_interrupt(void)
+{
+       pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
+}
+void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
 
 /*
  * CPU Initialization
@@ -196,7 +205,7 @@ static void mce_threshold_block_init(struct threshold_block *b, int offset)
        threshold_restart_bank(&tr);
 };
 
-static int setup_APIC_mce(int reserved, int new)
+static int setup_APIC_mce_threshold(int reserved, int new)
 {
        if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
                                              APIC_EILVT_MSG_FIX, 0))
@@ -205,6 +214,39 @@ static int setup_APIC_mce(int reserved, int new)
        return reserved;
 }
 
+static int setup_APIC_deferred_error(int reserved, int new)
+{
+       if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
+                                             APIC_EILVT_MSG_FIX, 0))
+               return new;
+
+       return reserved;
+}
+
+static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
+{
+       u32 low = 0, high = 0;
+       int def_offset = -1, def_new;
+
+       if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
+               return;
+
+       def_new = (low & MASK_DEF_LVTOFF) >> 4;
+       if (!(low & MASK_DEF_LVTOFF)) {
+               pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
+               def_new = DEF_LVT_OFF;
+               low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
+       }
+
+       def_offset = setup_APIC_deferred_error(def_offset, def_new);
+       if ((def_offset == def_new) &&
+           (deferred_error_int_vector != amd_deferred_error_interrupt))
+               deferred_error_int_vector = amd_deferred_error_interrupt;
+
+       low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
+       wrmsr(MSR_CU_DEF_ERR, low, high);
+}
+
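
MSR_CU_DEF_ERR packs the deferred-error LVT offset in bits [7:4] (MASK_DEF_LVTOFF) and the delivery type in bits [2:1] (MASK_DEF_INT_TYPE); the function above normalizes both. A worked decode with a hypothetical raw value:

/* Worked example; low = 0x24 is a hypothetical MSR value. */
u32 low = 0x24;
int def_new = (low & MASK_DEF_LVTOFF) >> 4;		/* 0x20 >> 4 = 2 */

low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;	/* 0x24 -> 0x22 */
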
 /* cpu init entry point, called from mce.c with preempt off */
 void mce_amd_feature_init(struct cpuinfo_x86 *c)
 {
@@ -252,7 +294,7 @@ void mce_amd_feature_init(struct cpuinfo_x86 *c)
 
                        b.interrupt_enable = 1;
                        new     = (high & MASK_LVTOFF_HI) >> 20;
-                       offset  = setup_APIC_mce(offset, new);
+                       offset  = setup_APIC_mce_threshold(offset, new);
 
                        if ((offset == new) &&
                            (mce_threshold_vector != amd_threshold_interrupt))
@@ -262,6 +304,73 @@ init:
                        mce_threshold_block_init(&b, offset);
                }
        }
+
+       if (mce_flags.succor)
+               deferred_error_interrupt_enable(c);
+}
+
+static void __log_error(unsigned int bank, bool threshold_err, u64 misc)
+{
+       struct mce m;
+       u64 status;
+
+       rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+       if (!(status & MCI_STATUS_VAL))
+               return;
+
+       mce_setup(&m);
+
+       m.status = status;
+       m.bank = bank;
+
+       if (threshold_err)
+               m.misc = misc;
+
+       if (m.status & MCI_STATUS_ADDRV)
+               rdmsrl(MSR_IA32_MCx_ADDR(bank), m.addr);
+
+       mce_log(&m);
+       wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
+}
+
+static inline void __smp_deferred_error_interrupt(void)
+{
+       inc_irq_stat(irq_deferred_error_count);
+       deferred_error_int_vector();
+}
+
+asmlinkage __visible void smp_deferred_error_interrupt(void)
+{
+       entering_irq();
+       __smp_deferred_error_interrupt();
+       exiting_ack_irq();
+}
+
+asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
+{
+       entering_irq();
+       trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
+       __smp_deferred_error_interrupt();
+       trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
+       exiting_ack_irq();
+}
+
+/* APIC interrupt handler for deferred errors */
+static void amd_deferred_error_interrupt(void)
+{
+       u64 status;
+       unsigned int bank;
+
+       for (bank = 0; bank < mca_cfg.banks; ++bank) {
+               rdmsrl(MSR_IA32_MCx_STATUS(bank), status);
+
+               if (!(status & MCI_STATUS_VAL) ||
+                   !(status & MCI_STATUS_DEFERRED))
+                       continue;
+
+               __log_error(bank, false, 0);
+               break;
+       }
 }
 
 /*
@@ -273,12 +382,12 @@ init:
  * the interrupt goes off when error_count reaches threshold_limit.
  * the handler will simply log mcelog w/ software defined bank number.
  */
+
 static void amd_threshold_interrupt(void)
 {
        u32 low = 0, high = 0, address = 0;
        int cpu = smp_processor_id();
        unsigned int bank, block;
-       struct mce m;
 
        /* assume first bank caused it */
        for (bank = 0; bank < mca_cfg.banks; ++bank) {
@@ -321,15 +430,7 @@ static void amd_threshold_interrupt(void)
        return;
 
 log:
-       mce_setup(&m);
-       rdmsrl(MSR_IA32_MCx_STATUS(bank), m.status);
-       if (!(m.status & MCI_STATUS_VAL))
-               return;
-       m.misc = ((u64)high << 32) | low;
-       m.bank = bank;
-       mce_log(&m);
-
-       wrmsrl(MSR_IA32_MCx_STATUS(bank), 0);
+       __log_error(bank, true, ((u64)high << 32) | low);
 }
 
 /*
index b4a41cf030edab7dbfae7dc67560ba63eabc2309..844f56c5616d242dc41631957bf94f687f039b0d 100644 (file)
@@ -91,6 +91,36 @@ static int cmci_supported(int *banks)
        return !!(cap & MCG_CMCI_P);
 }
 
+static bool lmce_supported(void)
+{
+       u64 tmp;
+
+       if (mca_cfg.lmce_disabled)
+               return false;
+
+       rdmsrl(MSR_IA32_MCG_CAP, tmp);
+
+       /*
+        * LMCE depends on recovery support in the processor. Hence both
+        * MCG_SER_P and MCG_LMCE_P should be present in MCG_CAP.
+        */
+       if ((tmp & (MCG_SER_P | MCG_LMCE_P)) !=
+                  (MCG_SER_P | MCG_LMCE_P))
+               return false;
+
+       /*
+        * BIOS should indicate support for LMCE by setting bit 20 in
+        * IA32_FEATURE_CONTROL without which touching MCG_EXT_CTL will
+        * generate a #GP fault.
+        */
+       rdmsrl(MSR_IA32_FEATURE_CONTROL, tmp);
+       if ((tmp & (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE)) ==
+                  (FEATURE_CONTROL_LOCKED | FEATURE_CONTROL_LMCE))
+               return true;
+
+       return false;
+}
+
 bool mce_intel_cmci_poll(void)
 {
        if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
@@ -405,8 +435,22 @@ static void intel_init_cmci(void)
        cmci_recheck();
 }
 
+void intel_init_lmce(void)
+{
+       u64 val;
+
+       if (!lmce_supported())
+               return;
+
+       rdmsrl(MSR_IA32_MCG_EXT_CTL, val);
+
+       if (!(val & MCG_EXT_CTL_LMCE_EN))
+               wrmsrl(MSR_IA32_MCG_EXT_CTL, val | MCG_EXT_CTL_LMCE_EN);
+}
+
 void mce_intel_feature_init(struct cpuinfo_x86 *c)
 {
        intel_init_thermal(c);
        intel_init_cmci();
+       intel_init_lmce();
 }
index 737737edbd1ef5bbc478cf2b6f898f25c231ea81..e8a215a9a34557542380e4a9e7fcf64a1ed63d91 100644 (file)
@@ -228,7 +228,23 @@ static void apply_ucode_in_initrd(void *ucode, size_t size, bool save_patch)
        }
 }
 
-void __init load_ucode_amd_bsp(void)
+static bool __init load_builtin_amd_microcode(struct cpio_data *cp,
+                                             unsigned int family)
+{
+#ifdef CONFIG_X86_64
+       char fw_name[36] = "amd-ucode/microcode_amd.bin";
+
+       if (family >= 0x15)
+               snprintf(fw_name, sizeof(fw_name),
+                        "amd-ucode/microcode_amd_fam%.2xh.bin", family);
+
+       return get_builtin_firmware(cp, fw_name);
+#else
+       return false;
+#endif
+}
+
+void __init load_ucode_amd_bsp(unsigned int family)
 {
        struct cpio_data cp;
        void **data;
@@ -243,8 +259,10 @@ void __init load_ucode_amd_bsp(void)
 #endif
 
        cp = find_ucode_in_initrd();
-       if (!cp.data)
-               return;
+       if (!cp.data) {
+               if (!load_builtin_amd_microcode(&cp, family))
+                       return;
+       }
 
        *data = cp.data;
        *size = cp.size;
index 36a83617eb21cc19245794a89c986eba45179d3a..6236a54a63f449ce2ea824be13a3bcca2f57e4b9 100644 (file)
@@ -1,74 +1,16 @@
 /*
- *     Intel CPU Microcode Update Driver for Linux
+ * CPU Microcode Update Driver for Linux
  *
- *     Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
- *                   2006      Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *           2006      Shaohua Li <shaohua.li@intel.com>
+ *           2013-2015 Borislav Petkov <bp@alien8.de>
  *
- *     This driver allows to upgrade microcode on Intel processors
- *     belonging to IA-32 family - PentiumPro, Pentium II,
- *     Pentium III, Xeon, Pentium 4, etc.
+ * This driver allows upgrading the microcode on x86 processors.
  *
- *     Reference: Section 8.11 of Volume 3a, IA-32 Intel(R) Architecture
- *     Software Developer's Manual
- *     Order Number 253668 or free download from:
- *
- *     http://developer.intel.com/Assets/PDF/manual/253668.pdf 
- *
- *     For more information, go to http://www.urbanmyth.org/microcode
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- *     1.0     16 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Initial release.
- *     1.01    18 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Added read() support + cleanups.
- *     1.02    21 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Added 'device trimming' support. open(O_WRONLY) zeroes
- *             and frees the saved copy of applied microcode.
- *     1.03    29 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Made to use devfs (/dev/cpu/microcode) + cleanups.
- *     1.04    06 Jun 2000, Simon Trimmer <simon@veritas.com>
- *             Added misc device support (now uses both devfs and misc).
- *             Added MICROCODE_IOCFREE ioctl to clear memory.
- *     1.05    09 Jun 2000, Simon Trimmer <simon@veritas.com>
- *             Messages for error cases (non Intel & no suitable microcode).
- *     1.06    03 Aug 2000, Tigran Aivazian <tigran@veritas.com>
- *             Removed ->release(). Removed exclusive open and status bitmap.
- *             Added microcode_rwsem to serialize read()/write()/ioctl().
- *             Removed global kernel lock usage.
- *     1.07    07 Sep 2000, Tigran Aivazian <tigran@veritas.com>
- *             Write 0 to 0x8B msr and then cpuid before reading revision,
- *             so that it works even if there were no update done by the
- *             BIOS. Otherwise, reading from 0x8B gives junk (which happened
- *             to be 0 on my machine which is why it worked even when I
- *             disabled update by the BIOS)
- *             Thanks to Eric W. Biederman <ebiederman@lnxi.com> for the fix.
- *     1.08    11 Dec 2000, Richard Schaal <richard.schaal@intel.com> and
- *                          Tigran Aivazian <tigran@veritas.com>
- *             Intel Pentium 4 processor support and bugfixes.
- *     1.09    30 Oct 2001, Tigran Aivazian <tigran@veritas.com>
- *             Bugfix for HT (Hyper-Threading) enabled processors
- *             whereby processor resources are shared by all logical processors
- *             in a single CPU package.
- *     1.10    28 Feb 2002 Asit K Mallick <asit.k.mallick@intel.com> and
- *             Tigran Aivazian <tigran@veritas.com>,
- *             Serialize updates as required on HT processors due to
- *             speculative nature of implementation.
- *     1.11    22 Mar 2002 Tigran Aivazian <tigran@veritas.com>
- *             Fix the panic when writing zero-length microcode chunk.
- *     1.12    29 Sep 2003 Nitin Kamble <nitin.a.kamble@intel.com>,
- *             Jun Nakajima <jun.nakajima@intel.com>
- *             Support for the microcode updates in the new format.
- *     1.13    10 Oct 2003 Tigran Aivazian <tigran@veritas.com>
- *             Removed ->read() method and obsoleted MICROCODE_IOCFREE ioctl
- *             because we no longer hold a copy of applied microcode
- *             in kernel memory.
- *     1.14    25 Jun 2004 Tigran Aivazian <tigran@veritas.com>
- *             Fix sigmatch() macro to handle old CPUs with pf == 0.
- *             Thanks to Stuart Swales for pointing out this bug.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
index a413a69cbd744f2e2873434ee20b66e86fb466dd..8ebc421d62996ae8d90b4828fd52a4fae198cae1 100644 (file)
@@ -3,6 +3,7 @@
  *
  *     Copyright (C) 2012 Fenghua Yu <fenghua.yu@intel.com>
  *                        H Peter Anvin" <hpa@zytor.com>
+ *               (C) 2015 Borislav Petkov <bp@alien8.de>
  *
  *     This driver allows to early upgrade microcode on Intel processors
  *     belonging to IA-32 family - PentiumPro, Pentium II,
@@ -17,6 +18,7 @@
  *     2 of the License, or (at your option) any later version.
  */
 #include <linux/module.h>
+#include <linux/firmware.h>
 #include <asm/microcode.h>
 #include <asm/microcode_intel.h>
 #include <asm/microcode_amd.h>
@@ -43,9 +45,29 @@ static bool __init check_loader_disabled_bsp(void)
        return *res;
 }
 
+extern struct builtin_fw __start_builtin_fw[];
+extern struct builtin_fw __end_builtin_fw[];
+
+bool get_builtin_firmware(struct cpio_data *cd, const char *name)
+{
+#ifdef CONFIG_FW_LOADER
+       struct builtin_fw *b_fw;
+
+       for (b_fw = __start_builtin_fw; b_fw != __end_builtin_fw; b_fw++) {
+               if (!strcmp(name, b_fw->name)) {
+                       cd->size = b_fw->size;
+                       cd->data = b_fw->data;
+                       return true;
+               }
+       }
+#endif
+       return false;
+}
+
 void __init load_ucode_bsp(void)
 {
-       int vendor, family;
+       int vendor;
+       unsigned int family;
 
        if (check_loader_disabled_bsp())
                return;
@@ -63,7 +85,7 @@ void __init load_ucode_bsp(void)
                break;
        case X86_VENDOR_AMD:
                if (family >= 0x10)
-                       load_ucode_amd_bsp();
+                       load_ucode_amd_bsp(family);
                break;
        default:
                break;
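
get_builtin_firmware() searches the firmware blobs linked into the kernel image, which lets microcode be applied with no initrd at all. The expected way to embed the blobs is the firmware loader's EXTRA_FIRMWARE mechanism; the fragment below is an assumption, and the file names must match those constructed by the AMD and Intel loaders in this series ("06-3c-03" is only an example):

CONFIG_FW_LOADER=y
CONFIG_EXTRA_FIRMWARE="intel-ucode/06-3c-03 amd-ucode/microcode_amd_fam15h.bin"
CONFIG_EXTRA_FIRMWARE_DIR="/lib/firmware"
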
index a41beadb3db9a396e5b74795e62a49648b367870..969dc17eb1b4b86775d5496bb6ebe9ba67110b4c 100644 (file)
@@ -1,74 +1,13 @@
 /*
- *     Intel CPU Microcode Update Driver for Linux
+ * Intel CPU Microcode Update Driver for Linux
  *
- *     Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
- *                   2006      Shaohua Li <shaohua.li@intel.com>
+ * Copyright (C) 2000-2006 Tigran Aivazian <tigran@aivazian.fsnet.co.uk>
+ *              2006 Shaohua Li <shaohua.li@intel.com>
  *
- *     This driver allows to upgrade microcode on Intel processors
- *     belonging to IA-32 family - PentiumPro, Pentium II,
- *     Pentium III, Xeon, Pentium 4, etc.
- *
- *     Reference: Section 8.11 of Volume 3a, IA-32 Intel(R) Architecture
- *     Software Developer's Manual
- *     Order Number 253668 or free download from:
- *
- *     http://developer.intel.com/Assets/PDF/manual/253668.pdf 
- *
- *     For more information, go to http://www.urbanmyth.org/microcode
- *
- *     This program is free software; you can redistribute it and/or
- *     modify it under the terms of the GNU General Public License
- *     as published by the Free Software Foundation; either version
- *     2 of the License, or (at your option) any later version.
- *
- *     1.0     16 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Initial release.
- *     1.01    18 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Added read() support + cleanups.
- *     1.02    21 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Added 'device trimming' support. open(O_WRONLY) zeroes
- *             and frees the saved copy of applied microcode.
- *     1.03    29 Feb 2000, Tigran Aivazian <tigran@sco.com>
- *             Made to use devfs (/dev/cpu/microcode) + cleanups.
- *     1.04    06 Jun 2000, Simon Trimmer <simon@veritas.com>
- *             Added misc device support (now uses both devfs and misc).
- *             Added MICROCODE_IOCFREE ioctl to clear memory.
- *     1.05    09 Jun 2000, Simon Trimmer <simon@veritas.com>
- *             Messages for error cases (non Intel & no suitable microcode).
- *     1.06    03 Aug 2000, Tigran Aivazian <tigran@veritas.com>
- *             Removed ->release(). Removed exclusive open and status bitmap.
- *             Added microcode_rwsem to serialize read()/write()/ioctl().
- *             Removed global kernel lock usage.
- *     1.07    07 Sep 2000, Tigran Aivazian <tigran@veritas.com>
- *             Write 0 to 0x8B msr and then cpuid before reading revision,
- *             so that it works even if there were no update done by the
- *             BIOS. Otherwise, reading from 0x8B gives junk (which happened
- *             to be 0 on my machine which is why it worked even when I
- *             disabled update by the BIOS)
- *             Thanks to Eric W. Biederman <ebiederman@lnxi.com> for the fix.
- *     1.08    11 Dec 2000, Richard Schaal <richard.schaal@intel.com> and
- *                          Tigran Aivazian <tigran@veritas.com>
- *             Intel Pentium 4 processor support and bugfixes.
- *     1.09    30 Oct 2001, Tigran Aivazian <tigran@veritas.com>
- *             Bugfix for HT (Hyper-Threading) enabled processors
- *             whereby processor resources are shared by all logical processors
- *             in a single CPU package.
- *     1.10    28 Feb 2002 Asit K Mallick <asit.k.mallick@intel.com> and
- *             Tigran Aivazian <tigran@veritas.com>,
- *             Serialize updates as required on HT processors due to
- *             speculative nature of implementation.
- *     1.11    22 Mar 2002 Tigran Aivazian <tigran@veritas.com>
- *             Fix the panic when writing zero-length microcode chunk.
- *     1.12    29 Sep 2003 Nitin Kamble <nitin.a.kamble@intel.com>,
- *             Jun Nakajima <jun.nakajima@intel.com>
- *             Support for the microcode updates in the new format.
- *     1.13    10 Oct 2003 Tigran Aivazian <tigran@veritas.com>
- *             Removed ->read() method and obsoleted MICROCODE_IOCFREE ioctl
- *             because we no longer hold a copy of applied microcode
- *             in kernel memory.
- *     1.14    25 Jun 2004 Tigran Aivazian <tigran@veritas.com>
- *             Fix sigmatch() macro to handle old CPUs with pf == 0.
- *             Thanks to Stuart Swales for pointing out this bug.
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
  */
 
 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
@@ -124,7 +63,7 @@ static int get_matching_mc(struct microcode_intel *mc_intel, int cpu)
        cpf = cpu_sig.pf;
        crev = cpu_sig.rev;
 
-       return get_matching_microcode(csig, cpf, crev, mc_intel);
+       return has_newer_microcode(mc_intel, csig, cpf, crev);
 }
 
 static int apply_microcode_intel(int cpu)
@@ -226,7 +165,7 @@ static enum ucode_state generic_load_microcode(int cpu, void *data, size_t size,
 
                csig = uci->cpu_sig.sig;
                cpf = uci->cpu_sig.pf;
-               if (get_matching_microcode(csig, cpf, new_rev, mc)) {
+               if (has_newer_microcode(mc, csig, cpf, new_rev)) {
                        vfree(new_mc);
                        new_rev = mc_header.rev;
                        new_mc  = mc;
index 2f49ab4ac0ae137d7ab0b851cf4b9e751d58922c..8187b7247d1c3e97b9a7321a1d0e12ccda1e0962 100644 (file)
@@ -59,10 +59,10 @@ load_microcode_early(struct microcode_intel **saved,
                ucode_ptr = saved[i];
                mc_hdr    = (struct microcode_header_intel *)ucode_ptr;
 
-               ret = get_matching_microcode(uci->cpu_sig.sig,
-                                            uci->cpu_sig.pf,
-                                            new_rev,
-                                            ucode_ptr);
+               ret = has_newer_microcode(ucode_ptr,
+                                         uci->cpu_sig.sig,
+                                         uci->cpu_sig.pf,
+                                         new_rev);
                if (!ret)
                        continue;
 
@@ -246,7 +246,7 @@ static unsigned int _save_mc(struct microcode_intel **mc_saved,
                             u8 *ucode_ptr, unsigned int num_saved)
 {
        struct microcode_header_intel *mc_hdr, *mc_saved_hdr;
-       unsigned int sig, pf, new_rev;
+       unsigned int sig, pf;
        int found = 0, i;
 
        mc_hdr = (struct microcode_header_intel *)ucode_ptr;
@@ -255,14 +255,13 @@ static unsigned int _save_mc(struct microcode_intel **mc_saved,
                mc_saved_hdr = (struct microcode_header_intel *)mc_saved[i];
                sig          = mc_saved_hdr->sig;
                pf           = mc_saved_hdr->pf;
-               new_rev      = mc_hdr->rev;
 
-               if (!get_matching_sig(sig, pf, new_rev, ucode_ptr))
+               if (!find_matching_signature(ucode_ptr, sig, pf))
                        continue;
 
                found = 1;
 
-               if (!revision_is_newer(mc_hdr, new_rev))
+               if (mc_hdr->rev <= mc_saved_hdr->rev)
                        continue;
 
                /*
@@ -522,6 +521,27 @@ out:
 EXPORT_SYMBOL_GPL(save_mc_for_early);
 #endif
 
+static bool __init load_builtin_intel_microcode(struct cpio_data *cp)
+{
+#ifdef CONFIG_X86_64
+       unsigned int eax = 0x00000001, ebx, ecx = 0, edx;
+       unsigned int family, model, stepping;
+       char name[30];
+
+       native_cpuid(&eax, &ebx, &ecx, &edx);
+
+       family   = __x86_family(eax);
+       model    = x86_model(eax);
+       stepping = eax & 0xf;
+
+       sprintf(name, "intel-ucode/%02x-%02x-%02x", family, model, stepping);
+
+       return get_builtin_firmware(cp, name);
+#else
+       return false;
+#endif
+}
+
 static __initdata char ucode_name[] = "kernel/x86/microcode/GenuineIntel.bin";
 static __init enum ucode_state
 scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
@@ -540,8 +560,10 @@ scan_microcode(struct mc_saved_data *mc_saved_data, unsigned long *initrd,
        cd.size = 0;
 
        cd = find_cpio_data(p, (void *)start, size, &offset);
-       if (!cd.data)
-               return UCODE_ERROR;
+       if (!cd.data) {
+               if (!load_builtin_intel_microcode(&cd))
+                       return UCODE_ERROR;
+       }
 
        return get_matching_model_microcode(0, start, cd.data, cd.size,
                                            mc_saved_data, initrd, uci);
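
__x86_family() and x86_model() fold CPUID leaf 1's extended family/model fields into the values used for the "intel-ucode/%02x-%02x-%02x" name. A sketch of that decode (standard CPUID signature encoding; helper bodies assumed, not quoted):

/* Sketch of the standard CPUID.1:EAX signature decode. */
static unsigned int sig_family(unsigned int sig)
{
	unsigned int x86 = (sig >> 8) & 0xf;

	if (x86 == 0xf)
		x86 += (sig >> 20) & 0xff;		/* extended family */
	return x86;
}

static unsigned int sig_model(unsigned int sig)
{
	unsigned int x86 = (sig >> 8) & 0xf;
	unsigned int model = (sig >> 4) & 0xf;

	if (x86 == 0x6 || x86 == 0xf)
		model += ((sig >> 16) & 0xf) << 4;	/* extended model */
	return model;
}
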
index cd47a510a3f174233300d8763705b6f200faf9f4..1883d252ff7d60ce7707a8108283144de64ce14d 100644 (file)
 #include <asm/processor.h>
 #include <asm/msr.h>
 
-static inline int
-update_match_cpu(unsigned int csig, unsigned int cpf,
-                unsigned int sig, unsigned int pf)
+static inline bool cpu_signatures_match(unsigned int s1, unsigned int p1,
+                                       unsigned int s2, unsigned int p2)
 {
-       return (!sigmatch(sig, csig, pf, cpf)) ? 0 : 1;
+       if (s1 != s2)
+               return false;
+
+       /* Processor flags are either both 0 ... */
+       if (!p1 && !p2)
+               return true;
+
+       /* ... or they intersect. */
+       return p1 & p2;
 }
 
 int microcode_sanity_check(void *mc, int print_err)
@@ -124,27 +131,25 @@ EXPORT_SYMBOL_GPL(microcode_sanity_check);
 /*
  * Returns 1 if update has been found, 0 otherwise.
  */
-int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc)
+int find_matching_signature(void *mc, unsigned int csig, int cpf)
 {
-       struct microcode_header_intel *mc_header = mc;
-       struct extended_sigtable *ext_header;
-       unsigned long total_size = get_totalsize(mc_header);
-       int ext_sigcount, i;
+       struct microcode_header_intel *mc_hdr = mc;
+       struct extended_sigtable *ext_hdr;
        struct extended_signature *ext_sig;
+       int i;
 
-       if (update_match_cpu(csig, cpf, mc_header->sig, mc_header->pf))
+       if (cpu_signatures_match(csig, cpf, mc_hdr->sig, mc_hdr->pf))
                return 1;
 
        /* Look for ext. headers: */
-       if (total_size <= get_datasize(mc_header) + MC_HEADER_SIZE)
+       if (get_totalsize(mc_hdr) <= get_datasize(mc_hdr) + MC_HEADER_SIZE)
                return 0;
 
-       ext_header = mc + get_datasize(mc_header) + MC_HEADER_SIZE;
-       ext_sigcount = ext_header->count;
-       ext_sig = (void *)ext_header + EXT_HEADER_SIZE;
+       ext_hdr = mc + get_datasize(mc_hdr) + MC_HEADER_SIZE;
+       ext_sig = (void *)ext_hdr + EXT_HEADER_SIZE;
 
-       for (i = 0; i < ext_sigcount; i++) {
-               if (update_match_cpu(csig, cpf, ext_sig->sig, ext_sig->pf))
+       for (i = 0; i < ext_hdr->count; i++) {
+               if (cpu_signatures_match(csig, cpf, ext_sig->sig, ext_sig->pf))
                        return 1;
                ext_sig++;
        }
@@ -154,13 +159,13 @@ int get_matching_sig(unsigned int csig, int cpf, int rev, void *mc)
 /*
  * Returns 1 if update has been found, 0 otherwise.
  */
-int get_matching_microcode(unsigned int csig, int cpf, int rev, void *mc)
+int has_newer_microcode(void *mc, unsigned int csig, int cpf, int new_rev)
 {
        struct microcode_header_intel *mc_hdr = mc;
 
-       if (!revision_is_newer(mc_hdr, rev))
+       if (mc_hdr->rev <= new_rev)
                return 0;
 
-       return get_matching_sig(csig, cpf, rev, mc);
+       return find_matching_signature(mc, csig, cpf);
 }
-EXPORT_SYMBOL_GPL(get_matching_microcode);
+EXPORT_SYMBOL_GPL(has_newer_microcode);
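
cpu_signatures_match() treats the processor-flags word as a platform bitmask: both zero is a match, otherwise any common bit is. A few illustrative calls (the signature value is made up):

/* Illustrative calls; 0x306c3 is a hypothetical CPUID signature. */
cpu_signatures_match(0x306c3, 0x10, 0x306c3, 0x12);	/* true: bits intersect */
cpu_signatures_match(0x306c3, 0x10, 0x306c3, 0x02);	/* false: no common bit */
cpu_signatures_match(0x306c3, 0x00, 0x306c3, 0x00);	/* true: both zero */
cpu_signatures_match(0x306c3, 0x10, 0x40651, 0x10);	/* false: sigs differ */
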
index 939155ffdecec60628a06b2937604dd2f2f98813..aad4bd84b475ec4c762e72f62bbd9fb07f7ac2c4 100644 (file)
@@ -39,14 +39,12 @@ void hyperv_vector_handler(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
-       irq_enter();
-       exit_idle();
-
+       entering_irq();
        inc_irq_stat(irq_hv_callback_count);
        if (vmbus_handler)
                vmbus_handler();
 
-       irq_exit();
+       exiting_irq();
        set_irq_regs(old_regs);
 }
 
index 5f90b85ff22e584be8d1d7eb1615acde7584b397..70d7c93f455083e8703f6a37e94dc636432d7b64 100644 (file)
@@ -98,7 +98,8 @@ x86_get_mtrr_mem_range(struct range *range, int nr_range,
                        continue;
                base = range_state[i].base_pfn;
                if (base < (1<<(20-PAGE_SHIFT)) && mtrr_state.have_fixed &&
-                   (mtrr_state.enabled & 1)) {
+                   (mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
+                   (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
                        /* Var MTRR contains UC entry below 1M? Skip it: */
                        printk(BIOS_BUG_MSG, i);
                        if (base + size <= (1<<(20-PAGE_SHIFT)))
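
The extra test distinguishes the two enable bits of MSR_MTRRdefType that mtrr_state.enabled mirrors: E (bit 11) gates all MTRRs, FE (bit 10) gates only the fixed ranges. The named flags replace the bare "& 1" / "& 2" masks of the older code; their values, as implied by the conversion in this series:

/* Values implied by the conversion in this series (assumption). */
#define MTRR_STATE_MTRR_FIXED_ENABLED	0x01	/* MTRRdefType.FE, bit 10 */
#define MTRR_STATE_MTRR_ENABLED		0x02	/* MTRRdefType.E,  bit 11 */
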
index 7d74f7b3c6ba49ee5f3ef9baa93e9eb03a906992..3b533cf37c745c9ecfc81fc5fde94bc46f84e1b5 100644 (file)
@@ -102,59 +102,76 @@ static int check_type_overlap(u8 *prev, u8 *curr)
        return 0;
 }
 
-/*
- * Error/Semi-error returns:
- * 0xFF - when MTRR is not enabled
- * *repeat == 1 implies [start:end] spanned across MTRR range and type returned
- *             corresponds only to [start:*partial_end].
- *             Caller has to lookup again for [*partial_end:end].
+/**
+ * mtrr_type_lookup_fixed - look up memory type in MTRR fixed entries
+ *
+ * Return the MTRR fixed memory type of 'start'.
+ *
+ * MTRR fixed entries are divided into the following ranges:
+ *  0x00000 - 0x7FFFF : This range is divided into eight 64KB sub-ranges
+ *  0x80000 - 0xBFFFF : This range is divided into sixteen 16KB sub-ranges
+ *  0xC0000 - 0xFFFFF : This range is divided into sixty-four 4KB sub-ranges
+ *
+ * Return Values:
+ * MTRR_TYPE_(type)  - Matched memory type
+ * MTRR_TYPE_INVALID - Unmatched
+ */
+static u8 mtrr_type_lookup_fixed(u64 start, u64 end)
+{
+       int idx;
+
+       if (start >= 0x100000)
+               return MTRR_TYPE_INVALID;
+
+       /* 0x0 - 0x7FFFF */
+       if (start < 0x80000) {
+               idx = 0;
+               idx += (start >> 16);
+               return mtrr_state.fixed_ranges[idx];
+       /* 0x80000 - 0xBFFFF */
+       } else if (start < 0xC0000) {
+               idx = 1 * 8;
+               idx += ((start - 0x80000) >> 14);
+               return mtrr_state.fixed_ranges[idx];
+       }
+
+       /* 0xC0000 - 0xFFFFF */
+       idx = 3 * 8;
+       idx += ((start - 0xC0000) >> 12);
+       return mtrr_state.fixed_ranges[idx];
+}
+
+/**
+ * mtrr_type_lookup_variable - look up memory type in MTRR variable entries
+ *
+ * Return Value:
+ * MTRR_TYPE_(type) - Matched memory type or default memory type (unmatched)
+ *
+ * Output Arguments:
+ * repeat - Set to 1 when [start:end] spanned across MTRR range and type
+ *         returned corresponds only to [start:*partial_end].  Caller has
+ *         to lookup again for [*partial_end:end].
+ *
+ * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
+ *          region is fully covered by a single MTRR entry or the default
+ *          type.
  */
-static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
+static u8 mtrr_type_lookup_variable(u64 start, u64 end, u64 *partial_end,
+                                   int *repeat, u8 *uniform)
 {
        int i;
        u64 base, mask;
        u8 prev_match, curr_match;
 
        *repeat = 0;
-       if (!mtrr_state_set)
-               return 0xFF;
-
-       if (!mtrr_state.enabled)
-               return 0xFF;
+       *uniform = 1;
 
-       /* Make end inclusive end, instead of exclusive */
+       /* Make end inclusive instead of exclusive */
        end--;
 
-       /* Look in fixed ranges. Just return the type as per start */
-       if (mtrr_state.have_fixed && (start < 0x100000)) {
-               int idx;
-
-               if (start < 0x80000) {
-                       idx = 0;
-                       idx += (start >> 16);
-                       return mtrr_state.fixed_ranges[idx];
-               } else if (start < 0xC0000) {
-                       idx = 1 * 8;
-                       idx += ((start - 0x80000) >> 14);
-                       return mtrr_state.fixed_ranges[idx];
-               } else if (start < 0x1000000) {
-                       idx = 3 * 8;
-                       idx += ((start - 0xC0000) >> 12);
-                       return mtrr_state.fixed_ranges[idx];
-               }
-       }
-
-       /*
-        * Look in variable ranges
-        * Look of multiple ranges matching this address and pick type
-        * as per MTRR precedence
-        */
-       if (!(mtrr_state.enabled & 2))
-               return mtrr_state.def_type;
-
-       prev_match = 0xFF;
+       prev_match = MTRR_TYPE_INVALID;
        for (i = 0; i < num_var_ranges; ++i) {
-               unsigned short start_state, end_state;
+               unsigned short start_state, end_state, inclusive;
 
                if (!(mtrr_state.var_ranges[i].mask_lo & (1 << 11)))
                        continue;
@@ -166,20 +183,29 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 
                start_state = ((start & mask) == (base & mask));
                end_state = ((end & mask) == (base & mask));
+               inclusive = ((start < base) && (end > base));
 
-               if (start_state != end_state) {
+               if ((start_state != end_state) || inclusive) {
                        /*
                         * We have start:end spanning across an MTRR.
-                        * We split the region into
-                        * either
-                        * (start:mtrr_end) (mtrr_end:end)
-                        * or
-                        * (start:mtrr_start) (mtrr_start:end)
+                        * We split the region into either
+                        *
+                        * - start_state:1
+                        * (start:mtrr_end)(mtrr_end:end)
+                        * - end_state:1
+                        * (start:mtrr_start)(mtrr_start:end)
+                        * - inclusive:1
+                        * (start:mtrr_start)(mtrr_start:mtrr_end)(mtrr_end:end)
+                        *
                         * depending on kind of overlap.
-                        * Return the type for first region and a pointer to
-                        * the start of second region so that caller will
-                        * lookup again on the second region.
-                        * Note: This way we handle multiple overlaps as well.
+                        *
+                        * Return the type of the first region and a pointer
+                        * to the start of the next region so that the caller
+                        * will be advised to look up again after having
+                        * adjusted start and end.
+                        *
+                        * Note: This way we handle overlaps with multiple
+                        * entries and the default type properly.
                         */
                        if (start_state)
                                *partial_end = base + get_mtrr_size(mask);
@@ -193,59 +219,94 @@ static u8 __mtrr_type_lookup(u64 start, u64 end, u64 *partial_end, int *repeat)
 
                        end = *partial_end - 1; /* end is inclusive */
                        *repeat = 1;
+                       *uniform = 0;
                }
 
                if ((start & mask) != (base & mask))
                        continue;
 
                curr_match = mtrr_state.var_ranges[i].base_lo & 0xff;
-               if (prev_match == 0xFF) {
+               if (prev_match == MTRR_TYPE_INVALID) {
                        prev_match = curr_match;
                        continue;
                }
 
+               *uniform = 0;
                if (check_type_overlap(&prev_match, &curr_match))
                        return curr_match;
        }
 
-       if (mtrr_tom2) {
-               if (start >= (1ULL<<32) && (end < mtrr_tom2))
-                       return MTRR_TYPE_WRBACK;
-       }
-
-       if (prev_match != 0xFF)
+       if (prev_match != MTRR_TYPE_INVALID)
                return prev_match;
 
        return mtrr_state.def_type;
 }
 
-/*
- * Returns the effective MTRR type for the region
- * Error return:
- * 0xFF - when MTRR is not enabled
+/**
+ * mtrr_type_lookup - look up memory type in MTRR
+ *
+ * Return Values:
+ * MTRR_TYPE_(type)  - The effective MTRR type for the region
+ * MTRR_TYPE_INVALID - MTRR is disabled
+ *
+ * Output Argument:
+ * uniform - Set to 1 when an MTRR covers the region uniformly, i.e. the
+ *          region is fully covered by a single MTRR entry or the default
+ *          type.
  */
-u8 mtrr_type_lookup(u64 start, u64 end)
+u8 mtrr_type_lookup(u64 start, u64 end, u8 *uniform)
 {
-       u8 type, prev_type;
+       u8 type, prev_type, is_uniform = 1, dummy;
        int repeat;
        u64 partial_end;
 
-       type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+       if (!mtrr_state_set)
+               return MTRR_TYPE_INVALID;
+
+       if (!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED))
+               return MTRR_TYPE_INVALID;
+
+       /*
+        * Look up the fixed ranges first, which take priority over
+        * the variable ranges.
+        */
+       if ((start < 0x100000) &&
+           (mtrr_state.have_fixed) &&
+           (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) {
+               is_uniform = 0;
+               type = mtrr_type_lookup_fixed(start, end);
+               goto out;
+       }
+
+       /*
+        * Look up the variable ranges.  Look for multiple ranges matching
+        * this address and pick the type as per MTRR precedence.
+        */
+       type = mtrr_type_lookup_variable(start, end, &partial_end,
+                                        &repeat, &is_uniform);
 
        /*
         * Common path is with repeat = 0.
         * However, we can have cases where [start:end] spans across some
-        * MTRR range. Do repeated lookups for that case here.
+        * MTRR ranges and/or the default type.  Do repeated lookups for
+        * that case here.
         */
        while (repeat) {
                prev_type = type;
                start = partial_end;
-               type = __mtrr_type_lookup(start, end, &partial_end, &repeat);
+               is_uniform = 0;
+               type = mtrr_type_lookup_variable(start, end, &partial_end,
+                                                &repeat, &dummy);
 
                if (check_type_overlap(&prev_type, &type))
-                       return type;
+                       goto out;
        }
 
+       if (mtrr_tom2 && (start >= (1ULL<<32)) && (end < mtrr_tom2))
+               type = MTRR_TYPE_WRBACK;
+
+out:
+       *uniform = is_uniform;
        return type;
 }
 
@@ -347,7 +408,9 @@ static void __init print_mtrr_state(void)
                 mtrr_attrib_to_str(mtrr_state.def_type));
        if (mtrr_state.have_fixed) {
                pr_debug("MTRR fixed ranges %sabled:\n",
-                        mtrr_state.enabled & 1 ? "en" : "dis");
+                       ((mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED) &&
+                        (mtrr_state.enabled & MTRR_STATE_MTRR_FIXED_ENABLED)) ?
+                        "en" : "dis");
                print_fixed(0x00000, 0x10000, mtrr_state.fixed_ranges + 0);
                for (i = 0; i < 2; ++i)
                        print_fixed(0x80000 + i * 0x20000, 0x04000,
@@ -360,7 +423,7 @@ static void __init print_mtrr_state(void)
                print_fixed_last();
        }
        pr_debug("MTRR variable ranges %sabled:\n",
-                mtrr_state.enabled & 2 ? "en" : "dis");
+                mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED ? "en" : "dis");
        high_width = (__ffs64(size_or_mask) - (32 - PAGE_SHIFT) + 3) / 4;
 
        for (i = 0; i < num_var_ranges; ++i) {
@@ -382,7 +445,7 @@ static void __init print_mtrr_state(void)
 }
 
 /* Grab all of the MTRR state for this CPU into *state */
-void __init get_mtrr_state(void)
+bool __init get_mtrr_state(void)
 {
        struct mtrr_var_range *vrs;
        unsigned long flags;
@@ -426,6 +489,8 @@ void __init get_mtrr_state(void)
 
        post_set();
        local_irq_restore(flags);
+
+       return !!(mtrr_state.enabled & MTRR_STATE_MTRR_ENABLED);
 }
 
 /* Some BIOS's are messed up and don't set all MTRRs the same! */
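
The three fixed-range bands documented in mtrr_type_lookup_fixed() map
onto the fixed_ranges[] array with 64KB, 16KB and 4KB granularity
(8 + 16 + 64 = 88 slots). A compact sketch of the index math; the
function name is illustrative:

	#include <stdint.h>

	static int fixed_range_index(uint64_t start)
	{
		if (start >= 0x100000)
			return -1;			/* not a fixed range */
		if (start < 0x80000)			/* 8 x 64KB entries */
			return start >> 16;
		if (start < 0xC0000)			/* 16 x 16KB entries */
			return 8 + ((start - 0x80000) >> 14);
		return 24 + ((start - 0xC0000) >> 12);	/* 64 x 4KB entries */
	}
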
index ea5f363a194866303395358353a658750dcff3d4..e7ed0d8ebacb158833ea248c2d3efb6d2d6379de 100644 (file)
 #define MTRR_TO_PHYS_WC_OFFSET 1000
 
 u32 num_var_ranges;
+static bool __mtrr_enabled;
+
+static bool mtrr_enabled(void)
+{
+       return __mtrr_enabled;
+}
 
 unsigned int mtrr_usage_table[MTRR_MAX_VAR_RANGES];
 static DEFINE_MUTEX(mtrr_mutex);
@@ -286,7 +292,7 @@ int mtrr_add_page(unsigned long base, unsigned long size,
        int i, replace, error;
        mtrr_type ltype;
 
-       if (!mtrr_if)
+       if (!mtrr_enabled())
                return -ENXIO;
 
        error = mtrr_if->validate_add_page(base, size, type);
@@ -435,6 +441,8 @@ static int mtrr_check(unsigned long base, unsigned long size)
 int mtrr_add(unsigned long base, unsigned long size, unsigned int type,
             bool increment)
 {
+       if (!mtrr_enabled())
+               return -ENODEV;
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_add_page(base >> PAGE_SHIFT, size >> PAGE_SHIFT, type,
@@ -463,8 +471,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
        unsigned long lbase, lsize;
        int error = -EINVAL;
 
-       if (!mtrr_if)
-               return -ENXIO;
+       if (!mtrr_enabled())
+               return -ENODEV;
 
        max = num_var_ranges;
        /* No CPU hotplug when we change MTRR entries */
@@ -523,6 +531,8 @@ int mtrr_del_page(int reg, unsigned long base, unsigned long size)
  */
 int mtrr_del(int reg, unsigned long base, unsigned long size)
 {
+       if (!mtrr_enabled())
+               return -ENODEV;
        if (mtrr_check(base, size))
                return -EINVAL;
        return mtrr_del_page(reg, base >> PAGE_SHIFT, size >> PAGE_SHIFT);
@@ -538,6 +548,9 @@ EXPORT_SYMBOL(mtrr_del);
  * attempts to add a WC MTRR covering size bytes starting at base and
  * logs an error if this fails.
  *
+ * The caller should provide a power of two size on an equivalent
+ * power of two boundary.
+ *
  * Drivers must store the return value to pass to mtrr_del_wc_if_needed,
  * but drivers should not try to interpret that return value.
  */
@@ -545,7 +558,7 @@ int arch_phys_wc_add(unsigned long base, unsigned long size)
 {
        int ret;
 
-       if (pat_enabled)
+       if (pat_enabled() || !mtrr_enabled())
                return 0;  /* Success!  (We don't need to do anything.) */
 
        ret = mtrr_add(base, size, MTRR_TYPE_WRCOMB, true);
@@ -577,7 +590,7 @@ void arch_phys_wc_del(int handle)
 EXPORT_SYMBOL(arch_phys_wc_del);
 
 /*
- * phys_wc_to_mtrr_index - translates arch_phys_wc_add's return value
+ * arch_phys_wc_index - translates arch_phys_wc_add's return value
  * @handle: Return value from arch_phys_wc_add
  *
  * This will turn the return value from arch_phys_wc_add into an mtrr
@@ -587,14 +600,14 @@ EXPORT_SYMBOL(arch_phys_wc_del);
  * in printk line.  Alas there is an illegitimate use in some ancient
  * drm ioctls.
  */
-int phys_wc_to_mtrr_index(int handle)
+int arch_phys_wc_index(int handle)
 {
        if (handle < MTRR_TO_PHYS_WC_OFFSET)
                return -1;
        else
                return handle - MTRR_TO_PHYS_WC_OFFSET;
 }
-EXPORT_SYMBOL_GPL(phys_wc_to_mtrr_index);
+EXPORT_SYMBOL_GPL(arch_phys_wc_index);
 
 /*
  * HACK ALERT!
@@ -734,10 +747,12 @@ void __init mtrr_bp_init(void)
        }
 
        if (mtrr_if) {
+               __mtrr_enabled = true;
                set_num_var_ranges();
                init_table();
                if (use_intel()) {
-                       get_mtrr_state();
+                       /* BIOS may override */
+                       __mtrr_enabled = get_mtrr_state();
 
                        if (mtrr_cleanup(phys_addr)) {
                                changed_by_mtrr_cleanup = 1;
@@ -745,10 +760,16 @@ void __init mtrr_bp_init(void)
                        }
                }
        }
+
+       if (!mtrr_enabled())
+               pr_info("MTRR: Disabled\n");
 }
 
 void mtrr_ap_init(void)
 {
+       if (!mtrr_enabled())
+               return;
+
        if (!use_intel() || mtrr_aps_delayed_init)
                return;
        /*
@@ -774,6 +795,9 @@ void mtrr_save_state(void)
 {
        int first_cpu;
 
+       if (!mtrr_enabled())
+               return;
+
        get_online_cpus();
        first_cpu = cpumask_first(cpu_online_mask);
        smp_call_function_single(first_cpu, mtrr_save_fixed_ranges, NULL, 1);
@@ -782,6 +806,8 @@ void mtrr_save_state(void)
 
 void set_mtrr_aps_delayed_init(void)
 {
+       if (!mtrr_enabled())
+               return;
        if (!use_intel())
                return;
 
@@ -793,7 +819,7 @@ void set_mtrr_aps_delayed_init(void)
  */
 void mtrr_aps_init(void)
 {
-       if (!use_intel())
+       if (!use_intel() || !mtrr_enabled())
                return;
 
        /*
@@ -810,7 +836,7 @@ void mtrr_aps_init(void)
 
 void mtrr_bp_restore(void)
 {
-       if (!use_intel())
+       if (!use_intel() || !mtrr_enabled())
                return;
 
        mtrr_if->set_all();
@@ -818,7 +844,7 @@ void mtrr_bp_restore(void)
 
 static int __init mtrr_init_finialize(void)
 {
-       if (!mtrr_if)
+       if (!mtrr_enabled())
                return 0;
 
        if (use_intel()) {
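
With mtrr_enabled() gating every public entry point, callers of the
write-combining helpers no longer need their own MTRR checks. A hedged
driver-side usage sketch (sketch_probe, bar_base and bar_len are
placeholders); the returned cookie is stored opaquely and passed back
on teardown:

	static int sketch_probe(unsigned long bar_base, unsigned long bar_len)
	{
		int wc_cookie = arch_phys_wc_add(bar_base, bar_len);

		/* ... map and use the aperture ... */

		arch_phys_wc_del(wc_cookie);	/* safe for any cookie value */
		return 0;
	}
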
index df5e41f31a27e71cd44aadd35cf43f311b437008..951884dcc43354573c2bd234aed3fd3adb067a84 100644 (file)
@@ -51,7 +51,7 @@ void set_mtrr_prepare_save(struct set_mtrr_context *ctxt);
 
 void fill_mtrr_var_range(unsigned int index,
                u32 base_lo, u32 base_hi, u32 mask_lo, u32 mask_hi);
-void get_mtrr_state(void);
+bool get_mtrr_state(void);
 
 extern void set_mtrr_ops(const struct mtrr_ops *ops);
 
index 87848ebe2bb79a56625908c5a6af1b78055d70c9..5801a14f7524315a7318fe5a0f60509704fdb756 100644 (file)
@@ -135,6 +135,7 @@ static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
 }
 
 static atomic_t active_events;
+static atomic_t pmc_refcount;
 static DEFINE_MUTEX(pmc_reserve_mutex);
 
 #ifdef CONFIG_X86_LOCAL_APIC
@@ -190,6 +191,7 @@ static bool check_hw_exists(void)
        u64 val, val_fail, val_new= ~0;
        int i, reg, reg_fail, ret = 0;
        int bios_fail = 0;
+       int reg_safe = -1;
 
        /*
         * Check to see if the BIOS enabled any of the counters, if so
@@ -204,6 +206,8 @@ static bool check_hw_exists(void)
                        bios_fail = 1;
                        val_fail = val;
                        reg_fail = reg;
+               } else {
+                       reg_safe = i;
                }
        }
 
@@ -221,12 +225,23 @@ static bool check_hw_exists(void)
                }
        }
 
+       /*
+        * If all the counters are enabled, the test below will always
+        * fail.  The tools will also become useless in this scenario.
+        * Just fail and disable the hardware counters.
+        */
+
+       if (reg_safe == -1) {
+               reg = reg_safe;
+               goto msr_fail;
+       }
+
        /*
         * Read the current value, change it and read it back to see if it
         * matches, this is needed to detect certain hardware emulators
         * (qemu/kvm) that don't trap on the MSR access and always return 0s.
         */
-       reg = x86_pmu_event_addr(0);
+       reg = x86_pmu_event_addr(reg_safe);
        if (rdmsrl_safe(reg, &val))
                goto msr_fail;
        val ^= 0xffffUL;
@@ -256,11 +271,8 @@ msr_fail:
 
 static void hw_perf_event_destroy(struct perf_event *event)
 {
-       if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
-               release_pmc_hardware();
-               release_ds_buffers();
-               mutex_unlock(&pmc_reserve_mutex);
-       }
+       x86_release_hardware();
+       atomic_dec(&active_events);
 }
 
 void hw_perf_lbr_event_destroy(struct perf_event *event)
@@ -310,6 +322,35 @@ set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
        return x86_pmu_extra_regs(val, event);
 }
 
+int x86_reserve_hardware(void)
+{
+       int err = 0;
+
+       if (!atomic_inc_not_zero(&pmc_refcount)) {
+               mutex_lock(&pmc_reserve_mutex);
+               if (atomic_read(&pmc_refcount) == 0) {
+                       if (!reserve_pmc_hardware())
+                               err = -EBUSY;
+                       else
+                               reserve_ds_buffers();
+               }
+               if (!err)
+                       atomic_inc(&pmc_refcount);
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+
+       return err;
+}
+
+void x86_release_hardware(void)
+{
+       if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
+               release_pmc_hardware();
+               release_ds_buffers();
+               mutex_unlock(&pmc_reserve_mutex);
+       }
+}
+
 /*
  * Check if we can create event of a certain type (that no conflicting events
  * are present).
@@ -322,21 +363,34 @@ int x86_add_exclusive(unsigned int what)
                return 0;
 
        mutex_lock(&pmc_reserve_mutex);
-       for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++)
+       for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
                if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
                        goto out;
+       }
 
        atomic_inc(&x86_pmu.lbr_exclusive[what]);
        ret = 0;
 
 out:
        mutex_unlock(&pmc_reserve_mutex);
+
+       /*
+        * Assuming that all exclusive events will share the PMI handler
+        * (which checks active_events for whether there is work to do),
+        * we can bump the active_events counter right here, except for
+        * x86_lbr_exclusive_lbr events, which go through the
+        * x86_pmu_event_init() path that already bumps active_events.
+        */
+       if (!ret && what != x86_lbr_exclusive_lbr)
+               atomic_inc(&active_events);
+
        return ret;
 }
 
 void x86_del_exclusive(unsigned int what)
 {
        atomic_dec(&x86_pmu.lbr_exclusive[what]);
+       atomic_dec(&active_events);
 }
 
 int x86_setup_perfctr(struct perf_event *event)
@@ -513,22 +567,11 @@ static int __x86_pmu_event_init(struct perf_event *event)
        if (!x86_pmu_initialized())
                return -ENODEV;
 
-       err = 0;
-       if (!atomic_inc_not_zero(&active_events)) {
-               mutex_lock(&pmc_reserve_mutex);
-               if (atomic_read(&active_events) == 0) {
-                       if (!reserve_pmc_hardware())
-                               err = -EBUSY;
-                       else
-                               reserve_ds_buffers();
-               }
-               if (!err)
-                       atomic_inc(&active_events);
-               mutex_unlock(&pmc_reserve_mutex);
-       }
+       err = x86_reserve_hardware();
        if (err)
                return err;
 
+       atomic_inc(&active_events);
        event->destroy = hw_perf_event_destroy;
 
        event->hw.idx = -1;
@@ -611,6 +654,7 @@ struct sched_state {
        int     event;          /* event index */
        int     counter;        /* counter index */
        int     unassigned;     /* number of events to be assigned left */
+       int     nr_gp;          /* number of GP counters used */
        unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
 };
 
@@ -620,27 +664,29 @@ struct sched_state {
 struct perf_sched {
        int                     max_weight;
        int                     max_events;
-       struct perf_event       **events;
-       struct sched_state      state;
+       int                     max_gp;
        int                     saved_states;
+       struct event_constraint **constraints;
+       struct sched_state      state;
        struct sched_state      saved[SCHED_STATES_MAX];
 };
 
 /*
  * Initialize iterator that runs through all events and counters.
  */
-static void perf_sched_init(struct perf_sched *sched, struct perf_event **events,
-                           int num, int wmin, int wmax)
+static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
+                           int num, int wmin, int wmax, int gpmax)
 {
        int idx;
 
        memset(sched, 0, sizeof(*sched));
        sched->max_events       = num;
        sched->max_weight       = wmax;
-       sched->events           = events;
+       sched->max_gp           = gpmax;
+       sched->constraints      = constraints;
 
        for (idx = 0; idx < num; idx++) {
-               if (events[idx]->hw.constraint->weight == wmin)
+               if (constraints[idx]->weight == wmin)
                        break;
        }
 
@@ -687,7 +733,7 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
        if (sched->state.event >= sched->max_events)
                return false;
 
-       c = sched->events[sched->state.event]->hw.constraint;
+       c = sched->constraints[sched->state.event];
        /* Prefer fixed purpose counters */
        if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
                idx = INTEL_PMC_IDX_FIXED;
@@ -696,11 +742,16 @@ static bool __perf_sched_find_counter(struct perf_sched *sched)
                                goto done;
                }
        }
+
        /* Grab the first unused counter starting with idx */
        idx = sched->state.counter;
        for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
-               if (!__test_and_set_bit(idx, sched->state.used))
+               if (!__test_and_set_bit(idx, sched->state.used)) {
+                       if (sched->state.nr_gp++ >= sched->max_gp)
+                               return false;
+
                        goto done;
+               }
        }
 
        return false;
@@ -745,7 +796,7 @@ static bool perf_sched_next_event(struct perf_sched *sched)
                        if (sched->state.weight > sched->max_weight)
                                return false;
                }
-               c = sched->events[sched->state.event]->hw.constraint;
+               c = sched->constraints[sched->state.event];
        } while (c->weight != sched->state.weight);
 
        sched->state.counter = 0;       /* start with first counter */
@@ -756,12 +807,12 @@ static bool perf_sched_next_event(struct perf_sched *sched)
 /*
  * Assign a counter for each event.
  */
-int perf_assign_events(struct perf_event **events, int n,
-                       int wmin, int wmax, int *assign)
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int gpmax, int *assign)
 {
        struct perf_sched sched;
 
-       perf_sched_init(&sched, events, n, wmin, wmax);
+       perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);
 
        do {
                if (!perf_sched_find_counter(&sched))
@@ -788,9 +839,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                x86_pmu.start_scheduling(cpuc);
 
        for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
-               hwc = &cpuc->event_list[i]->hw;
+               cpuc->event_constraint[i] = NULL;
                c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
-               hwc->constraint = c;
+               cpuc->event_constraint[i] = c;
 
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
@@ -801,7 +852,7 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
         */
        for (i = 0; i < n; i++) {
                hwc = &cpuc->event_list[i]->hw;
-               c = hwc->constraint;
+               c = cpuc->event_constraint[i];
 
                /* never assigned */
                if (hwc->idx == -1)
@@ -821,9 +872,26 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
        }
 
        /* slow path */
-       if (i != n)
-               unsched = perf_assign_events(cpuc->event_list, n, wmin,
-                                            wmax, assign);
+       if (i != n) {
+               int gpmax = x86_pmu.num_counters;
+
+               /*
+                * Do not allow scheduling of more than half the available
+                * generic counters.
+                *
+                * This helps avoid counter starvation of the sibling thread
+                * by ensuring at most half the counters cannot be in
+                * exclusive mode.  There are no designated counters for the
+                * limits; any N/2 counters can be used.  This helps with
+                * events that have specific counter constraints.
+                */
+               if (is_ht_workaround_enabled() && !cpuc->is_fake &&
+                   READ_ONCE(cpuc->excl_cntrs->exclusive_present))
+                       gpmax /= 2;
+
+               unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
+                                            wmax, gpmax, assign);
+       }
 
        /*
         * In case of success (unsched = 0), mark events as committed,
@@ -840,12 +908,9 @@ int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
                        e = cpuc->event_list[i];
                        e->hw.flags |= PERF_X86_EVENT_COMMITTED;
                        if (x86_pmu.commit_scheduling)
-                               x86_pmu.commit_scheduling(cpuc, e, assign[i]);
+                               x86_pmu.commit_scheduling(cpuc, i, assign[i]);
                }
-       }
-
-       if (!assign || unsched) {
-
+       } else {
                for (i = 0; i < n; i++) {
                        e = cpuc->event_list[i];
                        /*
@@ -1058,13 +1123,16 @@ int x86_perf_event_set_period(struct perf_event *event)
 
        per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
 
-       /*
-        * The hw event starts counting from this event offset,
-        * mark it to be able to extra future deltas:
-        */
-       local64_set(&hwc->prev_count, (u64)-left);
+       if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
+           local64_read(&hwc->prev_count) != (u64)-left) {
+               /*
+                * The hw event starts counting from this event offset,
+                * mark it to be able to extract future deltas:
+                */
+               local64_set(&hwc->prev_count, (u64)-left);
 
-       wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+               wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
+       }
 
        /*
         * Due to erratum on certain cpu we need
@@ -1292,8 +1360,10 @@ static void x86_pmu_del(struct perf_event *event, int flags)
                x86_pmu.put_event_constraints(cpuc, event);
 
        /* Delete the array entry. */
-       while (++i < cpuc->n_events)
+       while (++i < cpuc->n_events) {
                cpuc->event_list[i-1] = cpuc->event_list[i];
+               cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
+       }
        --cpuc->n_events;
 
        perf_event_update_userpage(event);
@@ -1374,6 +1444,10 @@ perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
        u64 finish_clock;
        int ret;
 
+       /*
+        * All PMUs/events that share this PMI handler should make sure to
+        * increment active_events for their events.
+        */
        if (!atomic_read(&active_events))
                return NMI_DONE;
 
index 6ac5cb7a9e14839dcd0b622a91f0f0939133c81e..3e7fd27dfe201718860185be3fe5eeb3c028ed5c 100644 (file)
@@ -74,6 +74,9 @@ struct event_constraint {
 #define PERF_X86_EVENT_EXCL            0x0040 /* HT exclusivity on counter */
 #define PERF_X86_EVENT_DYNAMIC         0x0080 /* dynamic alloc'd constraint */
 #define PERF_X86_EVENT_RDPMC_ALLOWED   0x0100 /* grant rdpmc permission */
+#define PERF_X86_EVENT_EXCL_ACCT       0x0200 /* accounted EXCL event */
+#define PERF_X86_EVENT_AUTO_RELOAD     0x0400 /* use PEBS auto-reload */
+#define PERF_X86_EVENT_FREERUNNING     0x0800 /* use freerunning PEBS */
 
 
 struct amd_nb {
@@ -86,6 +89,18 @@ struct amd_nb {
 /* The maximal number of PEBS events: */
 #define MAX_PEBS_EVENTS                8
 
+/*
+ * Flags PEBS can handle without a PMI.
+ *
+ * TID can only be handled by flushing at context switch.
+ */
+#define PEBS_FREERUNNING_FLAGS \
+       (PERF_SAMPLE_IP | PERF_SAMPLE_TID | PERF_SAMPLE_ADDR | \
+       PERF_SAMPLE_ID | PERF_SAMPLE_CPU | PERF_SAMPLE_STREAM_ID | \
+       PERF_SAMPLE_DATA_SRC | PERF_SAMPLE_IDENTIFIER | \
+       PERF_SAMPLE_TRANSACTION)
+
 /*
  * A debug store configuration.
  *
@@ -132,10 +147,7 @@ enum intel_excl_state_type {
 };
 
 struct intel_excl_states {
-       enum intel_excl_state_type init_state[X86_PMC_IDX_MAX];
        enum intel_excl_state_type state[X86_PMC_IDX_MAX];
-       int  num_alloc_cntrs;/* #counters allocated */
-       int  max_alloc_cntrs;/* max #counters allowed */
        bool sched_started; /* true if scheduling has started */
 };
 
@@ -144,6 +156,11 @@ struct intel_excl_cntrs {
 
        struct intel_excl_states states[2];
 
+       union {
+               u16     has_exclusive[2];
+               u32     exclusive_present;
+       };
+
        int             refcnt;         /* per-core: #HT threads */
        unsigned        core_id;        /* per-core: core id */
 };
@@ -172,7 +189,11 @@ struct cpu_hw_events {
                                             added in the current transaction */
        int                     assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
        u64                     tags[X86_PMC_IDX_MAX];
+
        struct perf_event       *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
+       struct event_constraint *event_constraint[X86_PMC_IDX_MAX];
+
+       int                     n_excl; /* the number of exclusive events */
 
        unsigned int            group_flag;
        int                     is_fake;
@@ -519,12 +540,10 @@ struct x86_pmu {
        void            (*put_event_constraints)(struct cpu_hw_events *cpuc,
                                                 struct perf_event *event);
 
-       void            (*commit_scheduling)(struct cpu_hw_events *cpuc,
-                                            struct perf_event *event,
-                                            int cntr);
-
        void            (*start_scheduling)(struct cpu_hw_events *cpuc);
 
+       void            (*commit_scheduling)(struct cpu_hw_events *cpuc, int idx, int cntr);
+
        void            (*stop_scheduling)(struct cpu_hw_events *cpuc);
 
        struct event_constraint *event_constraints;
@@ -697,6 +716,10 @@ int x86_add_exclusive(unsigned int what);
 
 void x86_del_exclusive(unsigned int what);
 
+int x86_reserve_hardware(void);
+
+void x86_release_hardware(void);
+
 void hw_perf_lbr_event_destroy(struct perf_event *event);
 
 int x86_setup_perfctr(struct perf_event *event);
@@ -717,8 +740,8 @@ static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
 
 void x86_pmu_enable_all(int added);
 
-int perf_assign_events(struct perf_event **events, int n,
-                       int wmin, int wmax, int *assign);
+int perf_assign_events(struct event_constraint **constraints, int n,
+                       int wmin, int wmax, int gpmax, int *assign);
 int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign);
 
 void x86_pmu_stop(struct perf_event *event, int flags);
@@ -860,6 +883,8 @@ void intel_pmu_pebs_enable_all(void);
 
 void intel_pmu_pebs_disable_all(void);
 
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in);
+
 void intel_ds_init(void);
 
 void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in);
@@ -929,4 +954,8 @@ static inline struct intel_shared_regs *allocate_shared_regs(int cpu)
        return NULL;
 }
 
+static inline int is_ht_workaround_enabled(void)
+{
+       return 0;
+}
 #endif /* CONFIG_CPU_SUP_INTEL */
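
The has_exclusive[2]/exclusive_present union added above lets each
hyperthread flag its own 16-bit slot while the scheduler tests both
siblings with a single 32-bit load; either slot being nonzero makes
exclusive_present nonzero, regardless of endianness. A minimal sketch
of the aliasing:

	#include <stdint.h>

	union excl_flags_sketch {
		uint16_t has_exclusive[2];	/* indexed by HT thread id */
		uint32_t exclusive_present;	/* both slots in one read  */
	};

When exclusive_present reads nonzero, x86_schedule_events() halves
gpmax so neither sibling can monopolize the generic counters.
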
index 960e85de13fbca45c317eeb8b719f05c7914ba1f..b9826a981fb20fa45a7c1255e277e9ad1cd5d150 100644 (file)
@@ -1134,7 +1134,7 @@ static __initconst const u64 slm_hw_cache_extra_regs
  [ C(LL  ) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = SLM_DMND_READ|SLM_LLC_ACCESS,
-               [ C(RESULT_MISS)   ] = SLM_DMND_READ|SLM_LLC_MISS,
+               [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = SLM_DMND_WRITE|SLM_LLC_ACCESS,
@@ -1184,8 +1184,7 @@ static __initconst const u64 slm_hw_cache_event_ids
        [ C(OP_READ) ] = {
                /* OFFCORE_RESPONSE.ANY_DATA.LOCAL_CACHE */
                [ C(RESULT_ACCESS) ] = 0x01b7,
-               /* OFFCORE_RESPONSE.ANY_DATA.ANY_LLC_MISS */
-               [ C(RESULT_MISS)   ] = 0x01b7,
+               [ C(RESULT_MISS)   ] = 0,
        },
        [ C(OP_WRITE) ] = {
                /* OFFCORE_RESPONSE.ANY_RFO.LOCAL_CACHE */
@@ -1217,7 +1216,7 @@ static __initconst const u64 slm_hw_cache_event_ids
  [ C(ITLB) ] = {
        [ C(OP_READ) ] = {
                [ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
-               [ C(RESULT_MISS)   ] = 0x0282, /* ITLB.MISSES */
+               [ C(RESULT_MISS)   ] = 0x40205, /* PAGE_WALKS.I_SIDE_WALKS */
        },
        [ C(OP_WRITE) ] = {
                [ C(RESULT_ACCESS) ] = -1,
@@ -1904,9 +1903,8 @@ static void
 intel_start_scheduling(struct cpu_hw_events *cpuc)
 {
        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct intel_excl_states *xl, *xlo;
+       struct intel_excl_states *xl;
        int tid = cpuc->excl_thread_id;
-       int o_tid = 1 - tid; /* sibling thread */
 
        /*
         * nothing needed if in group validation mode
@@ -1917,35 +1915,52 @@ intel_start_scheduling(struct cpu_hw_events *cpuc)
        /*
         * no exclusion needed
         */
-       if (!excl_cntrs)
+       if (WARN_ON_ONCE(!excl_cntrs))
                return;
 
-       xlo = &excl_cntrs->states[o_tid];
        xl = &excl_cntrs->states[tid];
 
        xl->sched_started = true;
-       xl->num_alloc_cntrs = 0;
        /*
         * lock shared state until we are done scheduling
         * in stop_event_scheduling()
         * makes scheduling appear as a transaction
         */
-       WARN_ON_ONCE(!irqs_disabled());
        raw_spin_lock(&excl_cntrs->lock);
+}
 
-       /*
-        * save initial state of sibling thread
-        */
-       memcpy(xlo->init_state, xlo->state, sizeof(xlo->init_state));
+static void intel_commit_scheduling(struct cpu_hw_events *cpuc, int idx, int cntr)
+{
+       struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
+       struct event_constraint *c = cpuc->event_constraint[idx];
+       struct intel_excl_states *xl;
+       int tid = cpuc->excl_thread_id;
+
+       if (cpuc->is_fake || !is_ht_workaround_enabled())
+               return;
+
+       if (WARN_ON_ONCE(!excl_cntrs))
+               return;
+
+       if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
+               return;
+
+       xl = &excl_cntrs->states[tid];
+
+       lockdep_assert_held(&excl_cntrs->lock);
+
+       if (c->flags & PERF_X86_EVENT_EXCL)
+               xl->state[cntr] = INTEL_EXCL_EXCLUSIVE;
+       else
+               xl->state[cntr] = INTEL_EXCL_SHARED;
 }
 
 static void
 intel_stop_scheduling(struct cpu_hw_events *cpuc)
 {
        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct intel_excl_states *xl, *xlo;
+       struct intel_excl_states *xl;
        int tid = cpuc->excl_thread_id;
-       int o_tid = 1 - tid; /* sibling thread */
 
        /*
         * nothing needed if in group validation mode
@@ -1955,17 +1970,11 @@ intel_stop_scheduling(struct cpu_hw_events *cpuc)
        /*
         * no exclusion needed
         */
-       if (!excl_cntrs)
+       if (WARN_ON_ONCE(!excl_cntrs))
                return;
 
-       xlo = &excl_cntrs->states[o_tid];
        xl = &excl_cntrs->states[tid];
 
-       /*
-        * make new sibling thread state visible
-        */
-       memcpy(xlo->state, xlo->init_state, sizeof(xlo->state));
-
        xl->sched_started = false;
        /*
         * release shared state lock (acquired in intel_start_scheduling())
@@ -1977,12 +1986,10 @@ static struct event_constraint *
 intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                           int idx, struct event_constraint *c)
 {
-       struct event_constraint *cx;
        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct intel_excl_states *xl, *xlo;
-       int is_excl, i;
+       struct intel_excl_states *xlo;
        int tid = cpuc->excl_thread_id;
-       int o_tid = 1 - tid; /* alternate */
+       int is_excl, i;
 
        /*
         * validating a group does not require
@@ -1994,34 +2001,8 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
        /*
         * no exclusion needed
         */
-       if (!excl_cntrs)
+       if (WARN_ON_ONCE(!excl_cntrs))
                return c;
-       /*
-        * event requires exclusive counter access
-        * across HT threads
-        */
-       is_excl = c->flags & PERF_X86_EVENT_EXCL;
-
-       /*
-        * xl = state of current HT
-        * xlo = state of sibling HT
-        */
-       xl = &excl_cntrs->states[tid];
-       xlo = &excl_cntrs->states[o_tid];
-
-       /*
-        * do not allow scheduling of more than max_alloc_cntrs
-        * which is set to half the available generic counters.
-        * this helps avoid counter starvation of sibling thread
-        * by ensuring at most half the counters cannot be in
-        * exclusive mode. There is not designated counters for the
-        * limits. Any N/2 counters can be used. This helps with
-        * events with specifix counter constraints
-        */
-       if (xl->num_alloc_cntrs++ == xl->max_alloc_cntrs)
-               return &emptyconstraint;
-
-       cx = c;
 
        /*
         * because we modify the constraint, we need
@@ -2032,10 +2013,7 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
         * been cloned (marked dynamic)
         */
        if (!(c->flags & PERF_X86_EVENT_DYNAMIC)) {
-
-               /* sanity check */
-               if (idx < 0)
-                       return &emptyconstraint;
+               struct event_constraint *cx;
 
                /*
                 * grab pre-allocated constraint entry
@@ -2046,13 +2024,14 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
                 * initialize dynamic constraint
                 * with static constraint
                 */
-               memcpy(cx, c, sizeof(*cx));
+               *cx = *c;
 
                /*
                 * mark constraint as dynamic, so we
                 * can free it later on
                 */
                cx->flags |= PERF_X86_EVENT_DYNAMIC;
+               c = cx;
        }
 
        /*
@@ -2062,6 +2041,22 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
         * of this function
         */
 
+       /*
+        * state of sibling HT
+        */
+       xlo = &excl_cntrs->states[tid ^ 1];
+
+       /*
+        * event requires exclusive counter access
+        * across HT threads
+        */
+       is_excl = c->flags & PERF_X86_EVENT_EXCL;
+       if (is_excl && !(event->hw.flags & PERF_X86_EVENT_EXCL_ACCT)) {
+               event->hw.flags |= PERF_X86_EVENT_EXCL_ACCT;
+               if (!cpuc->n_excl++)
+                       WRITE_ONCE(excl_cntrs->has_exclusive[tid], 1);
+       }
+
        /*
         * Modify static constraint with current dynamic
         * state of thread
@@ -2070,44 +2065,44 @@ intel_get_excl_constraints(struct cpu_hw_events *cpuc, struct perf_event *event,
         * SHARED   : sibling counter measuring non-exclusive event
         * UNUSED   : sibling counter unused
         */
-       for_each_set_bit(i, cx->idxmsk, X86_PMC_IDX_MAX) {
+       for_each_set_bit(i, c->idxmsk, X86_PMC_IDX_MAX) {
                /*
                 * exclusive event in sibling counter
                 * our corresponding counter cannot be used
                 * regardless of our event
                 */
-               if (xl->state[i] == INTEL_EXCL_EXCLUSIVE)
-                       __clear_bit(i, cx->idxmsk);
+               if (xlo->state[i] == INTEL_EXCL_EXCLUSIVE)
+                       __clear_bit(i, c->idxmsk);
                /*
                 * if measuring an exclusive event, sibling
                 * measuring non-exclusive, then counter cannot
                 * be used
                 */
-               if (is_excl && xl->state[i] == INTEL_EXCL_SHARED)
-                       __clear_bit(i, cx->idxmsk);
+               if (is_excl && xlo->state[i] == INTEL_EXCL_SHARED)
+                       __clear_bit(i, c->idxmsk);
        }
 
        /*
         * recompute actual bit weight for scheduling algorithm
         */
-       cx->weight = hweight64(cx->idxmsk64);
+       c->weight = hweight64(c->idxmsk64);
 
        /*
         * if we return an empty mask, then switch
         * back to static empty constraint to avoid
         * the cost of freeing later on
         */
-       if (cx->weight == 0)
-               cx = &emptyconstraint;
+       if (c->weight == 0)
+               c = &emptyconstraint;
 
-       return cx;
+       return c;
 }
 
 static struct event_constraint *
 intel_get_event_constraints(struct cpu_hw_events *cpuc, int idx,
                            struct perf_event *event)
 {
-       struct event_constraint *c1 = event->hw.constraint;
+       struct event_constraint *c1 = cpuc->event_constraint[idx];
        struct event_constraint *c2;
 
        /*
@@ -2133,10 +2128,8 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
 {
        struct hw_perf_event *hwc = &event->hw;
        struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct intel_excl_states *xlo, *xl;
-       unsigned long flags = 0; /* keep compiler happy */
        int tid = cpuc->excl_thread_id;
-       int o_tid = 1 - tid;
+       struct intel_excl_states *xl;
 
        /*
         * nothing needed if in group validation mode
@@ -2144,31 +2137,35 @@ static void intel_put_excl_constraints(struct cpu_hw_events *cpuc,
        if (cpuc->is_fake)
                return;
 
-       WARN_ON_ONCE(!excl_cntrs);
-
-       if (!excl_cntrs)
+       if (WARN_ON_ONCE(!excl_cntrs))
                return;
 
-       xl = &excl_cntrs->states[tid];
-       xlo = &excl_cntrs->states[o_tid];
+       if (hwc->flags & PERF_X86_EVENT_EXCL_ACCT) {
+               hwc->flags &= ~PERF_X86_EVENT_EXCL_ACCT;
+               if (!--cpuc->n_excl)
+                       WRITE_ONCE(excl_cntrs->has_exclusive[tid], 0);
+       }
 
        /*
-        * put_constraint may be called from x86_schedule_events()
-        * which already has the lock held so here make locking
-        * conditional
+        * If the event was actually assigned, then mark the counter state as
+        * unused now.
         */
-       if (!xl->sched_started)
-               raw_spin_lock_irqsave(&excl_cntrs->lock, flags);
+       if (hwc->idx >= 0) {
+               xl = &excl_cntrs->states[tid];
 
-       /*
-        * if event was actually assigned, then mark the
-        * counter state as unused now
-        */
-       if (hwc->idx >= 0)
-               xlo->state[hwc->idx] = INTEL_EXCL_UNUSED;
+               /*
+                * put_constraint may be called from x86_schedule_events()
+                * which already has the lock held so here make locking
+                * conditional.
+                */
+               if (!xl->sched_started)
+                       raw_spin_lock(&excl_cntrs->lock);
+
+               xl->state[hwc->idx] = INTEL_EXCL_UNUSED;
 
-       if (!xl->sched_started)
-               raw_spin_unlock_irqrestore(&excl_cntrs->lock, flags);
+               if (!xl->sched_started)
+                       raw_spin_unlock(&excl_cntrs->lock);
+       }
 }
 
 static void
@@ -2189,8 +2186,6 @@ intel_put_shared_regs_event_constraints(struct cpu_hw_events *cpuc,
 static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
                                        struct perf_event *event)
 {
-       struct event_constraint *c = event->hw.constraint;
-
        intel_put_shared_regs_event_constraints(cpuc, event);
 
        /*
@@ -2198,48 +2193,8 @@ static void intel_put_event_constraints(struct cpu_hw_events *cpuc,
         * all events are subject to and must call the
         * put_excl_constraints() routine
         */
-       if (c && cpuc->excl_cntrs)
+       if (cpuc->excl_cntrs)
                intel_put_excl_constraints(cpuc, event);
-
-       /* cleanup dynamic constraint */
-       if (c && (c->flags & PERF_X86_EVENT_DYNAMIC))
-               event->hw.constraint = NULL;
-}
-
-static void intel_commit_scheduling(struct cpu_hw_events *cpuc,
-                                   struct perf_event *event, int cntr)
-{
-       struct intel_excl_cntrs *excl_cntrs = cpuc->excl_cntrs;
-       struct event_constraint *c = event->hw.constraint;
-       struct intel_excl_states *xlo, *xl;
-       int tid = cpuc->excl_thread_id;
-       int o_tid = 1 - tid;
-       int is_excl;
-
-       if (cpuc->is_fake || !c)
-               return;
-
-       is_excl = c->flags & PERF_X86_EVENT_EXCL;
-
-       if (!(c->flags & PERF_X86_EVENT_DYNAMIC))
-               return;
-
-       WARN_ON_ONCE(!excl_cntrs);
-
-       if (!excl_cntrs)
-               return;
-
-       xl = &excl_cntrs->states[tid];
-       xlo = &excl_cntrs->states[o_tid];
-
-       WARN_ON_ONCE(!raw_spin_is_locked(&excl_cntrs->lock));
-
-       if (cntr >= 0) {
-               if (is_excl)
-                       xlo->init_state[cntr] = INTEL_EXCL_EXCLUSIVE;
-               else
-                       xlo->init_state[cntr] = INTEL_EXCL_SHARED;
-       }
 }
 
 static void intel_pebs_aliases_core2(struct perf_event *event)
@@ -2305,8 +2260,15 @@ static int intel_pmu_hw_config(struct perf_event *event)
        if (ret)
                return ret;
 
-       if (event->attr.precise_ip && x86_pmu.pebs_aliases)
-               x86_pmu.pebs_aliases(event);
+       if (event->attr.precise_ip) {
+               if (!event->attr.freq) {
+                       event->hw.flags |= PERF_X86_EVENT_AUTO_RELOAD;
+                       if (!(event->attr.sample_type & ~PEBS_FREERUNNING_FLAGS))
+                               event->hw.flags |= PERF_X86_EVENT_FREERUNNING;
+               }
+               if (x86_pmu.pebs_aliases)
+                       x86_pmu.pebs_aliases(event);
+       }
 
        if (needs_branch_stack(event)) {
                ret = intel_pmu_setup_lbr_filter(event);
@@ -2555,19 +2517,11 @@ struct intel_shared_regs *allocate_shared_regs(int cpu)
 static struct intel_excl_cntrs *allocate_excl_cntrs(int cpu)
 {
        struct intel_excl_cntrs *c;
-       int i;
 
        c = kzalloc_node(sizeof(struct intel_excl_cntrs),
                         GFP_KERNEL, cpu_to_node(cpu));
        if (c) {
                raw_spin_lock_init(&c->lock);
-               for (i = 0; i < X86_PMC_IDX_MAX; i++) {
-                       c->states[0].state[i] = INTEL_EXCL_UNUSED;
-                       c->states[0].init_state[i] = INTEL_EXCL_UNUSED;
-
-                       c->states[1].state[i] = INTEL_EXCL_UNUSED;
-                       c->states[1].init_state[i] = INTEL_EXCL_UNUSED;
-               }
                c->core_id = -1;
        }
        return c;
@@ -2622,7 +2576,7 @@ static void intel_pmu_cpu_starting(int cpu)
        if (!(x86_pmu.flags & PMU_FL_NO_HT_SHARING)) {
                void **onln = &cpuc->kfree_on_online[X86_PERF_KFREE_SHARED];
 
-               for_each_cpu(i, topology_thread_cpumask(cpu)) {
+               for_each_cpu(i, topology_sibling_cpumask(cpu)) {
                        struct intel_shared_regs *pc;
 
                        pc = per_cpu(cpu_hw_events, i).shared_regs;
@@ -2640,9 +2594,7 @@ static void intel_pmu_cpu_starting(int cpu)
                cpuc->lbr_sel = &cpuc->shared_regs->regs[EXTRA_REG_LBR];
 
        if (x86_pmu.flags & PMU_FL_EXCL_CNTRS) {
-               int h = x86_pmu.num_counters >> 1;
-
-               for_each_cpu(i, topology_thread_cpumask(cpu)) {
+               for_each_cpu(i, topology_sibling_cpumask(cpu)) {
                        struct intel_excl_cntrs *c;
 
                        c = per_cpu(cpu_hw_events, i).excl_cntrs;
@@ -2655,11 +2607,6 @@ static void intel_pmu_cpu_starting(int cpu)
                }
                cpuc->excl_cntrs->core_id = core_id;
                cpuc->excl_cntrs->refcnt++;
-               /*
-                * set hard limit to half the number of generic counters
-                */
-               cpuc->excl_cntrs->states[0].max_alloc_cntrs = h;
-               cpuc->excl_cntrs->states[1].max_alloc_cntrs = h;
        }
 }
 
@@ -2695,6 +2642,15 @@ static void intel_pmu_cpu_dying(int cpu)
        fini_debug_store_on_cpu(cpu);
 }
 
+static void intel_pmu_sched_task(struct perf_event_context *ctx,
+                                bool sched_in)
+{
+       if (x86_pmu.pebs_active)
+               intel_pmu_pebs_sched_task(ctx, sched_in);
+       if (x86_pmu.lbr_nr)
+               intel_pmu_lbr_sched_task(ctx, sched_in);
+}
+
 PMU_FORMAT_ATTR(offcore_rsp, "config1:0-63");
 
 PMU_FORMAT_ATTR(ldlat, "config1:0-15");
@@ -2784,7 +2740,7 @@ static __initconst const struct x86_pmu intel_pmu = {
        .cpu_starting           = intel_pmu_cpu_starting,
        .cpu_dying              = intel_pmu_cpu_dying,
        .guest_get_msrs         = intel_guest_get_msrs,
-       .sched_task             = intel_pmu_lbr_sched_task,
+       .sched_task             = intel_pmu_sched_task,
 };
 
 static __init void intel_clovertown_quirk(void)
@@ -2957,8 +2913,8 @@ static __init void intel_ht_bug(void)
 {
        x86_pmu.flags |= PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED;
 
-       x86_pmu.commit_scheduling = intel_commit_scheduling;
        x86_pmu.start_scheduling = intel_start_scheduling;
+       x86_pmu.commit_scheduling = intel_commit_scheduling;
        x86_pmu.stop_scheduling = intel_stop_scheduling;
 }
 
@@ -3271,6 +3227,8 @@ __init int intel_pmu_init(void)
 
        case 61: /* 14nm Broadwell Core-M */
        case 86: /* 14nm Broadwell Xeon D */
+       case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
+       case 79: /* 14nm Broadwell Server */
                x86_pmu.late_ack = true;
                memcpy(hw_cache_event_ids, hsw_hw_cache_event_ids, sizeof(hw_cache_event_ids));
                memcpy(hw_cache_extra_regs, hsw_hw_cache_extra_regs, sizeof(hw_cache_extra_regs));
@@ -3340,13 +3298,13 @@ __init int intel_pmu_init(void)
                 * counter, so do not extend mask to generic counters
                 */
                for_each_event_constraint(c, x86_pmu.event_constraints) {
-                       if (c->cmask != FIXED_EVENT_FLAGS
-                           || c->idxmsk64 == INTEL_PMC_MSK_FIXED_REF_CYCLES) {
-                               continue;
+                       if (c->cmask == FIXED_EVENT_FLAGS
+                           && c->idxmsk64 != INTEL_PMC_MSK_FIXED_REF_CYCLES) {
+                               c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
                        }
-
-                       c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
-                       c->weight += x86_pmu.num_counters;
+                       c->idxmsk64 &=
+                               ~(~0UL << (INTEL_PMC_IDX_FIXED + x86_pmu.num_counters_fixed));
+                       c->weight = hweight64(c->idxmsk64);
                }
        }
 
@@ -3404,7 +3362,7 @@ static __init int fixup_ht_bug(void)
        if (!(x86_pmu.flags & PMU_FL_EXCL_ENABLED))
                return 0;
 
-       w = cpumask_weight(topology_thread_cpumask(cpu));
+       w = cpumask_weight(topology_sibling_cpumask(cpu));
        if (w > 1) {
                pr_info("PMU erratum BJ122, BV98, HSD29 worked around, HT is on\n");
                return 0;
@@ -3414,8 +3372,8 @@ static __init int fixup_ht_bug(void)
 
        x86_pmu.flags &= ~(PMU_FL_EXCL_CNTRS | PMU_FL_EXCL_ENABLED);
 
-       x86_pmu.commit_scheduling = NULL;
        x86_pmu.start_scheduling = NULL;
+       x86_pmu.commit_scheduling = NULL;
        x86_pmu.stop_scheduling = NULL;
 
        watchdog_nmi_enable_all();
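
The constraint rework above masks off counters whose state on the
sibling thread conflicts with the event being scheduled. A hedged,
self-contained sketch of that masking loop; the state array and names
are simplified stand-ins:

	#include <stdint.h>

	enum { SK_UNUSED, SK_SHARED, SK_EXCLUSIVE };

	static uint64_t mask_against_sibling(uint64_t idxmsk,
	                                     const int *sibling_state,
	                                     int nr_counters, int is_excl)
	{
		for (int i = 0; i < nr_counters; i++) {
			/* Sibling runs an exclusive event there: the
			 * counter is unusable for us no matter what. */
			if (sibling_state[i] == SK_EXCLUSIVE)
				idxmsk &= ~(1ULL << i);
			/* We need exclusivity, but the sibling already
			 * measures a shared event on that counter. */
			else if (is_excl && sibling_state[i] == SK_SHARED)
				idxmsk &= ~(1ULL << i);
		}
		return idxmsk;
	}
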
index ac1f0c55f3796e17bdaddc5f946a41509890e446..7795f3f8b1d57198469ded20ac9a1244035428e8 100644 (file)
@@ -483,17 +483,26 @@ static int bts_event_add(struct perf_event *event, int mode)
 
 static void bts_event_destroy(struct perf_event *event)
 {
+       x86_release_hardware();
        x86_del_exclusive(x86_lbr_exclusive_bts);
 }
 
 static int bts_event_init(struct perf_event *event)
 {
+       int ret;
+
        if (event->attr.type != bts_pmu.type)
                return -ENOENT;
 
        if (x86_add_exclusive(x86_lbr_exclusive_bts))
                return -EBUSY;
 
+       ret = x86_reserve_hardware();
+       if (ret) {
+               x86_del_exclusive(x86_lbr_exclusive_bts);
+               return ret;
+       }
+
        event->destroy = bts_event_destroy;
 
        return 0;
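
bts_event_init() now takes two resources in order and unwinds the first
when the second fails, so destroy() can always drop both. A sketch of
that acquire/rollback shape with placeholder resource helpers:

	static int init_sketch(void)
	{
		int ret;

		if (take_resource_a())		/* x86_add_exclusive() */
			return -EBUSY;

		ret = take_resource_b();	/* x86_reserve_hardware() */
		if (ret) {
			drop_resource_a();	/* undo step one */
			return ret;
		}
		return 0;	/* event->destroy() releases both later */
	}
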
index e4d1b8b738fa8a9fc350030c0b41dd75f5309530..188076161c1be51afe135d6289b8ce0e96952bf3 100644 (file)
 #define MSR_IA32_QM_CTR                0x0c8e
 #define MSR_IA32_QM_EVTSEL     0x0c8d
 
-static unsigned int cqm_max_rmid = -1;
+static u32 cqm_max_rmid = -1;
 static unsigned int cqm_l3_scale; /* supposedly cacheline size */
 
-struct intel_cqm_state {
-       raw_spinlock_t          lock;
-       int                     rmid;
-       int                     cnt;
+/**
+ * struct intel_pqr_state - State cache for the PQR MSR
+ * @rmid:              The cached Resource Monitoring ID
+ * @closid:            The cached Class Of Service ID
+ * @rmid_usecnt:       The usage counter for rmid
+ *
+ * The upper 32 bits of MSR_IA32_PQR_ASSOC contain the closid and
+ * the lower 10 bits the rmid.  An update to MSR_IA32_PQR_ASSOC always
+ * contains both parts, so we need to cache them.
+ *
+ * The cache also helps to avoid pointless updates if the value does
+ * not change.
+ */
+struct intel_pqr_state {
+       u32                     rmid;
+       u32                     closid;
+       int                     rmid_usecnt;
 };
 
-static DEFINE_PER_CPU(struct intel_cqm_state, cqm_state);
+/*
+ * The cached intel_pqr_state is strictly per CPU and can never be
+ * updated from a remote CPU. Both functions which modify the state
+ * (intel_cqm_event_start and intel_cqm_event_stop) are called with
+ * interrupts disabled, which is sufficient for the protection.
+ */
+static DEFINE_PER_CPU(struct intel_pqr_state, pqr_state);
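
Per the struct comment above, MSR_IA32_PQR_ASSOC packs the closid into
the upper 32 bits and the rmid into the low bits, which is why both
halves must be cached and written together. A minimal sketch of
composing the written value:

	#include <stdint.h>

	static uint64_t pqr_assoc_val(uint32_t closid, uint32_t rmid)
	{
		/* closid in bits 63:32, rmid in the low bits */
		return ((uint64_t)closid << 32) | rmid;
	}
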
 
 /*
  * Protects cache_cgroups and cqm_rmid_free_lru and cqm_rmid_limbo_lru.
@@ -57,7 +76,7 @@ static cpumask_t cqm_cpumask;
  * near-zero occupancy value, i.e. no cachelines are tagged with this
  * RMID, once __intel_cqm_rmid_rotate() returns.
  */
-static unsigned int intel_cqm_rotation_rmid;
+static u32 intel_cqm_rotation_rmid;
 
 #define INVALID_RMID           (-1)
 
@@ -69,7 +88,7 @@ static unsigned int intel_cqm_rotation_rmid;
  * Likewise, an rmid value of -1 is used to indicate "no rmid currently
  * assigned" and is used as part of the rotation code.
  */
-static inline bool __rmid_valid(unsigned int rmid)
+static inline bool __rmid_valid(u32 rmid)
 {
        if (!rmid || rmid == INVALID_RMID)
                return false;
@@ -77,7 +96,7 @@ static inline bool __rmid_valid(unsigned int rmid)
        return true;
 }
 
-static u64 __rmid_read(unsigned int rmid)
+static u64 __rmid_read(u32 rmid)
 {
        u64 val;
 
@@ -102,7 +121,7 @@ enum rmid_recycle_state {
 };
 
 struct cqm_rmid_entry {
-       unsigned int rmid;
+       u32 rmid;
        enum rmid_recycle_state state;
        struct list_head list;
        unsigned long queue_time;
@@ -147,7 +166,7 @@ static LIST_HEAD(cqm_rmid_limbo_lru);
  */
 static struct cqm_rmid_entry **cqm_rmid_ptrs;
 
-static inline struct cqm_rmid_entry *__rmid_entry(int rmid)
+static inline struct cqm_rmid_entry *__rmid_entry(u32 rmid)
 {
        struct cqm_rmid_entry *entry;
 
@@ -162,7 +181,7 @@ static inline struct cqm_rmid_entry *__rmid_entry(int rmid)
  *
  * We expect to be called with cache_mutex held.
  */
-static int __get_rmid(void)
+static u32 __get_rmid(void)
 {
        struct cqm_rmid_entry *entry;
 
@@ -177,7 +196,7 @@ static int __get_rmid(void)
        return entry->rmid;
 }
 
-static void __put_rmid(unsigned int rmid)
+static void __put_rmid(u32 rmid)
 {
        struct cqm_rmid_entry *entry;
 
@@ -372,7 +391,7 @@ static bool __conflict_event(struct perf_event *a, struct perf_event *b)
 }
 
 struct rmid_read {
-       unsigned int rmid;
+       u32 rmid;
        atomic64_t value;
 };
 
@@ -381,12 +400,11 @@ static void __intel_cqm_event_count(void *info);
 /*
  * Exchange the RMID of a group of events.
  */
-static unsigned int
-intel_cqm_xchg_rmid(struct perf_event *group, unsigned int rmid)
+static u32 intel_cqm_xchg_rmid(struct perf_event *group, u32 rmid)
 {
        struct perf_event *event;
-       unsigned int old_rmid = group->hw.cqm_rmid;
        struct list_head *head = &group->hw.cqm_group_entry;
+       u32 old_rmid = group->hw.cqm_rmid;
 
        lockdep_assert_held(&cache_mutex);
 
@@ -451,7 +469,7 @@ static void intel_cqm_stable(void *arg)
  * If we have group events waiting for an RMID that don't conflict with
  * events already running, assign @rmid.
  */
-static bool intel_cqm_sched_in_event(unsigned int rmid)
+static bool intel_cqm_sched_in_event(u32 rmid)
 {
        struct perf_event *leader, *event;
 
@@ -598,7 +616,7 @@ static bool intel_cqm_rmid_stabilize(unsigned int *available)
 static void __intel_cqm_pick_and_rotate(struct perf_event *next)
 {
        struct perf_event *rotor;
-       unsigned int rmid;
+       u32 rmid;
 
        lockdep_assert_held(&cache_mutex);
 
@@ -626,7 +644,7 @@ static void __intel_cqm_pick_and_rotate(struct perf_event *next)
 static void intel_cqm_sched_out_conflicting_events(struct perf_event *event)
 {
        struct perf_event *group, *g;
-       unsigned int rmid;
+       u32 rmid;
 
        lockdep_assert_held(&cache_mutex);
 
@@ -828,8 +846,8 @@ static void intel_cqm_setup_event(struct perf_event *event,
                                  struct perf_event **group)
 {
        struct perf_event *iter;
-       unsigned int rmid;
        bool conflict = false;
+       u32 rmid;
 
        list_for_each_entry(iter, &cache_groups, hw.cqm_groups_entry) {
                rmid = iter->hw.cqm_rmid;
@@ -860,7 +878,7 @@ static void intel_cqm_setup_event(struct perf_event *event,
 static void intel_cqm_event_read(struct perf_event *event)
 {
        unsigned long flags;
-       unsigned int rmid;
+       u32 rmid;
        u64 val;
 
        /*
@@ -961,55 +979,48 @@ out:
 
 static void intel_cqm_event_start(struct perf_event *event, int mode)
 {
-       struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
-       unsigned int rmid = event->hw.cqm_rmid;
-       unsigned long flags;
+       struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
+       u32 rmid = event->hw.cqm_rmid;
 
        if (!(event->hw.cqm_state & PERF_HES_STOPPED))
                return;
 
        event->hw.cqm_state &= ~PERF_HES_STOPPED;
 
-       raw_spin_lock_irqsave(&state->lock, flags);
-
-       if (state->cnt++)
-               WARN_ON_ONCE(state->rmid != rmid);
-       else
+       if (state->rmid_usecnt++) {
+               if (!WARN_ON_ONCE(state->rmid != rmid))
+                       return;
+       } else {
                WARN_ON_ONCE(state->rmid);
+       }
 
        state->rmid = rmid;
-       wrmsrl(MSR_IA32_PQR_ASSOC, state->rmid);
-
-       raw_spin_unlock_irqrestore(&state->lock, flags);
+       wrmsr(MSR_IA32_PQR_ASSOC, rmid, state->closid);
 }
 
 static void intel_cqm_event_stop(struct perf_event *event, int mode)
 {
-       struct intel_cqm_state *state = this_cpu_ptr(&cqm_state);
-       unsigned long flags;
+       struct intel_pqr_state *state = this_cpu_ptr(&pqr_state);
 
        if (event->hw.cqm_state & PERF_HES_STOPPED)
                return;
 
        event->hw.cqm_state |= PERF_HES_STOPPED;
 
-       raw_spin_lock_irqsave(&state->lock, flags);
        intel_cqm_event_read(event);
 
-       if (!--state->cnt) {
+       if (!--state->rmid_usecnt) {
                state->rmid = 0;
-               wrmsrl(MSR_IA32_PQR_ASSOC, 0);
+               wrmsr(MSR_IA32_PQR_ASSOC, 0, state->closid);
        } else {
                WARN_ON_ONCE(!state->rmid);
        }
-
-       raw_spin_unlock_irqrestore(&state->lock, flags);
 }
 
 static int intel_cqm_event_add(struct perf_event *event, int mode)
 {
        unsigned long flags;
-       unsigned int rmid;
+       u32 rmid;
 
        raw_spin_lock_irqsave(&cache_lock, flags);
 
@@ -1024,11 +1035,6 @@ static int intel_cqm_event_add(struct perf_event *event, int mode)
        return 0;
 }
 
-static void intel_cqm_event_del(struct perf_event *event, int mode)
-{
-       intel_cqm_event_stop(event, mode);
-}
-
 static void intel_cqm_event_destroy(struct perf_event *event)
 {
        struct perf_event *group_other = NULL;
@@ -1057,7 +1063,7 @@ static void intel_cqm_event_destroy(struct perf_event *event)
                        list_replace(&event->hw.cqm_groups_entry,
                                     &group_other->hw.cqm_groups_entry);
                } else {
-                       unsigned int rmid = event->hw.cqm_rmid;
+                       u32 rmid = event->hw.cqm_rmid;
 
                        if (__rmid_valid(rmid))
                                __put_rmid(rmid);
@@ -1221,7 +1227,7 @@ static struct pmu intel_cqm_pmu = {
        .task_ctx_nr         = perf_sw_context,
        .event_init          = intel_cqm_event_init,
        .add                 = intel_cqm_event_add,
-       .del                 = intel_cqm_event_del,
+       .del                 = intel_cqm_event_stop,
        .start               = intel_cqm_event_start,
        .stop                = intel_cqm_event_stop,
        .read                = intel_cqm_event_read,
@@ -1243,12 +1249,12 @@ static inline void cqm_pick_event_reader(int cpu)
 
 static void intel_cqm_cpu_prepare(unsigned int cpu)
 {
-       struct intel_cqm_state *state = &per_cpu(cqm_state, cpu);
+       struct intel_pqr_state *state = &per_cpu(pqr_state, cpu);
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       raw_spin_lock_init(&state->lock);
        state->rmid = 0;
-       state->cnt  = 0;
+       state->closid = 0;
+       state->rmid_usecnt = 0;
 
        WARN_ON(c->x86_cache_max_rmid != cqm_max_rmid);
        WARN_ON(c->x86_cache_occ_scale != cqm_l3_scale);
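
The start/stop rework above drops the per-CPU spinlock in favor of a plain
per-CPU cache of the PQR MSR contents plus a use count, which is safe because
both paths run with interrupts disabled. A standalone model of the refcounted
caching; wrmsr() is stubbed out and the RMID values are illustrative:

    #include <stdio.h>
    #include <stdint.h>

    /* stub standing in for wrmsr(MSR_IA32_PQR_ASSOC, rmid, closid) */
    static void wrmsr_pqr(uint32_t rmid, uint32_t closid)
    {
            printf("PQR_ASSOC <- rmid=%u closid=%u\n", rmid, closid);
    }

    struct pqr_state {
            uint32_t rmid;
            uint32_t closid;
            int rmid_usecnt;
    };

    static void event_start(struct pqr_state *s, uint32_t rmid)
    {
            if (s->rmid_usecnt++)
                    return;                 /* MSR already programmed */
            s->rmid = rmid;
            wrmsr_pqr(rmid, s->closid);     /* one write for the first user */
    }

    static void event_stop(struct pqr_state *s)
    {
            if (!--s->rmid_usecnt) {
                    s->rmid = 0;
                    wrmsr_pqr(0, s->closid);
            }
    }

    int main(void)
    {
            struct pqr_state s = { 0, 0, 0 };

            event_start(&s, 3);
            event_start(&s, 3);             /* cached: no second MSR write */
            event_stop(&s);
            event_stop(&s);                 /* write only on the last stop */
            return 0;
    }

Only the first start and the last stop touch the MSR, which is exactly what
the cache is for.
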
index 813f75d71175e3a117f13ec53efe6856a0508bec..71fc40238843bb0e80ad1be14e38e7f7b3787eb5 100644 (file)
@@ -11,7 +11,7 @@
 #define BTS_RECORD_SIZE                24
 
 #define BTS_BUFFER_SIZE                (PAGE_SIZE << 4)
-#define PEBS_BUFFER_SIZE       PAGE_SIZE
+#define PEBS_BUFFER_SIZE       (PAGE_SIZE << 4)
 #define PEBS_FIXUP_SIZE                PAGE_SIZE
 
 /*
@@ -250,7 +250,7 @@ static int alloc_pebs_buffer(int cpu)
 {
        struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;
        int node = cpu_to_node(cpu);
-       int max, thresh = 1; /* always use a single PEBS record */
+       int max;
        void *buffer, *ibuffer;
 
        if (!x86_pmu.pebs)
@@ -280,9 +280,6 @@ static int alloc_pebs_buffer(int cpu)
        ds->pebs_absolute_maximum = ds->pebs_buffer_base +
                max * x86_pmu.pebs_record_size;
 
-       ds->pebs_interrupt_threshold = ds->pebs_buffer_base +
-               thresh * x86_pmu.pebs_record_size;
-
        return 0;
 }
 
@@ -549,6 +546,19 @@ int intel_pmu_drain_bts_buffer(void)
        return 1;
 }
 
+static inline void intel_pmu_drain_pebs_buffer(void)
+{
+       struct pt_regs regs;
+
+       x86_pmu.drain_pebs(&regs);
+}
+
+void intel_pmu_pebs_sched_task(struct perf_event_context *ctx, bool sched_in)
+{
+       if (!sched_in)
+               intel_pmu_drain_pebs_buffer();
+}
+
 /*
  * PEBS
  */
@@ -684,33 +694,81 @@ struct event_constraint *intel_pebs_constraints(struct perf_event *event)
        return &emptyconstraint;
 }
 
+static inline bool pebs_is_enabled(struct cpu_hw_events *cpuc)
+{
+       return (cpuc->pebs_enabled & ((1ULL << MAX_PEBS_EVENTS) - 1));
+}
+
 void intel_pmu_pebs_enable(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
+       struct debug_store *ds = cpuc->ds;
+       bool first_pebs;
+       u64 threshold;
 
        hwc->config &= ~ARCH_PERFMON_EVENTSEL_INT;
 
+       first_pebs = !pebs_is_enabled(cpuc);
        cpuc->pebs_enabled |= 1ULL << hwc->idx;
 
        if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
                cpuc->pebs_enabled |= 1ULL << (hwc->idx + 32);
        else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
                cpuc->pebs_enabled |= 1ULL << 63;
+
+       /*
+        * When the event is constrained enough we can use a larger
+        * threshold and run the event with less frequent PMIs.
+        */
+       if (hwc->flags & PERF_X86_EVENT_FREERUNNING) {
+               threshold = ds->pebs_absolute_maximum -
+                       x86_pmu.max_pebs_events * x86_pmu.pebs_record_size;
+
+               if (first_pebs)
+                       perf_sched_cb_inc(event->ctx->pmu);
+       } else {
+               threshold = ds->pebs_buffer_base + x86_pmu.pebs_record_size;
+
+               /*
+                * If not all events can use the larger buffer,
+                * roll back to threshold = 1.
+                */
+               if (!first_pebs &&
+                   (ds->pebs_interrupt_threshold > threshold))
+                       perf_sched_cb_dec(event->ctx->pmu);
+       }
+
+       /* Use auto-reload if possible to save a MSR write in the PMI */
+       if (hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) {
+               ds->pebs_event_reset[hwc->idx] =
+                       (u64)(-hwc->sample_period) & x86_pmu.cntval_mask;
+       }
+
+       if (first_pebs || ds->pebs_interrupt_threshold > threshold)
+               ds->pebs_interrupt_threshold = threshold;
 }
 
 void intel_pmu_pebs_disable(struct perf_event *event)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
+       struct debug_store *ds = cpuc->ds;
 
        cpuc->pebs_enabled &= ~(1ULL << hwc->idx);
 
-       if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_LDLAT)
+       if (event->hw.flags & PERF_X86_EVENT_PEBS_LDLAT)
                cpuc->pebs_enabled &= ~(1ULL << (hwc->idx + 32));
-       else if (event->hw.constraint->flags & PERF_X86_EVENT_PEBS_ST)
+       else if (event->hw.flags & PERF_X86_EVENT_PEBS_ST)
                cpuc->pebs_enabled &= ~(1ULL << 63);
 
+       if (ds->pebs_interrupt_threshold >
+           ds->pebs_buffer_base + x86_pmu.pebs_record_size) {
+               intel_pmu_drain_pebs_buffer();
+               if (!pebs_is_enabled(cpuc))
+                       perf_sched_cb_dec(event->ctx->pmu);
+       }
+
        if (cpuc->enabled)
                wrmsrl(MSR_IA32_PEBS_ENABLE, cpuc->pebs_enabled);
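
The interrupt threshold above is plain pointer arithmetic over the DS area:
leave room for one record per counter in the free-running case, or interrupt
after every single record otherwise. A standalone model of the two cases; the
record size and buffer layout are invented, the real values come from x86_pmu:

    #include <stdio.h>
    #include <stdint.h>

    #define PEBS_BUFFER_SIZE (4096 << 4)    /* mirrors PAGE_SIZE << 4 */
    #define PEBS_RECORD_SIZE 192            /* illustrative record size */
    #define MAX_PEBS_EVENTS  8

    int main(void)
    {
            uintptr_t base = 0x1000;                  /* pebs_buffer_base */
            uintptr_t max  = base + PEBS_BUFFER_SIZE; /* absolute maximum */
            uintptr_t threshold;
            int freerunning = 1;

            if (freerunning)
                    /* room for one record per counter before the PMI */
                    threshold = max - MAX_PEBS_EVENTS * PEBS_RECORD_SIZE;
            else
                    /* PMI after every record, the old behaviour */
                    threshold = base + PEBS_RECORD_SIZE;

            printf("interrupt threshold at offset %zu\n",
                   (size_t)(threshold - base));
            return 0;
    }
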
 
@@ -846,8 +904,10 @@ static inline u64 intel_hsw_transaction(struct pebs_record_hsw *pebs)
        return txn;
 }
 
-static void __intel_pmu_pebs_event(struct perf_event *event,
-                                  struct pt_regs *iregs, void *__pebs)
+static void setup_pebs_sample_data(struct perf_event *event,
+                                  struct pt_regs *iregs, void *__pebs,
+                                  struct perf_sample_data *data,
+                                  struct pt_regs *regs)
 {
 #define PERF_X86_EVENT_PEBS_HSW_PREC \
                (PERF_X86_EVENT_PEBS_ST_HSW | \
@@ -859,13 +919,11 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         */
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct pebs_record_hsw *pebs = __pebs;
-       struct perf_sample_data data;
-       struct pt_regs regs;
        u64 sample_type;
        int fll, fst, dsrc;
        int fl = event->hw.flags;
 
-       if (!intel_pmu_save_and_restart(event))
+       if (pebs == NULL)
                return;
 
        sample_type = event->attr.sample_type;
@@ -874,15 +932,15 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
        fll = fl & PERF_X86_EVENT_PEBS_LDLAT;
        fst = fl & (PERF_X86_EVENT_PEBS_ST | PERF_X86_EVENT_PEBS_HSW_PREC);
 
-       perf_sample_data_init(&data, 0, event->hw.last_period);
+       perf_sample_data_init(data, 0, event->hw.last_period);
 
-       data.period = event->hw.last_period;
+       data->period = event->hw.last_period;
 
        /*
         * Use latency for weight (only avail with PEBS-LL)
         */
        if (fll && (sample_type & PERF_SAMPLE_WEIGHT))
-               data.weight = pebs->lat;
+               data->weight = pebs->lat;
 
        /*
         * data.data_src encodes the data source
@@ -895,7 +953,7 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
                        val = precise_datala_hsw(event, pebs->dse);
                else if (fst)
                        val = precise_store_data(pebs->dse);
-               data.data_src.val = val;
+               data->data_src.val = val;
        }
 
        /*
@@ -908,61 +966,123 @@ static void __intel_pmu_pebs_event(struct perf_event *event,
         * PERF_SAMPLE_IP and PERF_SAMPLE_CALLCHAIN to function properly.
         * A possible PERF_SAMPLE_REGS will have to transfer all regs.
         */
-       regs = *iregs;
-       regs.flags = pebs->flags;
-       set_linear_ip(&regs, pebs->ip);
-       regs.bp = pebs->bp;
-       regs.sp = pebs->sp;
+       *regs = *iregs;
+       regs->flags = pebs->flags;
+       set_linear_ip(regs, pebs->ip);
+       regs->bp = pebs->bp;
+       regs->sp = pebs->sp;
 
        if (sample_type & PERF_SAMPLE_REGS_INTR) {
-               regs.ax = pebs->ax;
-               regs.bx = pebs->bx;
-               regs.cx = pebs->cx;
-               regs.dx = pebs->dx;
-               regs.si = pebs->si;
-               regs.di = pebs->di;
-               regs.bp = pebs->bp;
-               regs.sp = pebs->sp;
-
-               regs.flags = pebs->flags;
+               regs->ax = pebs->ax;
+               regs->bx = pebs->bx;
+               regs->cx = pebs->cx;
+               regs->dx = pebs->dx;
+               regs->si = pebs->si;
+               regs->di = pebs->di;
+               regs->bp = pebs->bp;
+               regs->sp = pebs->sp;
+
+               regs->flags = pebs->flags;
 #ifndef CONFIG_X86_32
-               regs.r8 = pebs->r8;
-               regs.r9 = pebs->r9;
-               regs.r10 = pebs->r10;
-               regs.r11 = pebs->r11;
-               regs.r12 = pebs->r12;
-               regs.r13 = pebs->r13;
-               regs.r14 = pebs->r14;
-               regs.r15 = pebs->r15;
+               regs->r8 = pebs->r8;
+               regs->r9 = pebs->r9;
+               regs->r10 = pebs->r10;
+               regs->r11 = pebs->r11;
+               regs->r12 = pebs->r12;
+               regs->r13 = pebs->r13;
+               regs->r14 = pebs->r14;
+               regs->r15 = pebs->r15;
 #endif
        }
 
        if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format >= 2) {
-               regs.ip = pebs->real_ip;
-               regs.flags |= PERF_EFLAGS_EXACT;
-       } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(&regs))
-               regs.flags |= PERF_EFLAGS_EXACT;
+               regs->ip = pebs->real_ip;
+               regs->flags |= PERF_EFLAGS_EXACT;
+       } else if (event->attr.precise_ip > 1 && intel_pmu_pebs_fixup_ip(regs))
+               regs->flags |= PERF_EFLAGS_EXACT;
        else
-               regs.flags &= ~PERF_EFLAGS_EXACT;
+               regs->flags &= ~PERF_EFLAGS_EXACT;
 
        if ((sample_type & PERF_SAMPLE_ADDR) &&
            x86_pmu.intel_cap.pebs_format >= 1)
-               data.addr = pebs->dla;
+               data->addr = pebs->dla;
 
        if (x86_pmu.intel_cap.pebs_format >= 2) {
                /* Only set the TSX weight when no memory weight. */
                if ((sample_type & PERF_SAMPLE_WEIGHT) && !fll)
-                       data.weight = intel_hsw_weight(pebs);
+                       data->weight = intel_hsw_weight(pebs);
 
                if (sample_type & PERF_SAMPLE_TRANSACTION)
-                       data.txn = intel_hsw_transaction(pebs);
+                       data->txn = intel_hsw_transaction(pebs);
        }
 
        if (has_branch_stack(event))
-               data.br_stack = &cpuc->lbr_stack;
+               data->br_stack = &cpuc->lbr_stack;
+}
+
+static inline void *
+get_next_pebs_record_by_bit(void *base, void *top, int bit)
+{
+       struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+       void *at;
+       u64 pebs_status;
+
+       if (base == NULL)
+               return NULL;
+
+       for (at = base; at < top; at += x86_pmu.pebs_record_size) {
+               struct pebs_record_nhm *p = at;
 
-       if (perf_event_overflow(event, &data, &regs))
+               if (test_bit(bit, (unsigned long *)&p->status)) {
+                       /* PEBS v3 has accurate status bits */
+                       if (x86_pmu.intel_cap.pebs_format >= 3)
+                               return at;
+
+                       if (p->status == (1 << bit))
+                               return at;
+
+                       /* clear non-PEBS bit and re-check */
+                       pebs_status = p->status & cpuc->pebs_enabled;
+                       pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1;
+                       if (pebs_status == (1 << bit))
+                               return at;
+               }
+       }
+       return NULL;
+}
+
+static void __intel_pmu_pebs_event(struct perf_event *event,
+                                  struct pt_regs *iregs,
+                                  void *base, void *top,
+                                  int bit, int count)
+{
+       struct perf_sample_data data;
+       struct pt_regs regs;
+       void *at = get_next_pebs_record_by_bit(base, top, bit);
+
+       if (!intel_pmu_save_and_restart(event) &&
+           !(event->hw.flags & PERF_X86_EVENT_AUTO_RELOAD))
+               return;
+
+       while (count > 1) {
+               setup_pebs_sample_data(event, iregs, at, &data, &regs);
+               perf_event_output(event, &data, &regs);
+               at += x86_pmu.pebs_record_size;
+               at = get_next_pebs_record_by_bit(at, top, bit);
+               count--;
+       }
+
+       setup_pebs_sample_data(event, iregs, at, &data, &regs);
+
+       /*
+        * All but the last record are processed above.
+        * The last one is left so that the overflow handler can be called.
+        */
+       if (perf_event_overflow(event, &data, &regs)) {
                x86_pmu_stop(event, 0);
+               return;
+       }
+
 }
 
 static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
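
get_next_pebs_record_by_bit() is a linear scan over fixed-size records that
attributes a record to a counter only when exactly one enabled status bit
matches. A self-contained model of that filter; the record layout and masks
are invented, and PEBS v3's accurate status bits are ignored here:

    #include <stdio.h>
    #include <stdint.h>

    struct rec { uint64_t status; };   /* stand-in for pebs_record_nhm */

    static struct rec *next_record_by_bit(struct rec *at, struct rec *top,
                                          int bit, uint64_t enabled_mask)
    {
            for (; at < top; at++) {
                    uint64_t status = at->status & enabled_mask;

                    /* an exact single-bit match attributes the record */
                    if (status == (1ULL << bit))
                            return at;
            }
            return NULL;
    }

    int main(void)
    {
            struct rec buf[] = { { 0x2 }, { 0x1 }, { 0x3 }, { 0x1 } };
            struct rec *top = buf + 4, *at = buf;
            int hits = 0;

            while ((at = next_record_by_bit(at, top, 0, 0xff))) {
                    hits++;            /* records belonging to bit 0 */
                    at++;
            }
            /* prints 2: the collided 0x3 record is skipped */
            printf("%d records for bit 0\n", hits);
            return 0;
    }
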
@@ -992,72 +1112,99 @@ static void intel_pmu_drain_pebs_core(struct pt_regs *iregs)
        if (!event->attr.precise_ip)
                return;
 
-       n = top - at;
+       n = (top - at) / x86_pmu.pebs_record_size;
        if (n <= 0)
                return;
 
-       /*
-        * Should not happen, we program the threshold at 1 and do not
-        * set a reset value.
-        */
-       WARN_ONCE(n > 1, "bad leftover pebs %d\n", n);
-       at += n - 1;
-
-       __intel_pmu_pebs_event(event, iregs, at);
+       __intel_pmu_pebs_event(event, iregs, at, top, 0, n);
 }
 
 static void intel_pmu_drain_pebs_nhm(struct pt_regs *iregs)
 {
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct debug_store *ds = cpuc->ds;
-       struct perf_event *event = NULL;
-       void *at, *top;
-       u64 status = 0;
-       int bit;
+       struct perf_event *event;
+       void *base, *at, *top;
+       short counts[MAX_PEBS_EVENTS] = {};
+       short error[MAX_PEBS_EVENTS] = {};
+       int bit, i;
 
        if (!x86_pmu.pebs_active)
                return;
 
-       at  = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
+       base = (struct pebs_record_nhm *)(unsigned long)ds->pebs_buffer_base;
        top = (struct pebs_record_nhm *)(unsigned long)ds->pebs_index;
 
        ds->pebs_index = ds->pebs_buffer_base;
 
-       if (unlikely(at > top))
+       if (unlikely(base >= top))
                return;
 
-       /*
-        * Should not happen, we program the threshold at 1 and do not
-        * set a reset value.
-        */
-       WARN_ONCE(top - at > x86_pmu.max_pebs_events * x86_pmu.pebs_record_size,
-                 "Unexpected number of pebs records %ld\n",
-                 (long)(top - at) / x86_pmu.pebs_record_size);
-
-       for (; at < top; at += x86_pmu.pebs_record_size) {
+       for (at = base; at < top; at += x86_pmu.pebs_record_size) {
                struct pebs_record_nhm *p = at;
 
-               for_each_set_bit(bit, (unsigned long *)&p->status,
-                                x86_pmu.max_pebs_events) {
-                       event = cpuc->events[bit];
-                       if (!test_bit(bit, cpuc->active_mask))
-                               continue;
-
-                       WARN_ON_ONCE(!event);
+               /* PEBS v3 has accurate status bits */
+               if (x86_pmu.intel_cap.pebs_format >= 3) {
+                       for_each_set_bit(bit, (unsigned long *)&p->status,
+                                        MAX_PEBS_EVENTS)
+                               counts[bit]++;
 
-                       if (!event->attr.precise_ip)
-                               continue;
+                       continue;
+               }
 
-                       if (__test_and_set_bit(bit, (unsigned long *)&status))
+               bit = find_first_bit((unsigned long *)&p->status,
+                                       x86_pmu.max_pebs_events);
+               if (bit >= x86_pmu.max_pebs_events)
+                       continue;
+               if (!test_bit(bit, cpuc->active_mask))
+                       continue;
+               /*
+                * The PEBS hardware does not deal well with events that
+                * happen close to each other and set multiple status bits,
+                * but this should happen rarely.
+                *
+                * If the events include one PEBS and several non-PEBS
+                * events, the PEBS record is unaffected and is handled
+                * normally. (slow path)
+                *
+                * If the events include two or more PEBS events, their
+                * records can be collapsed into a single one, making it
+                * impossible to reconstruct all the events that caused
+                * the record. This is called a collision; when it
+                * happens, the record is dropped.
+                */
+               if (p->status != (1 << bit)) {
+                       u64 pebs_status;
+
+                       /* slow path */
+                       pebs_status = p->status & cpuc->pebs_enabled;
+                       pebs_status &= (1ULL << MAX_PEBS_EVENTS) - 1;
+                       if (pebs_status != (1 << bit)) {
+                               for_each_set_bit(i, (unsigned long *)&pebs_status,
+                                                MAX_PEBS_EVENTS)
+                                       error[i]++;
                                continue;
-
-                       break;
+                       }
                }
+               counts[bit]++;
+       }
 
-               if (!event || bit >= x86_pmu.max_pebs_events)
+       for (bit = 0; bit < x86_pmu.max_pebs_events; bit++) {
+               if ((counts[bit] == 0) && (error[bit] == 0))
                        continue;
+               event = cpuc->events[bit];
+               WARN_ON_ONCE(!event);
+               WARN_ON_ONCE(!event->attr.precise_ip);
 
-               __intel_pmu_pebs_event(event, iregs, at);
+               /* log dropped samples number */
+               if (error[bit])
+                       perf_log_lost_samples(event, error[bit]);
+
+               if (counts[bit]) {
+                       __intel_pmu_pebs_event(event, iregs, base,
+                                              top, bit, counts[bit]);
+               }
        }
 }
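
The rewritten drain loop works in two passes: walk the buffer once to tally
good records and collisions per counter bit, then emit the samples and report
the drops through perf_log_lost_samples(). A compilable sketch of that
bookkeeping; the status words and the logging stub are invented:

    #include <stdio.h>
    #include <stdint.h>

    #define MAX_PEBS_EVENTS 8

    /* stub standing in for perf_log_lost_samples() */
    static void log_lost(int bit, short n)
    {
            printf("bit %d: %d collided record(s) dropped\n", bit, n);
    }

    int main(void)
    {
            uint64_t statuses[] = { 0x1, 0x1, 0x6, 0x4 };
            short counts[MAX_PEBS_EVENTS] = { 0 };
            short error[MAX_PEBS_EVENTS] = { 0 };
            int i, bit;

            /* pass 1: tally per-bit hits, count collisions as errors */
            for (i = 0; i < 4; i++) {
                    uint64_t s = statuses[i];

                    if (s && !(s & (s - 1)))        /* single bit set? */
                            counts[__builtin_ctzll(s)]++;   /* gcc/clang */
                    else
                            for (bit = 0; bit < MAX_PEBS_EVENTS; bit++)
                                    if (s & (1ULL << bit))
                                            error[bit]++;
            }

            /* pass 2: emit samples and report the dropped records */
            for (bit = 0; bit < MAX_PEBS_EVENTS; bit++) {
                    if (error[bit])
                            log_lost(bit, error[bit]);
                    if (counts[bit])
                            printf("bit %d: %d sample(s)\n", bit, counts[bit]);
            }
            return 0;
    }
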
 
index 94e5b506caa6d13206956095e646bfacbf558fb1..452a7bd2dedb6b72e98fad8da0cb8f2e0d2f32c3 100644 (file)
@@ -96,6 +96,7 @@ enum {
        X86_BR_NO_TX            = 1 << 14,/* not in transaction */
        X86_BR_ZERO_CALL        = 1 << 15,/* zero length call */
        X86_BR_CALL_STACK       = 1 << 16,/* call stack */
+       X86_BR_IND_JMP          = 1 << 17,/* indirect jump */
 };
 
 #define X86_BR_PLM (X86_BR_USER | X86_BR_KERNEL)
@@ -113,6 +114,7 @@ enum {
         X86_BR_IRQ      |\
         X86_BR_ABORT    |\
         X86_BR_IND_CALL |\
+        X86_BR_IND_JMP  |\
         X86_BR_ZERO_CALL)
 
 #define X86_BR_ALL (X86_BR_PLM | X86_BR_ANY)
@@ -262,9 +264,6 @@ void intel_pmu_lbr_sched_task(struct perf_event_context *ctx, bool sched_in)
        struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
        struct x86_perf_task_context *task_ctx;
 
-       if (!x86_pmu.lbr_nr)
-               return;
-
        /*
         * If LBR callstack feature is enabled and the stack was saved when
         * the task was scheduled out, restore the stack. Otherwise flush
@@ -523,6 +522,9 @@ static int intel_pmu_setup_sw_lbr_filter(struct perf_event *event)
                        X86_BR_CALL_STACK;
        }
 
+       if (br_type & PERF_SAMPLE_BRANCH_IND_JUMP)
+               mask |= X86_BR_IND_JMP;
+
        /*
         * stash actual user request into reg, it may
         * be used by fixup code for some CPU
@@ -736,7 +738,7 @@ static int branch_type(unsigned long from, unsigned long to, int abort)
                        break;
                case 4:
                case 5:
-                       ret = X86_BR_JMP;
+                       ret = X86_BR_IND_JMP;
                        break;
                }
                break;
@@ -844,6 +846,7 @@ static const int nhm_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
         */
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT] = LBR_IND_CALL | LBR_IND_JMP,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]     = LBR_JCC,
+       [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT] = LBR_IND_JMP,
 };
 
 static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -856,6 +859,7 @@ static const int snb_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
                                                | LBR_FAR,
        [PERF_SAMPLE_BRANCH_IND_CALL_SHIFT]     = LBR_IND_CALL,
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
+       [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
 };
 
 static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
@@ -870,6 +874,7 @@ static const int hsw_lbr_sel_map[PERF_SAMPLE_BRANCH_MAX_SHIFT] = {
        [PERF_SAMPLE_BRANCH_COND_SHIFT]         = LBR_JCC,
        [PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT]   = LBR_REL_CALL | LBR_IND_CALL
                                                | LBR_RETURN | LBR_CALL_STACK,
+       [PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT]     = LBR_IND_JMP,
 };
 
 /* core */
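
With the plumbing above in place, userspace can ask the LBRs for indirect
jumps only. A minimal perf_event_open() sketch, assuming a kernel and headers
new enough to define PERF_SAMPLE_BRANCH_IND_JUMP; the period and event choice
are arbitrary:

    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>
    #include <sys/syscall.h>
    #include <linux/perf_event.h>

    int main(void)
    {
            struct perf_event_attr attr;
            int fd;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.sample_period = 100000;
            attr.sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_BRANCH_STACK;
            /* filter the recorded branches down to indirect jumps */
            attr.branch_sample_type = PERF_SAMPLE_BRANCH_IND_JUMP |
                                      PERF_SAMPLE_BRANCH_USER;

            fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
            if (fd < 0)
                    perror("perf_event_open");
            else
                    close(fd);
            return 0;
    }
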
index ffe666c2c6b58657b5895948a2e7d69f95223521..159887c3a89d66a4aaad415ddc069b25904b5676 100644 (file)
@@ -151,7 +151,7 @@ static int __init pt_pmu_hw_init(void)
 
                de_attr->attr.attr.name = pt_caps[i].name;
 
-               sysfs_attr_init(&de_attrs->attr.attr);
+               sysfs_attr_init(&de_attr->attr.attr);
 
                de_attr->attr.attr.mode         = S_IRUGO;
                de_attr->attr.show              = pt_cap_show;
@@ -187,15 +187,6 @@ static bool pt_event_valid(struct perf_event *event)
  * These all are cpu affine and operate on a local PT
  */
 
-static bool pt_is_running(void)
-{
-       u64 ctl;
-
-       rdmsrl(MSR_IA32_RTIT_CTL, ctl);
-
-       return !!(ctl & RTIT_CTL_TRACEEN);
-}
-
 static void pt_config(struct perf_event *event)
 {
        u64 reg;
@@ -609,16 +600,19 @@ static unsigned int pt_topa_next_entry(struct pt_buffer *buf, unsigned int pg)
  * @handle:    Current output handle.
  *
  * Place INT and STOP marks to prevent overwriting old data that the consumer
- * hasn't yet collected.
+ * hasn't yet collected, and to wake up the consumer after a certain fraction
+ * of the buffer has filled up. Only needed and sensible for non-snapshot
+ * counters.
+ *
+ * This obviously relies on buf::head to figure out buffer markers, so it has
+ * to be called after pt_buffer_reset_offsets() and before the hardware tracing
+ * is enabled.
  */
 static int pt_buffer_reset_markers(struct pt_buffer *buf,
                                   struct perf_output_handle *handle)
 
 {
-       unsigned long idx, npages, end;
-
-       if (buf->snapshot)
-               return 0;
+       unsigned long head = local64_read(&buf->head);
+       unsigned long idx, npages, wakeup;
 
        /* can't stop in the middle of an output region */
        if (buf->output_off + handle->size + 1 <
@@ -634,17 +628,26 @@ static int pt_buffer_reset_markers(struct pt_buffer *buf,
        buf->topa_index[buf->stop_pos]->stop = 0;
        buf->topa_index[buf->intr_pos]->intr = 0;
 
-       if (pt_cap_get(PT_CAP_topa_multiple_entries)) {
-               npages = (handle->size + 1) >> PAGE_SHIFT;
-               end = (local64_read(&buf->head) >> PAGE_SHIFT) + npages;
-               /*if (end > handle->wakeup >> PAGE_SHIFT)
-                 end = handle->wakeup >> PAGE_SHIFT;*/
-               idx = end & (buf->nr_pages - 1);
-               buf->stop_pos = idx;
-               idx = (local64_read(&buf->head) >> PAGE_SHIFT) + npages - 1;
-               idx &= buf->nr_pages - 1;
-               buf->intr_pos = idx;
-       }
+       /* how many pages till the STOP marker */
+       npages = handle->size >> PAGE_SHIFT;
+
+       /* if it's on a page boundary, fill up one more page */
+       if (!offset_in_page(head + handle->size + 1))
+               npages++;
+
+       idx = (head >> PAGE_SHIFT) + npages;
+       idx &= buf->nr_pages - 1;
+       buf->stop_pos = idx;
+
+       wakeup = handle->wakeup >> PAGE_SHIFT;
+
+       /* in the worst case, wake up the consumer one page before hard stop */
+       idx = (head >> PAGE_SHIFT) + npages - 1;
+       if (idx > wakeup)
+               idx = wakeup;
+
+       idx &= buf->nr_pages - 1;
+       buf->intr_pos = idx;
 
        buf->topa_index[buf->stop_pos]->stop = 1;
        buf->topa_index[buf->intr_pos]->intr = 1;
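
The marker placement above is page arithmetic on the ring buffer: the STOP
page sits just past the usable size (rounded up when the stop offset lands on
a page boundary), and the INT page is clamped so the consumer is woken at
most one page before the hard stop. A standalone model with invented values:

    #include <stdio.h>

    #define PAGE_SHIFT 12

    int main(void)
    {
            unsigned long head = 0x5000, size = 0x3001, wakeup_pg = 6;
            unsigned long nr_pages = 8;     /* ring size, power of two */
            unsigned long npages = size >> PAGE_SHIFT;
            unsigned long stop, intr;

            /* stop offset on a page boundary: fill up one more page */
            if (!((head + size + 1) & ((1UL << PAGE_SHIFT) - 1)))
                    npages++;

            stop = ((head >> PAGE_SHIFT) + npages) & (nr_pages - 1);

            /* wake the consumer at most one page before the hard stop */
            intr = (head >> PAGE_SHIFT) + npages - 1;
            if (intr > wakeup_pg)
                    intr = wakeup_pg;
            intr &= nr_pages - 1;

            printf("STOP page %lu, INT page %lu\n", stop, intr);
            return 0;
    }
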
@@ -664,7 +667,7 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
        struct topa *cur = buf->first, *prev = buf->last;
        struct topa_entry *te_cur = TOPA_ENTRY(cur, 0),
                *te_prev = TOPA_ENTRY(prev, prev->last - 1);
-       int pg = 0, idx = 0, ntopa = 0;
+       int pg = 0, idx = 0;
 
        while (pg < buf->nr_pages) {
                int tidx;
@@ -679,9 +682,9 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
                        /* advance to next topa table */
                        idx = 0;
                        cur = list_entry(cur->list.next, struct topa, list);
-                       ntopa++;
-               } else
+               } else {
                        idx++;
+               }
                te_cur = TOPA_ENTRY(cur, idx);
        }
 
@@ -693,7 +696,14 @@ static void pt_buffer_setup_topa_index(struct pt_buffer *buf)
  * @head:      Write pointer (aux_head) from AUX buffer.
  *
  * Find the ToPA table and entry corresponding to given @head and set buffer's
- * "current" pointers accordingly.
+ * "current" pointers accordingly. This is done after we have obtained the
+ * current aux_head position from a successful call to perf_aux_output_begin()
+ * to make sure the hardware is writing to the right place.
+ *
+ * This function modifies buf::{cur,cur_idx,output_off}, which will be
+ * programmed into the PT MSRs when tracing is enabled, as well as buf::head
+ * and buf::data_size, which are used to determine INT and STOP marker
+ * locations by a subsequent
+ * call to pt_buffer_reset_markers().
  */
 static void pt_buffer_reset_offsets(struct pt_buffer *buf, unsigned long head)
 {
@@ -891,6 +901,7 @@ void intel_pt_interrupt(void)
                }
 
                pt_buffer_reset_offsets(buf, pt->handle.head);
+               /* snapshot counters don't use PMI, so it's safe */
                ret = pt_buffer_reset_markers(buf, &pt->handle);
                if (ret) {
                        perf_aux_output_end(&pt->handle, 0, true);
@@ -913,7 +924,7 @@ static void pt_event_start(struct perf_event *event, int mode)
        struct pt *pt = this_cpu_ptr(&pt_ctx);
        struct pt_buffer *buf = perf_get_aux(&pt->handle);
 
-       if (pt_is_running() || !buf || pt_buffer_is_full(buf, pt)) {
+       if (!buf || pt_buffer_is_full(buf, pt)) {
                event->hw.state = PERF_HES_STOPPED;
                return;
        }
@@ -944,7 +955,6 @@ static void pt_event_stop(struct perf_event *event, int mode)
        event->hw.state = PERF_HES_STOPPED;
 
        if (mode & PERF_EF_UPDATE) {
-               struct pt *pt = this_cpu_ptr(&pt_ctx);
                struct pt_buffer *buf = perf_get_aux(&pt->handle);
 
                if (!buf)
index 999289b94025623415693df205054e754e9a7d4b..5cbd4e64feb582d927c6751914757a6784d68a60 100644 (file)
@@ -204,9 +204,8 @@ again:
 
 static void rapl_start_hrtimer(struct rapl_pmu *pmu)
 {
-       __hrtimer_start_range_ns(&pmu->hrtimer,
-                       pmu->timer_interval, 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&pmu->hrtimer, pmu->timer_interval,
+                    HRTIMER_MODE_REL_PINNED);
 }
 
 static void rapl_stop_hrtimer(struct rapl_pmu *pmu)
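
Both hrtimer call sites in this series collapse the old five-argument
__hrtimer_start_range_ns(timer, interval, 0, mode, 0) into the two-value
hrtimer_start(). A kernel-style sketch of the new calling convention; module
boilerplate is omitted and the 10ms interval is illustrative:

    #include <linux/hrtimer.h>
    #include <linux/ktime.h>

    static struct hrtimer poll_timer;

    static enum hrtimer_restart poll_fn(struct hrtimer *t)
    {
            hrtimer_forward_now(t, ms_to_ktime(10));
            return HRTIMER_RESTART;
    }

    static void poll_start(void)
    {
            hrtimer_init(&poll_timer, CLOCK_MONOTONIC,
                         HRTIMER_MODE_REL_PINNED);
            poll_timer.function = poll_fn;
            /* new API: no range or wakeup arguments, the mode says it all */
            hrtimer_start(&poll_timer, ms_to_ktime(10),
                          HRTIMER_MODE_REL_PINNED);
    }
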
@@ -722,6 +721,7 @@ static int __init rapl_pmu_init(void)
                break;
        case 60: /* Haswell */
        case 69: /* Haswell-Celeron */
+       case 61: /* Broadwell */
                rapl_cntr_mask = RAPL_IDX_HSW;
                rapl_pmu_events_group.attrs = rapl_events_hsw_attr;
                break;
index c635b8b49e931e7926efc3dc96475a8c577958e0..21b5e38c921b7a78102a2adbabf06328b56dbf9b 100644 (file)
@@ -233,9 +233,8 @@ static enum hrtimer_restart uncore_pmu_hrtimer(struct hrtimer *hrtimer)
 
 void uncore_pmu_start_hrtimer(struct intel_uncore_box *box)
 {
-       __hrtimer_start_range_ns(&box->hrtimer,
-                       ns_to_ktime(box->hrtimer_duration), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&box->hrtimer, ns_to_ktime(box->hrtimer_duration),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 void uncore_pmu_cancel_hrtimer(struct intel_uncore_box *box)
@@ -365,9 +364,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        bitmap_zero(used_mask, UNCORE_PMC_IDX_MAX);
 
        for (i = 0, wmin = UNCORE_PMC_IDX_MAX, wmax = 0; i < n; i++) {
-               hwc = &box->event_list[i]->hw;
                c = uncore_get_event_constraint(box, box->event_list[i]);
-               hwc->constraint = c;
+               box->event_constraint[i] = c;
                wmin = min(wmin, c->weight);
                wmax = max(wmax, c->weight);
        }
@@ -375,7 +373,7 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        /* fastpath, try to reuse previous register */
        for (i = 0; i < n; i++) {
                hwc = &box->event_list[i]->hw;
-               c = hwc->constraint;
+               c = box->event_constraint[i];
 
                /* never assigned */
                if (hwc->idx == -1)
@@ -395,8 +393,8 @@ static int uncore_assign_events(struct intel_uncore_box *box, int assign[], int
        }
        /* slow path */
        if (i != n)
-               ret = perf_assign_events(box->event_list, n,
-                                        wmin, wmax, assign);
+               ret = perf_assign_events(box->event_constraint, n,
+                                        wmin, wmax, n, assign);
 
        if (!assign || ret) {
                for (i = 0; i < n; i++)
@@ -840,6 +838,7 @@ static int uncore_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id
        box->phys_id = phys_id;
        box->pci_dev = pdev;
        box->pmu = pmu;
+       uncore_box_init(box);
        pci_set_drvdata(pdev, box);
 
        raw_spin_lock(&uncore_box_lock);
@@ -922,6 +921,9 @@ static int __init uncore_pci_init(void)
        case 69: /* Haswell Celeron */
                ret = hsw_uncore_pci_init();
                break;
+       case 61: /* Broadwell */
+               ret = bdw_uncore_pci_init();
+               break;
        default:
                return 0;
        }
@@ -1003,8 +1005,10 @@ static int uncore_cpu_starting(int cpu)
                        pmu = &type->pmus[j];
                        box = *per_cpu_ptr(pmu->box, cpu);
                        /* called by uncore_cpu_init? */
-                       if (box && box->phys_id >= 0)
+                       if (box && box->phys_id >= 0) {
+                               uncore_box_init(box);
                                continue;
+                       }
 
                        for_each_online_cpu(k) {
                                exist = *per_cpu_ptr(pmu->box, k);
@@ -1020,8 +1024,10 @@ static int uncore_cpu_starting(int cpu)
                                }
                        }
 
-                       if (box)
+                       if (box) {
                                box->phys_id = phys_id;
+                               uncore_box_init(box);
+                       }
                }
        }
        return 0;
index 6c8c1e7e69d85d3ad217eada0f0e55573c3daaf0..0f77f0a196e488b7e617ed2cabd0246cdab2611d 100644 (file)
@@ -97,6 +97,7 @@ struct intel_uncore_box {
        atomic_t refcnt;
        struct perf_event *events[UNCORE_PMC_IDX_MAX];
        struct perf_event *event_list[UNCORE_PMC_IDX_MAX];
+       struct event_constraint *event_constraint[UNCORE_PMC_IDX_MAX];
        unsigned long active_mask[BITS_TO_LONGS(UNCORE_PMC_IDX_MAX)];
        u64 tags[UNCORE_PMC_IDX_MAX];
        struct pci_dev *pci_dev;
@@ -257,14 +258,6 @@ static inline int uncore_num_counters(struct intel_uncore_box *box)
        return box->pmu->type->num_counters;
 }
 
-static inline void uncore_box_init(struct intel_uncore_box *box)
-{
-       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
-               if (box->pmu->type->ops->init_box)
-                       box->pmu->type->ops->init_box(box);
-       }
-}
-
 static inline void uncore_disable_box(struct intel_uncore_box *box)
 {
        if (box->pmu->type->ops->disable_box)
@@ -273,8 +266,6 @@ static inline void uncore_disable_box(struct intel_uncore_box *box)
 
 static inline void uncore_enable_box(struct intel_uncore_box *box)
 {
-       uncore_box_init(box);
-
        if (box->pmu->type->ops->enable_box)
                box->pmu->type->ops->enable_box(box);
 }
@@ -297,6 +288,14 @@ static inline u64 uncore_read_counter(struct intel_uncore_box *box,
        return box->pmu->type->ops->read_counter(box, event);
 }
 
+static inline void uncore_box_init(struct intel_uncore_box *box)
+{
+       if (!test_and_set_bit(UNCORE_BOX_FLAG_INITIATED, &box->flags)) {
+               if (box->pmu->type->ops->init_box)
+                       box->pmu->type->ops->init_box(box);
+       }
+}
+
 static inline bool uncore_box_is_fake(struct intel_uncore_box *box)
 {
        return (box->phys_id < 0);
@@ -326,6 +325,7 @@ extern struct event_constraint uncore_constraint_empty;
 int snb_uncore_pci_init(void);
 int ivb_uncore_pci_init(void);
 int hsw_uncore_pci_init(void);
+int bdw_uncore_pci_init(void);
 void snb_uncore_cpu_init(void);
 void nhm_uncore_cpu_init(void);
 
index 4562e9e22c60600a89f706c3b8c3cfb269636060..b005a78c701286e3c460b1f70fde54d8116fd91e 100644 (file)
@@ -7,6 +7,7 @@
 #define PCI_DEVICE_ID_INTEL_IVB_E3_IMC 0x0150
 #define PCI_DEVICE_ID_INTEL_HSW_IMC    0x0c00
 #define PCI_DEVICE_ID_INTEL_HSW_U_IMC  0x0a04
+#define PCI_DEVICE_ID_INTEL_BDW_IMC    0x1604
 
 /* SNB event control */
 #define SNB_UNC_CTL_EV_SEL_MASK                        0x000000ff
@@ -486,6 +487,14 @@ static const struct pci_device_id hsw_uncore_pci_ids[] = {
        { /* end: all zeroes */ },
 };
 
+static const struct pci_device_id bdw_uncore_pci_ids[] = {
+       { /* IMC */
+               PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_BDW_IMC),
+               .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
+       },
+       { /* end: all zeroes */ },
+};
+
 static struct pci_driver snb_uncore_pci_driver = {
        .name           = "snb_uncore",
        .id_table       = snb_uncore_pci_ids,
@@ -501,6 +510,11 @@ static struct pci_driver hsw_uncore_pci_driver = {
        .id_table       = hsw_uncore_pci_ids,
 };
 
+static struct pci_driver bdw_uncore_pci_driver = {
+       .name           = "bdw_uncore",
+       .id_table       = bdw_uncore_pci_ids,
+};
+
 struct imc_uncore_pci_dev {
        __u32 pci_id;
        struct pci_driver *driver;
@@ -514,6 +528,7 @@ static const struct imc_uncore_pci_dev desktop_imc_pci_ids[] = {
        IMC_DEV(IVB_E3_IMC, &ivb_uncore_pci_driver), /* Xeon E3-1200 v2/3rd Gen Core processor */
        IMC_DEV(HSW_IMC, &hsw_uncore_pci_driver),    /* 4th Gen Core Processor */
        IMC_DEV(HSW_U_IMC, &hsw_uncore_pci_driver),  /* 4th Gen Core ULT Mobile Processor */
+       IMC_DEV(BDW_IMC, &bdw_uncore_pci_driver),    /* 5th Gen Core U */
        {  /* end marker */ }
 };
 
@@ -561,6 +576,11 @@ int hsw_uncore_pci_init(void)
        return imc_uncore_pci_init();
 }
 
+int bdw_uncore_pci_init(void)
+{
+       return imc_uncore_pci_init();
+}
+
 /* end of Sandy Bridge uncore support */
 
 /* Nehalem uncore support */
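
Hooking up a new client IMC follows the fixed pattern visible above: a PCI
device ID, an ID table plus pci_driver, and an init hook that defers to the
shared imc_uncore_pci_init(). A sketch of the same three steps for a
hypothetical future device; the 0xffff ID and all "new_" names are invented:

    /* 1) PCI device ID for the new IMC (hypothetical value) */
    #define PCI_DEVICE_ID_INTEL_NEW_IMC 0xffff

    /* 2) ID table and driver naming the device */
    static const struct pci_device_id new_uncore_pci_ids[] = {
            { /* IMC */
                    PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_NEW_IMC),
                    .driver_data = UNCORE_PCI_DEV_DATA(SNB_PCI_UNCORE_IMC, 0),
            },
            { /* end: all zeroes */ },
    };

    static struct pci_driver new_uncore_pci_driver = {
            .name           = "new_uncore",
            .id_table       = new_uncore_pci_ids,
    };

    /* 3) init hook reusing the shared IMC setup */
    int new_uncore_pci_init(void)
    {
            return imc_uncore_pci_init();
    }

As with Broadwell, the device would also need an IMC_DEV() entry in
desktop_imc_pci_ids[] and a model-number case in uncore_pci_init().
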
index 12d9548457e7195a8a36b458e374cab9cabe5e07..6d6e85dd5849878e9caa379ef20eaab10b97559f 100644 (file)
                                ((1ULL << (n)) - 1)))
 
 /* Haswell-EP Ubox */
-#define HSWEP_U_MSR_PMON_CTR0                  0x705
-#define HSWEP_U_MSR_PMON_CTL0                  0x709
+#define HSWEP_U_MSR_PMON_CTR0                  0x709
+#define HSWEP_U_MSR_PMON_CTL0                  0x705
 #define HSWEP_U_MSR_PMON_FILTER                        0x707
 
 #define HSWEP_U_MSR_PMON_UCLK_FIXED_CTL                0x703
@@ -1914,7 +1914,7 @@ static struct intel_uncore_type hswep_uncore_cbox = {
        .name                   = "cbox",
        .num_counters           = 4,
        .num_boxes              = 18,
-       .perf_ctr_bits          = 44,
+       .perf_ctr_bits          = 48,
        .event_ctl              = HSWEP_C0_MSR_PMON_CTL0,
        .perf_ctr               = HSWEP_C0_MSR_PMON_CTR0,
        .event_mask             = SNBEP_CBO_MSR_PMON_RAW_EVENT_MASK,
index e7d8c7608471e960d0b64daeeb5e6bc0c468d07a..18ca99f2798b16443291827269389bf0ad52bf94 100644 (file)
@@ -12,7 +12,8 @@ static void show_cpuinfo_core(struct seq_file *m, struct cpuinfo_x86 *c,
 {
 #ifdef CONFIG_SMP
        seq_printf(m, "physical id\t: %d\n", c->phys_proc_id);
-       seq_printf(m, "siblings\t: %d\n", cpumask_weight(cpu_core_mask(cpu)));
+       seq_printf(m, "siblings\t: %d\n",
+                  cpumask_weight(topology_core_cpumask(cpu)));
        seq_printf(m, "core id\t\t: %d\n", c->cpu_core_id);
        seq_printf(m, "cpu cores\t: %d\n", c->booted_cores);
        seq_printf(m, "apicid\t\t: %d\n", c->apicid);
index c76d3e37c6e1dc99a7083f05a2f79b7cd82968e1..e068d6683dba6bab6bd4c9f9804a621385e3baee 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/elfcore.h>
 #include <linux/module.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include <asm/processor.h>
 #include <asm/hardirq.h>
index 6367a780cc8ca891b9513d2e4688717c8d5d3207..5ee771859b6f6e144090efe8f315e0c7707978b3 100644 (file)
@@ -4,7 +4,6 @@
 #include <linux/bootmem.h>
 #include <linux/export.h>
 #include <linux/io.h>
-#include <linux/irqdomain.h>
 #include <linux/interrupt.h>
 #include <linux/list.h>
 #include <linux/of.h>
@@ -17,6 +16,7 @@
 #include <linux/of_pci.h>
 #include <linux/initrd.h>
 
+#include <asm/irqdomain.h>
 #include <asm/hpet.h>
 #include <asm/apic.h>
 #include <asm/pci_x86.h>
@@ -196,38 +196,31 @@ static struct of_ioapic_type of_ioapic_type[] =
        },
 };
 
-static int ioapic_xlate(struct irq_domain *domain,
-                       struct device_node *controller,
-                       const u32 *intspec, u32 intsize,
-                       irq_hw_number_t *out_hwirq, u32 *out_type)
+static int dt_irqdomain_alloc(struct irq_domain *domain, unsigned int virq,
+                             unsigned int nr_irqs, void *arg)
 {
+       struct of_phandle_args *irq_data = (void *)arg;
        struct of_ioapic_type *it;
-       u32 line, idx, gsi;
+       struct irq_alloc_info tmp;
 
-       if (WARN_ON(intsize < 2))
+       if (WARN_ON(irq_data->args_count < 2))
                return -EINVAL;
-
-       line = intspec[0];
-
-       if (intspec[1] >= ARRAY_SIZE(of_ioapic_type))
+       if (irq_data->args[1] >= ARRAY_SIZE(of_ioapic_type))
                return -EINVAL;
 
-       it = &of_ioapic_type[intspec[1]];
+       it = &of_ioapic_type[irq_data->args[1]];
+       ioapic_set_alloc_attr(&tmp, NUMA_NO_NODE, it->trigger, it->polarity);
+       tmp.ioapic_id = mpc_ioapic_id(mp_irqdomain_ioapic_idx(domain));
+       tmp.ioapic_pin = irq_data->args[0];
 
-       idx = (u32)(long)domain->host_data;
-       gsi = mp_pin_to_gsi(idx, line);
-       if (mp_set_gsi_attr(gsi, it->trigger, it->polarity, cpu_to_node(0)))
-               return -EBUSY;
-
-       *out_hwirq = line;
-       *out_type = it->out_type;
-       return 0;
+       return mp_irqdomain_alloc(domain, virq, nr_irqs, &tmp);
 }
 
-const struct irq_domain_ops ioapic_irq_domain_ops = {
-       .map = mp_irqdomain_map,
-       .unmap = mp_irqdomain_unmap,
-       .xlate = ioapic_xlate,
+static const struct irq_domain_ops ioapic_irq_domain_ops = {
+       .alloc          = dt_irqdomain_alloc,
+       .free           = mp_irqdomain_free,
+       .activate       = mp_irqdomain_activate,
+       .deactivate     = mp_irqdomain_deactivate,
 };
 
 static void __init dtb_add_ioapic(struct device_node *dn)
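
The conversion above replaces the legacy .map/.xlate callbacks with the
hierarchical irqdomain .alloc/.free pair, where the alloc callback decodes
the firmware argument itself. A sketch of the general shape such an ops table
takes; the demo_ names are hypothetical and the hardware programming is
elided:

    #include <linux/irqdomain.h>

    static int demo_domain_alloc(struct irq_domain *d, unsigned int virq,
                                 unsigned int nr_irqs, void *arg)
    {
            /*
             * Decode the bus-specific @arg (an of_phandle_args for DT),
             * fill in per-irq data, then let the parent domain allocate.
             */
            return irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
    }

    static const struct irq_domain_ops demo_domain_ops = {
            .alloc  = demo_domain_alloc,
            .free   = irq_domain_free_irqs_common,
            /* .activate/.deactivate program the hardware when used */
    };
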
index fe9f0b79a18b321bbc80711b7e71dde49b7436e5..5cb9a4d6f62387bd4925244ec11d529ba677ae0b 100644 (file)
@@ -627,8 +627,12 @@ static struct chipset early_qrk[] __initdata = {
        { PCI_VENDOR_ID_INTEL, PCI_ANY_ID, PCI_CLASS_DISPLAY_VGA, PCI_ANY_ID,
          QFLAG_APPLY_ONCE, intel_graphics_stolen },
        /*
-        * HPET on current version of Baytrail platform has accuracy
-        * problems, disable it for now:
+        * HPET on the current version of the Baytrail platform has accuracy
+        * problems: it will halt in deep idle states, so we disable it.
+        *
+        * More details can be found in section 18.10.1.3 of the datasheet:
+        *
+        *    http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/atom-z8000-datasheet-vol-1.pdf
         */
        { PCI_VENDOR_ID_INTEL, 0x0f00,
                PCI_CLASS_BRIDGE_HOST, PCI_ANY_ID, 0, force_disable_hpet},
diff --git a/arch/x86/kernel/entry_32.S b/arch/x86/kernel/entry_32.S
deleted file mode 100644 (file)
index 1c30976..0000000
+++ /dev/null
@@ -1,1401 +0,0 @@
-/*
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- * This also contains the timer-interrupt handler, as well as all interrupts
- * and faults that can result in a task-switch.
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after a timer-interrupt and after each system call.
- *
- * I changed all the .align's to 4 (16 byte alignment), as that's faster
- * on a 486.
- *
- * Stack layout in 'syscall_exit':
- *     ptrace needs to have all regs on the stack.
- *     if the order here is changed, it needs to be
- *     updated in fork.c:copy_process, signal.c:do_signal,
- *     ptrace.c and ptrace.h
- *
- *      0(%esp) - %ebx
- *      4(%esp) - %ecx
- *      8(%esp) - %edx
- *       C(%esp) - %esi
- *     10(%esp) - %edi
- *     14(%esp) - %ebp
- *     18(%esp) - %eax
- *     1C(%esp) - %ds
- *     20(%esp) - %es
- *     24(%esp) - %fs
- *     28(%esp) - %gs          saved iff !CONFIG_X86_32_LAZY_GS
- *     2C(%esp) - orig_eax
- *     30(%esp) - %eip
- *     34(%esp) - %cs
- *     38(%esp) - %eflags
- *     3C(%esp) - %oldesp
- *     40(%esp) - %oldss
- *
- * "current" is in register %ebx during any slow entries.
- */
-
-#include <linux/linkage.h>
-#include <linux/err.h>
-#include <asm/thread_info.h>
-#include <asm/irqflags.h>
-#include <asm/errno.h>
-#include <asm/segment.h>
-#include <asm/smp.h>
-#include <asm/page_types.h>
-#include <asm/percpu.h>
-#include <asm/dwarf2.h>
-#include <asm/processor-flags.h>
-#include <asm/ftrace.h>
-#include <asm/irq_vectors.h>
-#include <asm/cpufeature.h>
-#include <asm/alternative-asm.h>
-#include <asm/asm.h>
-#include <asm/smap.h>
-
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_I386                (EM_386|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_LE           0x40000000
-
-#ifndef CONFIG_AUDITSYSCALL
-#define sysenter_audit syscall_trace_entry
-#define sysexit_audit  syscall_exit_work
-#endif
-
-       .section .entry.text, "ax"
-
-/*
- * We use macros for low-level operations which need to be overridden
- * for paravirtualization.  The following will never clobber any registers:
- *   INTERRUPT_RETURN (aka. "iret")
- *   GET_CR0_INTO_EAX (aka. "movl %cr0, %eax")
- *   ENABLE_INTERRUPTS_SYSEXIT (aka "sti; sysexit").
- *
- * For DISABLE_INTERRUPTS/ENABLE_INTERRUPTS (aka "cli"/"sti"), you must
- * specify what registers can be overwritten (CLBR_NONE, CLBR_EAX/EDX/ECX/ANY).
- * Allowing a register to be clobbered can shrink the paravirt replacement
- * enough to patch inline, increasing performance.
- */
-
-#ifdef CONFIG_PREEMPT
-#define preempt_stop(clobbers) DISABLE_INTERRUPTS(clobbers); TRACE_IRQS_OFF
-#else
-#define preempt_stop(clobbers)
-#define resume_kernel          restore_all
-#endif
-
-.macro TRACE_IRQS_IRET
-#ifdef CONFIG_TRACE_IRQFLAGS
-       testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)     # interrupts off?
-       jz 1f
-       TRACE_IRQS_ON
-1:
-#endif
-.endm
-
-/*
- * User gs save/restore
- *
- * %gs is used for userland TLS and kernel only uses it for stack
- * canary which is required to be at %gs:20 by gcc.  Read the comment
- * at the top of stackprotector.h for more info.
- *
- * Local labels 98 and 99 are used.
- */
-#ifdef CONFIG_X86_32_LAZY_GS
-
- /* unfortunately push/pop can't be no-op */
-.macro PUSH_GS
-       pushl_cfi $0
-.endm
-.macro POP_GS pop=0
-       addl $(4 + \pop), %esp
-       CFI_ADJUST_CFA_OFFSET -(4 + \pop)
-.endm
-.macro POP_GS_EX
-.endm
-
- /* all the rest are no-op */
-.macro PTGS_TO_GS
-.endm
-.macro PTGS_TO_GS_EX
-.endm
-.macro GS_TO_REG reg
-.endm
-.macro REG_TO_PTGS reg
-.endm
-.macro SET_KERNEL_GS reg
-.endm
-
-#else  /* CONFIG_X86_32_LAZY_GS */
-
-.macro PUSH_GS
-       pushl_cfi %gs
-       /*CFI_REL_OFFSET gs, 0*/
-.endm
-
-.macro POP_GS pop=0
-98:    popl_cfi %gs
-       /*CFI_RESTORE gs*/
-  .if \pop <> 0
-       add $\pop, %esp
-       CFI_ADJUST_CFA_OFFSET -\pop
-  .endif
-.endm
-.macro POP_GS_EX
-.pushsection .fixup, "ax"
-99:    movl $0, (%esp)
-       jmp 98b
-.popsection
-       _ASM_EXTABLE(98b,99b)
-.endm
-
-.macro PTGS_TO_GS
-98:    mov PT_GS(%esp), %gs
-.endm
-.macro PTGS_TO_GS_EX
-.pushsection .fixup, "ax"
-99:    movl $0, PT_GS(%esp)
-       jmp 98b
-.popsection
-       _ASM_EXTABLE(98b,99b)
-.endm
-
-.macro GS_TO_REG reg
-       movl %gs, \reg
-       /*CFI_REGISTER gs, \reg*/
-.endm
-.macro REG_TO_PTGS reg
-       movl \reg, PT_GS(%esp)
-       /*CFI_REL_OFFSET gs, PT_GS*/
-.endm
-.macro SET_KERNEL_GS reg
-       movl $(__KERNEL_STACK_CANARY), \reg
-       movl \reg, %gs
-.endm
-
-#endif /* CONFIG_X86_32_LAZY_GS */
-
-.macro SAVE_ALL
-       cld
-       PUSH_GS
-       pushl_cfi %fs
-       /*CFI_REL_OFFSET fs, 0;*/
-       pushl_cfi %es
-       /*CFI_REL_OFFSET es, 0;*/
-       pushl_cfi %ds
-       /*CFI_REL_OFFSET ds, 0;*/
-       pushl_cfi %eax
-       CFI_REL_OFFSET eax, 0
-       pushl_cfi %ebp
-       CFI_REL_OFFSET ebp, 0
-       pushl_cfi %edi
-       CFI_REL_OFFSET edi, 0
-       pushl_cfi %esi
-       CFI_REL_OFFSET esi, 0
-       pushl_cfi %edx
-       CFI_REL_OFFSET edx, 0
-       pushl_cfi %ecx
-       CFI_REL_OFFSET ecx, 0
-       pushl_cfi %ebx
-       CFI_REL_OFFSET ebx, 0
-       movl $(__USER_DS), %edx
-       movl %edx, %ds
-       movl %edx, %es
-       movl $(__KERNEL_PERCPU), %edx
-       movl %edx, %fs
-       SET_KERNEL_GS %edx
-.endm
-
-.macro RESTORE_INT_REGS
-       popl_cfi %ebx
-       CFI_RESTORE ebx
-       popl_cfi %ecx
-       CFI_RESTORE ecx
-       popl_cfi %edx
-       CFI_RESTORE edx
-       popl_cfi %esi
-       CFI_RESTORE esi
-       popl_cfi %edi
-       CFI_RESTORE edi
-       popl_cfi %ebp
-       CFI_RESTORE ebp
-       popl_cfi %eax
-       CFI_RESTORE eax
-.endm
-
-.macro RESTORE_REGS pop=0
-       RESTORE_INT_REGS
-1:     popl_cfi %ds
-       /*CFI_RESTORE ds;*/
-2:     popl_cfi %es
-       /*CFI_RESTORE es;*/
-3:     popl_cfi %fs
-       /*CFI_RESTORE fs;*/
-       POP_GS \pop
-.pushsection .fixup, "ax"
-4:     movl $0, (%esp)
-       jmp 1b
-5:     movl $0, (%esp)
-       jmp 2b
-6:     movl $0, (%esp)
-       jmp 3b
-.popsection
-       _ASM_EXTABLE(1b,4b)
-       _ASM_EXTABLE(2b,5b)
-       _ASM_EXTABLE(3b,6b)
-       POP_GS_EX
-.endm
-
-.macro RING0_INT_FRAME
-       CFI_STARTPROC simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA esp, 3*4
-       /*CFI_OFFSET cs, -2*4;*/
-       CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_EC_FRAME
-       CFI_STARTPROC simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA esp, 4*4
-       /*CFI_OFFSET cs, -2*4;*/
-       CFI_OFFSET eip, -3*4
-.endm
-
-.macro RING0_PTREGS_FRAME
-       CFI_STARTPROC simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA esp, PT_OLDESP-PT_EBX
-       /*CFI_OFFSET cs, PT_CS-PT_OLDESP;*/
-       CFI_OFFSET eip, PT_EIP-PT_OLDESP
-       /*CFI_OFFSET es, PT_ES-PT_OLDESP;*/
-       /*CFI_OFFSET ds, PT_DS-PT_OLDESP;*/
-       CFI_OFFSET eax, PT_EAX-PT_OLDESP
-       CFI_OFFSET ebp, PT_EBP-PT_OLDESP
-       CFI_OFFSET edi, PT_EDI-PT_OLDESP
-       CFI_OFFSET esi, PT_ESI-PT_OLDESP
-       CFI_OFFSET edx, PT_EDX-PT_OLDESP
-       CFI_OFFSET ecx, PT_ECX-PT_OLDESP
-       CFI_OFFSET ebx, PT_EBX-PT_OLDESP
-.endm
-
-ENTRY(ret_from_fork)
-       CFI_STARTPROC
-       pushl_cfi %eax
-       call schedule_tail
-       GET_THREAD_INFO(%ebp)
-       popl_cfi %eax
-       pushl_cfi $0x0202               # Reset kernel eflags
-       popfl_cfi
-       jmp syscall_exit
-       CFI_ENDPROC
-END(ret_from_fork)
-
-ENTRY(ret_from_kernel_thread)
-       CFI_STARTPROC
-       pushl_cfi %eax
-       call schedule_tail
-       GET_THREAD_INFO(%ebp)
-       popl_cfi %eax
-       pushl_cfi $0x0202               # Reset kernel eflags
-       popfl_cfi
-       movl PT_EBP(%esp),%eax
-       call *PT_EBX(%esp)
-       movl $0,PT_EAX(%esp)
-       jmp syscall_exit
-       CFI_ENDPROC
-ENDPROC(ret_from_kernel_thread)
-
-/*
- * Return to user mode is not as complex as all this looks,
- * but we want the default path for a system call return to
- * go as quickly as possible which is why some of this is
- * less clear than it otherwise should be.
- */
-
-       # userspace resumption stub bypassing syscall exit tracing
-       ALIGN
-       RING0_PTREGS_FRAME
-ret_from_exception:
-       preempt_stop(CLBR_ANY)
-ret_from_intr:
-       GET_THREAD_INFO(%ebp)
-#ifdef CONFIG_VM86
-       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS and CS
-       movb PT_CS(%esp), %al
-       andl $(X86_EFLAGS_VM | SEGMENT_RPL_MASK), %eax
-#else
-       /*
-        * We can be coming here from child spawned by kernel_thread().
-        */
-       movl PT_CS(%esp), %eax
-       andl $SEGMENT_RPL_MASK, %eax
-#endif
-       cmpl $USER_RPL, %eax
-       jb resume_kernel                # not returning to v8086 or userspace
-
-ENTRY(resume_userspace)
-       LOCKDEP_SYS_EXIT
-       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
-                                       # setting need_resched or sigpending
-                                       # between sampling and the iret
-       TRACE_IRQS_OFF
-       movl TI_flags(%ebp), %ecx
-       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done on
-                                       # int/exception return?
-       jne work_pending
-       jmp restore_all
-END(ret_from_exception)
-
-#ifdef CONFIG_PREEMPT
-ENTRY(resume_kernel)
-       DISABLE_INTERRUPTS(CLBR_ANY)
-need_resched:
-       cmpl $0,PER_CPU_VAR(__preempt_count)
-       jnz restore_all
-       testl $X86_EFLAGS_IF,PT_EFLAGS(%esp)    # interrupts off (exception path) ?
-       jz restore_all
-       call preempt_schedule_irq
-       jmp need_resched
-END(resume_kernel)
-#endif
-       CFI_ENDPROC
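
In C terms, the resume_kernel loop above is roughly the following (a sketch; on x86 the need_resched state is folded into __preempt_count as an inverted bit, so comparing against zero means both "preemption enabled" and "reschedule needed"):

        for (;;) {
                if (__preempt_count != 0)               /* not preemptible / nothing to do */
                        break;
                if (!(regs->flags & X86_EFLAGS_IF))     /* interrupted context had IRQs off */
                        break;
                preempt_schedule_irq();                 /* may switch tasks; then re-check */
        }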
-
-/* SYSENTER_RETURN points to after the "sysenter" instruction in
-   the vsyscall page.  See vsyscall-sysentry.S, which defines the symbol.  */
-
-       # sysenter call handler stub
-ENTRY(ia32_sysenter_target)
-       CFI_STARTPROC simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA esp, 0
-       CFI_REGISTER esp, ebp
-       movl TSS_sysenter_sp0(%esp),%esp
-sysenter_past_esp:
-       /*
-        * Interrupts are disabled here, but we can't trace that until
-        * enough kernel state has been set up for TRACE_IRQS_OFF to be
-        * called - and we immediately enable interrupts at that point anyway.
-        */
-       pushl_cfi $__USER_DS
-       /*CFI_REL_OFFSET ss, 0*/
-       pushl_cfi %ebp
-       CFI_REL_OFFSET esp, 0
-       pushfl_cfi
-       orl $X86_EFLAGS_IF, (%esp)
-       pushl_cfi $__USER_CS
-       /*CFI_REL_OFFSET cs, 0*/
-       /*
-        * Push current_thread_info()->sysenter_return to the stack.
-        * A tiny bit of offset fixup is necessary: TI_sysenter_return
-        * is relative to thread_info, which is at the bottom of the
-        * kernel stack page.  4*4 means the 4 words pushed above;
-        * TOP_OF_KERNEL_STACK_PADDING takes us to the top of the stack;
-        * and THREAD_SIZE takes us to the bottom.
-        */
-       pushl_cfi ((TI_sysenter_return) - THREAD_SIZE + TOP_OF_KERNEL_STACK_PADDING + 4*4)(%esp)
-       CFI_REL_OFFSET eip, 0
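
A worked instance of the operand above, with illustrative numbers (THREAD_SIZE = 8192, TOP_OF_KERNEL_STACK_PADDING = 0, and a hypothetical TI_sysenter_return offset of 36 within thread_info):

        /* after the four pushes, %esp = stack_top - 16, so:
         *   addr = %esp + (TI_sysenter_return - THREAD_SIZE
         *                  + TOP_OF_KERNEL_STACK_PADDING + 4*4)
         *        = (stack_top - 16) + (36 - 8192 + 0 + 16)
         *        = stack_top - 8192 + 36
         * which is &thread_info->sysenter_return, since thread_info
         * sits at the bottom of the THREAD_SIZE stack area. */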
-
-       pushl_cfi %eax
-       SAVE_ALL
-       ENABLE_INTERRUPTS(CLBR_NONE)
-
-/*
- * Load the potential sixth argument from user stack.
- * Careful about security.
- */
-       cmpl $__PAGE_OFFSET-3,%ebp
-       jae syscall_fault
-       ASM_STAC
-1:     movl (%ebp),%ebp
-       ASM_CLAC
-       movl %ebp,PT_EBP(%esp)
-       _ASM_EXTABLE(1b,syscall_fault)
-
-       GET_THREAD_INFO(%ebp)
-
-       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
-       jnz sysenter_audit
-sysenter_do_call:
-       cmpl $(NR_syscalls), %eax
-       jae sysenter_badsys
-       call *sys_call_table(,%eax,4)
-sysenter_after_call:
-       movl %eax,PT_EAX(%esp)
-       LOCKDEP_SYS_EXIT
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_OFF
-       movl TI_flags(%ebp), %ecx
-       testl $_TIF_ALLWORK_MASK, %ecx
-       jnz sysexit_audit
-sysenter_exit:
-/* if something modifies registers it must also disable sysexit */
-       movl PT_EIP(%esp), %edx
-       movl PT_OLDESP(%esp), %ecx
-       xorl %ebp,%ebp
-       TRACE_IRQS_ON
-1:     mov  PT_FS(%esp), %fs
-       PTGS_TO_GS
-       ENABLE_INTERRUPTS_SYSEXIT
-
-#ifdef CONFIG_AUDITSYSCALL
-sysenter_audit:
-       testl $(_TIF_WORK_SYSCALL_ENTRY & ~_TIF_SYSCALL_AUDIT),TI_flags(%ebp)
-       jnz syscall_trace_entry
-       /* movl PT_EAX(%esp), %eax      already set, syscall number: 1st arg to audit */
-       movl PT_EBX(%esp), %edx         /* ebx/a0: 2nd arg to audit */
-       /* movl PT_ECX(%esp), %ecx      already set, a1: 3rd arg to audit */
-       pushl_cfi PT_ESI(%esp)          /* a3: 5th arg */
-       pushl_cfi PT_EDX+4(%esp)        /* a2: 4th arg */
-       call __audit_syscall_entry
-       popl_cfi %ecx /* get that remapped edx off the stack */
-       popl_cfi %ecx /* get that remapped esi off the stack */
-       movl PT_EAX(%esp),%eax          /* reload syscall number */
-       jmp sysenter_do_call
-
-sysexit_audit:
-       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
-       jnz syscall_exit_work
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_ANY)
-       movl %eax,%edx          /* second arg, syscall return value */
-       cmpl $-MAX_ERRNO,%eax   /* is it an error ? */
-       setbe %al               /* 1 if so, 0 if not */
-       movzbl %al,%eax         /* zero-extend that */
-       call __audit_syscall_exit
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_OFF
-       movl TI_flags(%ebp), %ecx
-       testl $(_TIF_ALLWORK_MASK & ~_TIF_SYSCALL_AUDIT), %ecx
-       jnz syscall_exit_work
-       movl PT_EAX(%esp),%eax  /* reload syscall return value */
-       jmp sysenter_exit
-#endif
-
-       CFI_ENDPROC
-.pushsection .fixup,"ax"
-2:     movl $0,PT_FS(%esp)
-       jmp 1b
-.popsection
-       _ASM_EXTABLE(1b,2b)
-       PTGS_TO_GS_EX
-ENDPROC(ia32_sysenter_target)
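
The __PAGE_OFFSET-3 comparison in the sixth-argument load above guards the 4-byte user read at %ebp: the access is allowed only if it ends below the kernel boundary. Rendered as C (a sketch; the actual fault is caught via the _ASM_EXTABLE entry):

        if (ebp > __PAGE_OFFSET - 4)            /* same as: ebp >= __PAGE_OFFSET - 3 */
                goto syscall_fault;             /* read would reach into kernel space */
        arg6 = *(unsigned long *)ebp;           /* a fault here lands in syscall_fault */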
-
-       # system call handler stub
-ENTRY(system_call)
-       RING0_INT_FRAME                 # can't unwind into user space anyway
-       ASM_CLAC
-       pushl_cfi %eax                  # save orig_eax
-       SAVE_ALL
-       GET_THREAD_INFO(%ebp)
-                                       # system call tracing in operation / emulation
-       testl $_TIF_WORK_SYSCALL_ENTRY,TI_flags(%ebp)
-       jnz syscall_trace_entry
-       cmpl $(NR_syscalls), %eax
-       jae syscall_badsys
-syscall_call:
-       call *sys_call_table(,%eax,4)
-syscall_after_call:
-       movl %eax,PT_EAX(%esp)          # store the return value
-syscall_exit:
-       LOCKDEP_SYS_EXIT
-       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
-                                       # setting need_resched or sigpending
-                                       # between sampling and the iret
-       TRACE_IRQS_OFF
-       movl TI_flags(%ebp), %ecx
-       testl $_TIF_ALLWORK_MASK, %ecx  # current->work
-       jnz syscall_exit_work
-
-restore_all:
-       TRACE_IRQS_IRET
-restore_all_notrace:
-#ifdef CONFIG_X86_ESPFIX32
-       movl PT_EFLAGS(%esp), %eax      # mix EFLAGS, SS and CS
-       # Warning: PT_OLDSS(%esp) contains the wrong/random values if we
-       # are returning to the kernel.
-       # See comments in process.c:copy_thread() for details.
-       movb PT_OLDSS(%esp), %ah
-       movb PT_CS(%esp), %al
-       andl $(X86_EFLAGS_VM | (SEGMENT_TI_MASK << 8) | SEGMENT_RPL_MASK), %eax
-       cmpl $((SEGMENT_LDT << 8) | USER_RPL), %eax
-       CFI_REMEMBER_STATE
-       je ldt_ss                       # returning to user-space with LDT SS
-#endif
-restore_nocheck:
-       RESTORE_REGS 4                  # skip orig_eax/error_code
-irq_return:
-       INTERRUPT_RETURN
-.section .fixup,"ax"
-ENTRY(iret_exc)
-       pushl $0                        # no error code
-       pushl $do_iret_error
-       jmp error_code
-.previous
-       _ASM_EXTABLE(irq_return,iret_exc)
-
-#ifdef CONFIG_X86_ESPFIX32
-       CFI_RESTORE_STATE
-ldt_ss:
-#ifdef CONFIG_PARAVIRT
-       /*
-        * The kernel can't run on a non-flat stack if paravirt mode
-        * is active.  Rather than try to fix up the high bits of
-        * ESP, bypass this code entirely.  This may break DOSemu
-        * and/or Wine support in a paravirt VM, although the option
-        * is still available to implement the setting of the high
-        * 16 bits in the INTERRUPT_RETURN paravirt-op.
-        */
-       cmpl $0, pv_info+PARAVIRT_enabled
-       jne restore_nocheck
-#endif
-
-/*
- * Set up and switch to the ESPFIX stack
- *
- * We're returning to userspace with a 16-bit stack. The CPU will not
- * restore the high word of ESP for us on executing iret... This is an
- * "official" bug of all the x86-compatible CPUs, which we can work
- * around to make dosemu and wine happy. We do this by preloading the
- * high word of ESP with the high word of the userspace ESP while
- * compensating for the offset by changing to the ESPFIX segment with
- * a base address that makes up the difference.
- */
-#define GDT_ESPFIX_SS PER_CPU_VAR(gdt_page) + (GDT_ENTRY_ESPFIX_SS * 8)
-       mov %esp, %edx                  /* load kernel esp */
-       mov PT_OLDESP(%esp), %eax       /* load userspace esp */
-       mov %dx, %ax                    /* eax: new kernel esp */
-       sub %eax, %edx                  /* offset (low word is 0) */
-       shr $16, %edx
-       mov %dl, GDT_ESPFIX_SS + 4 /* bits 16..23 */
-       mov %dh, GDT_ESPFIX_SS + 7 /* bits 24..31 */
-       pushl_cfi $__ESPFIX_SS
-       pushl_cfi %eax                  /* new kernel esp */
-       /* Disable interrupts, but do not irqtrace this section: we
-        * will soon execute iret and the tracer was already set to
-        * the irqstate after the iret */
-       DISABLE_INTERRUPTS(CLBR_EAX)
-       lss (%esp), %esp                /* switch to espfix segment */
-       CFI_ADJUST_CFA_OFFSET -8
-       jmp restore_nocheck
-#endif
-       CFI_ENDPROC
-ENDPROC(system_call)
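
The ldt_ss arithmetic above can be read as the following C sketch (variable names illustrative; the descriptor's low base word is assumed to be zero, so only bytes 4 and 7 need patching):

        unsigned int mixed = (user_esp & 0xffff0000) | (kernel_esp & 0xffff);
        unsigned int base  = kernel_esp - mixed;   /* low 16 bits are zero */
        /* base >> 16 goes into descriptor bytes 4 and 7, so that
         * ESPFIX_SS.base + mixed == kernel_esp: the espfix stack
         * aliases the real kernel stack while exposing the user's
         * high ESP word to the eventual iret. */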
-
-       # perform work that needs to be done immediately before resumption
-       ALIGN
-       RING0_PTREGS_FRAME              # can't unwind into user space anyway
-work_pending:
-       testb $_TIF_NEED_RESCHED, %cl
-       jz work_notifysig
-work_resched:
-       call schedule
-       LOCKDEP_SYS_EXIT
-       DISABLE_INTERRUPTS(CLBR_ANY)    # make sure we don't miss an interrupt
-                                       # setting need_resched or sigpending
-                                       # between sampling and the iret
-       TRACE_IRQS_OFF
-       movl TI_flags(%ebp), %ecx
-       andl $_TIF_WORK_MASK, %ecx      # is there any work to be done other
-                                       # than syscall tracing?
-       jz restore_all
-       testb $_TIF_NEED_RESCHED, %cl
-       jnz work_resched
-
-work_notifysig:                                # deal with pending signals and
-                                       # notify-resume requests
-#ifdef CONFIG_VM86
-       testl $X86_EFLAGS_VM, PT_EFLAGS(%esp)
-       movl %esp, %eax
-       jnz work_notifysig_v86          # returning to kernel-space or
-                                       # vm86-space
-1:
-#else
-       movl %esp, %eax
-#endif
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       movb PT_CS(%esp), %bl
-       andb $SEGMENT_RPL_MASK, %bl
-       cmpb $USER_RPL, %bl
-       jb resume_kernel
-       xorl %edx, %edx
-       call do_notify_resume
-       jmp resume_userspace
-
-#ifdef CONFIG_VM86
-       ALIGN
-work_notifysig_v86:
-       pushl_cfi %ecx                  # save ti_flags for do_notify_resume
-       call save_v86_state             # %eax contains pt_regs pointer
-       popl_cfi %ecx
-       movl %eax, %esp
-       jmp 1b
-#endif
-END(work_pending)
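
Taken together, work_pending/work_resched/work_notifysig amount to roughly this loop (a sketch; the real code re-samples TI_flags with interrupts disabled on every pass):

        while (ti->flags & _TIF_WORK_MASK) {
                if (ti->flags & _TIF_NEED_RESCHED)
                        schedule();
                else
                        do_notify_resume(regs, NULL, ti->flags);  /* signals etc. */
        }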
-
-       # perform syscall exit tracing
-       ALIGN
-syscall_trace_entry:
-       movl $-ENOSYS,PT_EAX(%esp)
-       movl %esp, %eax
-       call syscall_trace_enter
-       /* What it returned is what we'll actually use.  */
-       cmpl $(NR_syscalls), %eax
-       jnae syscall_call
-       jmp syscall_exit
-END(syscall_trace_entry)
-
-       # perform syscall exit tracing
-       ALIGN
-syscall_exit_work:
-       testl $_TIF_WORK_SYSCALL_EXIT, %ecx
-       jz work_pending
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_ANY)     # could let syscall_trace_leave() call
-                                       # schedule() instead
-       movl %esp, %eax
-       call syscall_trace_leave
-       jmp resume_userspace
-END(syscall_exit_work)
-       CFI_ENDPROC
-
-       RING0_INT_FRAME                 # can't unwind into user space anyway
-syscall_fault:
-       ASM_CLAC
-       GET_THREAD_INFO(%ebp)
-       movl $-EFAULT,PT_EAX(%esp)
-       jmp resume_userspace
-END(syscall_fault)
-
-syscall_badsys:
-       movl $-ENOSYS,%eax
-       jmp syscall_after_call
-END(syscall_badsys)
-
-sysenter_badsys:
-       movl $-ENOSYS,%eax
-       jmp sysenter_after_call
-END(sysenter_badsys)
-       CFI_ENDPROC
-
-.macro FIXUP_ESPFIX_STACK
-/*
- * Switch back from the ESPFIX stack to the normal zero-based stack
- *
- * We can't call C functions using the ESPFIX stack. This code reads
- * the high word of the segment base from the GDT and switches to the
- * normal stack, adjusting ESP by the matching offset.
- */
-#ifdef CONFIG_X86_ESPFIX32
-       /* fixup the stack */
-       mov GDT_ESPFIX_SS + 4, %al /* bits 16..23 */
-       mov GDT_ESPFIX_SS + 7, %ah /* bits 24..31 */
-       shl $16, %eax
-       addl %esp, %eax                 /* the adjusted stack pointer */
-       pushl_cfi $__KERNEL_DS
-       pushl_cfi %eax
-       lss (%esp), %esp                /* switch to the normal stack segment */
-       CFI_ADJUST_CFA_OFFSET -8
-#endif
-.endm
-.macro UNWIND_ESPFIX_STACK
-#ifdef CONFIG_X86_ESPFIX32
-       movl %ss, %eax
-       /* see if on espfix stack */
-       cmpw $__ESPFIX_SS, %ax
-       jne 27f
-       movl $__KERNEL_DS, %eax
-       movl %eax, %ds
-       movl %eax, %es
-       /* switch to normal stack */
-       FIXUP_ESPFIX_STACK
-27:
-#endif
-.endm
-
-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
-       .align 8
-ENTRY(irq_entries_start)
-       RING0_INT_FRAME
-    vector=FIRST_EXTERNAL_VECTOR
-    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-       pushl_cfi $(~vector+0x80)       /* Note: always in signed byte range */
-    vector=vector+1
-       jmp     common_interrupt
-       CFI_ADJUST_CFA_OFFSET -4
-       .align  8
-    .endr
-END(irq_entries_start)
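
The ~vector+0x80 encoding above keeps every push immediate in signed-byte range, so each stub fits in its 8-byte slot; common_interrupt below then subtracts 0x80 again, leaving ~vector on the stack for do_IRQ to invert. In C terms:

        int encode(unsigned int vec)  { return ~vec + 0x80; }   /* -128..95 for 0x20..0xff */
        int adjust(int pushed)        { return pushed - 0x80; } /* == ~vec, in [-256,-1]   */
        unsigned int decode(int adj)  { return ~adj; }          /* recovers the vector     */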
-
-/*
- * the CPU automatically disables interrupts when executing an IRQ vector,
- * so IRQ-flags tracing has to follow that:
- */
-       .p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
-       ASM_CLAC
-       addl $-0x80,(%esp)      /* Adjust vector into the [-256,-1] range */
-       SAVE_ALL
-       TRACE_IRQS_OFF
-       movl %esp,%eax
-       call do_IRQ
-       jmp ret_from_intr
-ENDPROC(common_interrupt)
-       CFI_ENDPROC
-
-#define BUILD_INTERRUPT3(name, nr, fn) \
-ENTRY(name)                            \
-       RING0_INT_FRAME;                \
-       ASM_CLAC;                       \
-       pushl_cfi $~(nr);               \
-       SAVE_ALL;                       \
-       TRACE_IRQS_OFF                  \
-       movl %esp,%eax;                 \
-       call fn;                        \
-       jmp ret_from_intr;              \
-       CFI_ENDPROC;                    \
-ENDPROC(name)
-
-
-#ifdef CONFIG_TRACING
-#define TRACE_BUILD_INTERRUPT(name, nr)                \
-       BUILD_INTERRUPT3(trace_##name, nr, smp_trace_##name)
-#else
-#define TRACE_BUILD_INTERRUPT(name, nr)
-#endif
-
-#define BUILD_INTERRUPT(name, nr) \
-       BUILD_INTERRUPT3(name, nr, smp_##name); \
-       TRACE_BUILD_INTERRUPT(name, nr)
-
-/* The include is where all of the SMP etc. interrupts come from */
-#include <asm/entry_arch.h>
-
-ENTRY(coprocessor_error)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_coprocessor_error
-       jmp error_code
-       CFI_ENDPROC
-END(coprocessor_error)
-
-ENTRY(simd_coprocessor_error)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-#ifdef CONFIG_X86_INVD_BUG
-       /* AMD 486 bug: invd from userspace calls exception 19 instead of #GP */
-       ALTERNATIVE "pushl_cfi $do_general_protection", \
-                   "pushl $do_simd_coprocessor_error", \
-                   X86_FEATURE_XMM
-#else
-       pushl_cfi $do_simd_coprocessor_error
-#endif
-       jmp error_code
-       CFI_ENDPROC
-END(simd_coprocessor_error)
-
-ENTRY(device_not_available)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $-1                   # mark this as an int
-       pushl_cfi $do_device_not_available
-       jmp error_code
-       CFI_ENDPROC
-END(device_not_available)
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_iret)
-       iret
-       _ASM_EXTABLE(native_iret, iret_exc)
-END(native_iret)
-
-ENTRY(native_irq_enable_sysexit)
-       sti
-       sysexit
-END(native_irq_enable_sysexit)
-#endif
-
-ENTRY(overflow)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_overflow
-       jmp error_code
-       CFI_ENDPROC
-END(overflow)
-
-ENTRY(bounds)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_bounds
-       jmp error_code
-       CFI_ENDPROC
-END(bounds)
-
-ENTRY(invalid_op)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_invalid_op
-       jmp error_code
-       CFI_ENDPROC
-END(invalid_op)
-
-ENTRY(coprocessor_segment_overrun)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_coprocessor_segment_overrun
-       jmp error_code
-       CFI_ENDPROC
-END(coprocessor_segment_overrun)
-
-ENTRY(invalid_TSS)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $do_invalid_TSS
-       jmp error_code
-       CFI_ENDPROC
-END(invalid_TSS)
-
-ENTRY(segment_not_present)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $do_segment_not_present
-       jmp error_code
-       CFI_ENDPROC
-END(segment_not_present)
-
-ENTRY(stack_segment)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $do_stack_segment
-       jmp error_code
-       CFI_ENDPROC
-END(stack_segment)
-
-ENTRY(alignment_check)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $do_alignment_check
-       jmp error_code
-       CFI_ENDPROC
-END(alignment_check)
-
-ENTRY(divide_error)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0                    # no error code
-       pushl_cfi $do_divide_error
-       jmp error_code
-       CFI_ENDPROC
-END(divide_error)
-
-#ifdef CONFIG_X86_MCE
-ENTRY(machine_check)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi machine_check_vector
-       jmp error_code
-       CFI_ENDPROC
-END(machine_check)
-#endif
-
-ENTRY(spurious_interrupt_bug)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $0
-       pushl_cfi $do_spurious_interrupt_bug
-       jmp error_code
-       CFI_ENDPROC
-END(spurious_interrupt_bug)
-
-#ifdef CONFIG_XEN
-/* Xen doesn't set %esp to be precisely what the normal sysenter
-   entrypoint expects, so fix it up before using the normal path. */
-ENTRY(xen_sysenter_target)
-       RING0_INT_FRAME
-       addl $5*4, %esp         /* remove xen-provided frame */
-       CFI_ADJUST_CFA_OFFSET -5*4
-       jmp sysenter_past_esp
-       CFI_ENDPROC
-
-ENTRY(xen_hypervisor_callback)
-       CFI_STARTPROC
-       pushl_cfi $-1 /* orig_ax = -1 => not a system call */
-       SAVE_ALL
-       TRACE_IRQS_OFF
-
-       /* Check to see if we got the event in the critical
-          region in xen_iret_direct, after we've reenabled
-          events and checked for pending events.  This simulates
-          iret instruction's behaviour where it delivers a
-          pending interrupt when enabling interrupts. */
-       movl PT_EIP(%esp),%eax
-       cmpl $xen_iret_start_crit,%eax
-       jb   1f
-       cmpl $xen_iret_end_crit,%eax
-       jae  1f
-
-       jmp  xen_iret_crit_fixup
-
-ENTRY(xen_do_upcall)
-1:     mov %esp, %eax
-       call xen_evtchn_do_upcall
-#ifndef CONFIG_PREEMPT
-       call xen_maybe_preempt_hcall
-#endif
-       jmp  ret_from_intr
-       CFI_ENDPROC
-ENDPROC(xen_hypervisor_callback)
-
-# Hypervisor uses this for application faults while it executes.
-# We get here for two reasons:
-#  1. Fault while reloading DS, ES, FS or GS
-#  2. Fault while executing IRET
-# Category 1 we fix up by reattempting the load, and zeroing the segment
-# register if the load fails.
-# Category 2 we fix up by jumping to do_iret_error. We cannot use the
-# normal Linux return path in this case because if we use the IRET hypercall
-# to pop the stack frame we end up in an infinite loop of failsafe callbacks.
-# We distinguish between categories by maintaining a status value in EAX.
-ENTRY(xen_failsafe_callback)
-       CFI_STARTPROC
-       pushl_cfi %eax
-       movl $1,%eax
-1:     mov 4(%esp),%ds
-2:     mov 8(%esp),%es
-3:     mov 12(%esp),%fs
-4:     mov 16(%esp),%gs
-       /* EAX == 0 => Category 1 (Bad segment)
-          EAX != 0 => Category 2 (Bad IRET) */
-       testl %eax,%eax
-       popl_cfi %eax
-       lea 16(%esp),%esp
-       CFI_ADJUST_CFA_OFFSET -16
-       jz 5f
-       jmp iret_exc
-5:     pushl_cfi $-1 /* orig_ax = -1 => not a system call */
-       SAVE_ALL
-       jmp ret_from_exception
-       CFI_ENDPROC
-
-.section .fixup,"ax"
-6:     xorl %eax,%eax
-       movl %eax,4(%esp)
-       jmp 1b
-7:     xorl %eax,%eax
-       movl %eax,8(%esp)
-       jmp 2b
-8:     xorl %eax,%eax
-       movl %eax,12(%esp)
-       jmp 3b
-9:     xorl %eax,%eax
-       movl %eax,16(%esp)
-       jmp 4b
-.previous
-       _ASM_EXTABLE(1b,6b)
-       _ASM_EXTABLE(2b,7b)
-       _ASM_EXTABLE(3b,8b)
-       _ASM_EXTABLE(4b,9b)
-ENDPROC(xen_failsafe_callback)
-
-BUILD_INTERRUPT3(xen_hvm_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
-               xen_evtchn_do_upcall)
-
-#endif /* CONFIG_XEN */
-
-#if IS_ENABLED(CONFIG_HYPERV)
-
-BUILD_INTERRUPT3(hyperv_callback_vector, HYPERVISOR_CALLBACK_VECTOR,
-       hyperv_vector_handler)
-
-#endif /* CONFIG_HYPERV */
-
-#ifdef CONFIG_FUNCTION_TRACER
-#ifdef CONFIG_DYNAMIC_FTRACE
-
-ENTRY(mcount)
-       ret
-END(mcount)
-
-ENTRY(ftrace_caller)
-       pushl %eax
-       pushl %ecx
-       pushl %edx
-       pushl $0        /* Pass NULL as regs pointer */
-       movl 4*4(%esp), %eax
-       movl 0x4(%ebp), %edx
-       movl function_trace_op, %ecx
-       subl $MCOUNT_INSN_SIZE, %eax
-
-.globl ftrace_call
-ftrace_call:
-       call ftrace_stub
-
-       addl $4,%esp    /* skip NULL pointer */
-       popl %edx
-       popl %ecx
-       popl %eax
-ftrace_ret:
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-.globl ftrace_graph_call
-ftrace_graph_call:
-       jmp ftrace_stub
-#endif
-
-.globl ftrace_stub
-ftrace_stub:
-       ret
-END(ftrace_caller)
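
ftrace_caller above assembles its arguments the way any mcount-style hook does; as a C sketch (ftrace_call_hook is a hypothetical stand-in for whatever the ftrace_call site is patched to call):

        unsigned long self_ip   = ret_addr - MCOUNT_INSN_SIZE;  /* the call site itself */
        unsigned long parent_ip = *(unsigned long *)(ebp + 4);  /* traced fn's return addr */
        ftrace_call_hook(self_ip, parent_ip);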
-
-ENTRY(ftrace_regs_caller)
-       pushf   /* push flags before compare (in cs location) */
-
-       /*
-        * i386 does not save SS and ESP when coming from kernel.
-        * Instead, to get sp, &regs->sp is used (see ptrace.h).
-        * Unfortunately, that means eflags must be at the same location
-        * as the current return ip is. We move the return ip into the
-        * ip location, and move flags into the return ip location.
-        */
-       pushl 4(%esp)   /* save return ip into ip slot */
-
-       pushl $0        /* Load 0 into orig_ax */
-       pushl %gs
-       pushl %fs
-       pushl %es
-       pushl %ds
-       pushl %eax
-       pushl %ebp
-       pushl %edi
-       pushl %esi
-       pushl %edx
-       pushl %ecx
-       pushl %ebx
-
-       movl 13*4(%esp), %eax   /* Get the saved flags */
-       movl %eax, 14*4(%esp)   /* Move saved flags into regs->flags location */
-                               /* clobbering return ip */
-       movl $__KERNEL_CS,13*4(%esp)
-
-       movl 12*4(%esp), %eax   /* Load ip (1st parameter) */
-       subl $MCOUNT_INSN_SIZE, %eax    /* Adjust ip */
-       movl 0x4(%ebp), %edx    /* Load parent ip (2nd parameter) */
-       movl function_trace_op, %ecx /* Save ftrace_pos in 3rd parameter */
-       pushl %esp              /* Save pt_regs as 4th parameter */
-
-GLOBAL(ftrace_regs_call)
-       call ftrace_stub
-
-       addl $4, %esp           /* Skip pt_regs */
-       movl 14*4(%esp), %eax   /* Move flags back into cs */
-       movl %eax, 13*4(%esp)   /* Needed to keep addl from modifying flags */
-       movl 12*4(%esp), %eax   /* Get return ip from regs->ip */
-       movl %eax, 14*4(%esp)   /* Put return ip back for ret */
-
-       popl %ebx
-       popl %ecx
-       popl %edx
-       popl %esi
-       popl %edi
-       popl %ebp
-       popl %eax
-       popl %ds
-       popl %es
-       popl %fs
-       popl %gs
-       addl $8, %esp           /* Skip orig_ax and ip */
-       popf                    /* Pop flags at end (no addl to corrupt flags) */
-       jmp ftrace_ret
-
-       popf
-       jmp  ftrace_stub
-#else /* ! CONFIG_DYNAMIC_FTRACE */
-
-ENTRY(mcount)
-       cmpl $__PAGE_OFFSET, %esp
-       jb ftrace_stub          /* Paging not enabled yet? */
-
-       cmpl $ftrace_stub, ftrace_trace_function
-       jnz trace
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-       cmpl $ftrace_stub, ftrace_graph_return
-       jnz ftrace_graph_caller
-
-       cmpl $ftrace_graph_entry_stub, ftrace_graph_entry
-       jnz ftrace_graph_caller
-#endif
-.globl ftrace_stub
-ftrace_stub:
-       ret
-
-       /* taken from glibc */
-trace:
-       pushl %eax
-       pushl %ecx
-       pushl %edx
-       movl 0xc(%esp), %eax
-       movl 0x4(%ebp), %edx
-       subl $MCOUNT_INSN_SIZE, %eax
-
-       call *ftrace_trace_function
-
-       popl %edx
-       popl %ecx
-       popl %eax
-       jmp ftrace_stub
-END(mcount)
-#endif /* CONFIG_DYNAMIC_FTRACE */
-#endif /* CONFIG_FUNCTION_TRACER */
-
-#ifdef CONFIG_FUNCTION_GRAPH_TRACER
-ENTRY(ftrace_graph_caller)
-       pushl %eax
-       pushl %ecx
-       pushl %edx
-       movl 0xc(%esp), %eax
-       lea 0x4(%ebp), %edx
-       movl (%ebp), %ecx
-       subl $MCOUNT_INSN_SIZE, %eax
-       call prepare_ftrace_return
-       popl %edx
-       popl %ecx
-       popl %eax
-       ret
-END(ftrace_graph_caller)
-
-.globl return_to_handler
-return_to_handler:
-       pushl %eax
-       pushl %edx
-       movl %ebp, %eax
-       call ftrace_return_to_handler
-       movl %eax, %ecx
-       popl %edx
-       popl %eax
-       jmp *%ecx
-#endif
-
-#ifdef CONFIG_TRACING
-ENTRY(trace_page_fault)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $trace_do_page_fault
-       jmp error_code
-       CFI_ENDPROC
-END(trace_page_fault)
-#endif
-
-ENTRY(page_fault)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $do_page_fault
-       ALIGN
-error_code:
-       /* the function address is in %gs's slot on the stack */
-       pushl_cfi %fs
-       /*CFI_REL_OFFSET fs, 0*/
-       pushl_cfi %es
-       /*CFI_REL_OFFSET es, 0*/
-       pushl_cfi %ds
-       /*CFI_REL_OFFSET ds, 0*/
-       pushl_cfi_reg eax
-       pushl_cfi_reg ebp
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg edx
-       pushl_cfi_reg ecx
-       pushl_cfi_reg ebx
-       cld
-       movl $(__KERNEL_PERCPU), %ecx
-       movl %ecx, %fs
-       UNWIND_ESPFIX_STACK
-       GS_TO_REG %ecx
-       movl PT_GS(%esp), %edi          # get the function address
-       movl PT_ORIG_EAX(%esp), %edx    # get the error code
-       movl $-1, PT_ORIG_EAX(%esp)     # no syscall to restart
-       REG_TO_PTGS %ecx
-       SET_KERNEL_GS %ecx
-       movl $(__USER_DS), %ecx
-       movl %ecx, %ds
-       movl %ecx, %es
-       TRACE_IRQS_OFF
-       movl %esp,%eax                  # pt_regs pointer
-       call *%edi
-       jmp ret_from_exception
-       CFI_ENDPROC
-END(page_fault)
-
-/*
- * Debug traps and NMI can happen at the one SYSENTER instruction
- * that sets up the real kernel stack. Check here, since we can't
- * allow the wrong stack to be used.
- *
- * "TSS_sysenter_sp0+12" is because the NMI/debug handler will have
- * already pushed 3 words if it hits on the sysenter instruction:
- * eflags, cs and eip.
- *
- * We just load the right stack, and push the three (known) values
- * by hand onto the new stack - while updating the return eip past
- * the instruction that would have done it for sysenter.
- */
-.macro FIX_STACK offset ok label
-       cmpw $__KERNEL_CS, 4(%esp)
-       jne \ok
-\label:
-       movl TSS_sysenter_sp0 + \offset(%esp), %esp
-       CFI_DEF_CFA esp, 0
-       CFI_UNDEFINED eip
-       pushfl_cfi
-       pushl_cfi $__KERNEL_CS
-       pushl_cfi $sysenter_past_esp
-       CFI_REL_OFFSET eip, 0
-.endm
-
-ENTRY(debug)
-       RING0_INT_FRAME
-       ASM_CLAC
-       cmpl $ia32_sysenter_target,(%esp)
-       jne debug_stack_correct
-       FIX_STACK 12, debug_stack_correct, debug_esp_fix_insn
-debug_stack_correct:
-       pushl_cfi $-1                   # mark this as an int
-       SAVE_ALL
-       TRACE_IRQS_OFF
-       xorl %edx,%edx                  # error code 0
-       movl %esp,%eax                  # pt_regs pointer
-       call do_debug
-       jmp ret_from_exception
-       CFI_ENDPROC
-END(debug)
-
-/*
- * NMI is doubly nasty. It can happen _while_ we're handling
- * a debug fault, and the debug fault hasn't yet been able to
- * clear up the stack. So we first check whether we got an
- * NMI on the sysenter entry path, but after that we need to
- * check whether we got an NMI on the debug path where the debug
- * fault happened on the sysenter path.
- */
-ENTRY(nmi)
-       RING0_INT_FRAME
-       ASM_CLAC
-#ifdef CONFIG_X86_ESPFIX32
-       pushl_cfi %eax
-       movl %ss, %eax
-       cmpw $__ESPFIX_SS, %ax
-       popl_cfi %eax
-       je nmi_espfix_stack
-#endif
-       cmpl $ia32_sysenter_target,(%esp)
-       je nmi_stack_fixup
-       pushl_cfi %eax
-       movl %esp,%eax
-       /* Do not access memory above the end of our stack page,
-        * it might not exist.
-        */
-       andl $(THREAD_SIZE-1),%eax
-       cmpl $(THREAD_SIZE-20),%eax
-       popl_cfi %eax
-       jae nmi_stack_correct
-       cmpl $ia32_sysenter_target,12(%esp)
-       je nmi_debug_stack_check
-nmi_stack_correct:
-       /* We have a RING0_INT_FRAME here */
-       pushl_cfi %eax
-       SAVE_ALL
-       xorl %edx,%edx          # zero error code
-       movl %esp,%eax          # pt_regs pointer
-       call do_nmi
-       jmp restore_all_notrace
-       CFI_ENDPROC
-
-nmi_stack_fixup:
-       RING0_INT_FRAME
-       FIX_STACK 12, nmi_stack_correct, 1
-       jmp nmi_stack_correct
-
-nmi_debug_stack_check:
-       /* We have a RING0_INT_FRAME here */
-       cmpw $__KERNEL_CS,16(%esp)
-       jne nmi_stack_correct
-       cmpl $debug,(%esp)
-       jb nmi_stack_correct
-       cmpl $debug_esp_fix_insn,(%esp)
-       ja nmi_stack_correct
-       FIX_STACK 24, nmi_stack_correct, 1
-       jmp nmi_stack_correct
-
-#ifdef CONFIG_X86_ESPFIX32
-nmi_espfix_stack:
-       /* We have a RING0_INT_FRAME here.
-        *
-        * build the far pointer that lss will use to switch back
-        */
-       pushl_cfi %ss
-       pushl_cfi %esp
-       addl $4, (%esp)
-       /* copy the iret frame of 12 bytes */
-       .rept 3
-       pushl_cfi 16(%esp)
-       .endr
-       pushl_cfi %eax
-       SAVE_ALL
-       FIXUP_ESPFIX_STACK              # %eax == %esp
-       xorl %edx,%edx                  # zero error code
-       call do_nmi
-       RESTORE_REGS
-       lss 12+4(%esp), %esp            # back to espfix stack
-       CFI_ADJUST_CFA_OFFSET -24
-       jmp irq_return
-#endif
-       CFI_ENDPROC
-END(nmi)
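
The THREAD_SIZE masking above decides whether 12(%esp) may be read at all before the debug-stack check; as C (sketch):

        unsigned long off = esp & (THREAD_SIZE - 1);  /* position inside the stack area */
        if (off >= THREAD_SIZE - 20)
                goto nmi_stack_correct;  /* too near the top: 12(%esp) may be off-page */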
-
-ENTRY(int3)
-       RING0_INT_FRAME
-       ASM_CLAC
-       pushl_cfi $-1                   # mark this as an int
-       SAVE_ALL
-       TRACE_IRQS_OFF
-       xorl %edx,%edx          # zero error code
-       movl %esp,%eax          # pt_regs pointer
-       call do_int3
-       jmp ret_from_exception
-       CFI_ENDPROC
-END(int3)
-
-ENTRY(general_protection)
-       RING0_EC_FRAME
-       pushl_cfi $do_general_protection
-       jmp error_code
-       CFI_ENDPROC
-END(general_protection)
-
-#ifdef CONFIG_KVM_GUEST
-ENTRY(async_page_fault)
-       RING0_EC_FRAME
-       ASM_CLAC
-       pushl_cfi $do_async_page_fault
-       jmp error_code
-       CFI_ENDPROC
-END(async_page_fault)
-#endif
-
diff --git a/arch/x86/kernel/entry_64.S b/arch/x86/kernel/entry_64.S
deleted file mode 100644 (file)
index 02c2eff..0000000
+++ /dev/null
@@ -1,1653 +0,0 @@
-/*
- *  linux/arch/x86_64/entry.S
- *
- *  Copyright (C) 1991, 1992  Linus Torvalds
- *  Copyright (C) 2000, 2001, 2002  Andi Kleen SuSE Labs
- *  Copyright (C) 2000  Pavel Machek <pavel@suse.cz>
- */
-
-/*
- * entry.S contains the system-call and fault low-level handling routines.
- *
- * Some of this is documented in Documentation/x86/entry_64.txt
- *
- * NOTE: This code handles signal-recognition, which happens every time
- * after an interrupt and after each system call.
- *
- * A note on terminology:
- * - iret frame: Architecture defined interrupt frame from SS to RIP
- * at the top of the kernel process stack.
- *
- * Some macro usage:
- * - CFI macros are used to generate dwarf2 unwind information for better
- * backtraces. They don't change any code.
- * - ENTRY/END Define functions in the symbol table.
- * - TRACE_IRQ_* - Trace hard interrupt state for lock debugging.
- * - idtentry - Define exception entry points.
- */
-
-#include <linux/linkage.h>
-#include <asm/segment.h>
-#include <asm/cache.h>
-#include <asm/errno.h>
-#include <asm/dwarf2.h>
-#include <asm/calling.h>
-#include <asm/asm-offsets.h>
-#include <asm/msr.h>
-#include <asm/unistd.h>
-#include <asm/thread_info.h>
-#include <asm/hw_irq.h>
-#include <asm/page_types.h>
-#include <asm/irqflags.h>
-#include <asm/paravirt.h>
-#include <asm/percpu.h>
-#include <asm/asm.h>
-#include <asm/context_tracking.h>
-#include <asm/smap.h>
-#include <asm/pgtable_types.h>
-#include <linux/err.h>
-
-/* Avoid __ASSEMBLER__'ifying <linux/audit.h> just for this.  */
-#include <linux/elf-em.h>
-#define AUDIT_ARCH_X86_64      (EM_X86_64|__AUDIT_ARCH_64BIT|__AUDIT_ARCH_LE)
-#define __AUDIT_ARCH_64BIT 0x80000000
-#define __AUDIT_ARCH_LE           0x40000000
-
-       .code64
-       .section .entry.text, "ax"
-
-
-#ifdef CONFIG_PARAVIRT
-ENTRY(native_usergs_sysret64)
-       swapgs
-       sysretq
-ENDPROC(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT */
-
-
-.macro TRACE_IRQS_IRETQ
-#ifdef CONFIG_TRACE_IRQFLAGS
-       bt   $9,EFLAGS(%rsp)    /* interrupts off? */
-       jnc  1f
-       TRACE_IRQS_ON
-1:
-#endif
-.endm
-
-/*
- * When dynamic function tracer is enabled it will add a breakpoint
- * to all locations that it is about to modify, sync CPUs, update
- * all the code, sync CPUs, then remove the breakpoints. In this time
- * if lockdep is enabled, it might jump back into the debug handler
- * outside the updating of the IST protection. (TRACE_IRQS_ON/OFF).
- *
- * We need to change the IDT table before calling TRACE_IRQS_ON/OFF to
- * make sure the stack pointer does not get reset back to the top
- * of the debug stack, and instead just reuses the current stack.
- */
-#if defined(CONFIG_DYNAMIC_FTRACE) && defined(CONFIG_TRACE_IRQFLAGS)
-
-.macro TRACE_IRQS_OFF_DEBUG
-       call debug_stack_set_zero
-       TRACE_IRQS_OFF
-       call debug_stack_reset
-.endm
-
-.macro TRACE_IRQS_ON_DEBUG
-       call debug_stack_set_zero
-       TRACE_IRQS_ON
-       call debug_stack_reset
-.endm
-
-.macro TRACE_IRQS_IRETQ_DEBUG
-       bt   $9,EFLAGS(%rsp)    /* interrupts off? */
-       jnc  1f
-       TRACE_IRQS_ON_DEBUG
-1:
-.endm
-
-#else
-# define TRACE_IRQS_OFF_DEBUG          TRACE_IRQS_OFF
-# define TRACE_IRQS_ON_DEBUG           TRACE_IRQS_ON
-# define TRACE_IRQS_IRETQ_DEBUG                TRACE_IRQS_IRETQ
-#endif
-
-/*
- * empty frame
- */
-       .macro EMPTY_FRAME start=1 offset=0
-       .if \start
-       CFI_STARTPROC simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA rsp,8+\offset
-       .else
-       CFI_DEF_CFA_OFFSET 8+\offset
-       .endif
-       .endm
-
-/*
- * initial frame state for interrupts (and exceptions without error code)
- */
-       .macro INTR_FRAME start=1 offset=0
-       EMPTY_FRAME \start, 5*8+\offset
-       /*CFI_REL_OFFSET ss, 4*8+\offset*/
-       CFI_REL_OFFSET rsp, 3*8+\offset
-       /*CFI_REL_OFFSET rflags, 2*8+\offset*/
-       /*CFI_REL_OFFSET cs, 1*8+\offset*/
-       CFI_REL_OFFSET rip, 0*8+\offset
-       .endm
-
-/*
- * initial frame state for exceptions with error code (and interrupts
- * with vector already pushed)
- */
-       .macro XCPT_FRAME start=1 offset=0
-       INTR_FRAME \start, 1*8+\offset
-       .endm
-
-/*
- * frame that enables passing a complete pt_regs to a C function.
- */
-       .macro DEFAULT_FRAME start=1 offset=0
-       XCPT_FRAME \start, ORIG_RAX+\offset
-       CFI_REL_OFFSET rdi, RDI+\offset
-       CFI_REL_OFFSET rsi, RSI+\offset
-       CFI_REL_OFFSET rdx, RDX+\offset
-       CFI_REL_OFFSET rcx, RCX+\offset
-       CFI_REL_OFFSET rax, RAX+\offset
-       CFI_REL_OFFSET r8, R8+\offset
-       CFI_REL_OFFSET r9, R9+\offset
-       CFI_REL_OFFSET r10, R10+\offset
-       CFI_REL_OFFSET r11, R11+\offset
-       CFI_REL_OFFSET rbx, RBX+\offset
-       CFI_REL_OFFSET rbp, RBP+\offset
-       CFI_REL_OFFSET r12, R12+\offset
-       CFI_REL_OFFSET r13, R13+\offset
-       CFI_REL_OFFSET r14, R14+\offset
-       CFI_REL_OFFSET r15, R15+\offset
-       .endm
-
-/*
- * 64bit SYSCALL instruction entry. Up to 6 arguments in registers.
- *
- * 64bit SYSCALL saves rip to rcx, clears rflags.RF, then saves rflags to r11,
- * then loads new ss, cs, and rip from previously programmed MSRs.
- * rflags gets masked by a value from another MSR (so CLD and CLAC
- * are not needed). SYSCALL does not save anything on the stack
- * and does not change rsp.
- *
- * Registers on entry:
- * rax  system call number
- * rcx  return address
- * r11  saved rflags (note: r11 is callee-clobbered register in C ABI)
- * rdi  arg0
- * rsi  arg1
- * rdx  arg2
- * r10  arg3 (needs to be moved to rcx to conform to C ABI)
- * r8   arg4
- * r9   arg5
- * (note: r12-r15,rbp,rbx are callee-preserved in C ABI)
- *
- * Only called from user space.
- *
- * When the user can change pt_regs->foo, always force IRET. That is
- * because it deals with non-canonical addresses better. SYSRET has
- * trouble with them due to bugs in both AMD and Intel CPUs.
- */
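
For reference, a userspace caller honoring the convention above looks like this (illustrative; note the rcx/r11 clobbers, and that a fourth argument would travel in r10 rather than rcx):

        #include <sys/syscall.h>                /* __NR_getpid etc. */

        static inline long raw_syscall0(long nr)
        {
                long ret;
                asm volatile ("syscall"
                              : "=a" (ret)
                              : "a" (nr)
                              : "rcx", "r11", "memory");
                return ret;
        }
        /* raw_syscall0(__NR_getpid) returns the caller's pid */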
-
-ENTRY(system_call)
-       CFI_STARTPROC   simple
-       CFI_SIGNAL_FRAME
-       CFI_DEF_CFA     rsp,0
-       CFI_REGISTER    rip,rcx
-       /*CFI_REGISTER  rflags,r11*/
-
-       /*
-        * Interrupts are off on entry.
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
-       SWAPGS_UNSAFE_STACK
-       /*
-        * A hypervisor implementation might want to use a label
-        * after the swapgs, so that it can do the swapgs
-        * for the guest and jump here on syscall.
-        */
-GLOBAL(system_call_after_swapgs)
-
-       movq    %rsp,PER_CPU_VAR(rsp_scratch)
-       movq    PER_CPU_VAR(kernel_stack),%rsp
-
-       /* Construct struct pt_regs on stack */
-       pushq_cfi $__USER_DS                    /* pt_regs->ss */
-       pushq_cfi PER_CPU_VAR(rsp_scratch)      /* pt_regs->sp */
-       /*
-        * Re-enable interrupts.
-        * We use 'rsp_scratch' as a scratch space, hence irq-off block above
-        * must execute atomically in the face of possible interrupt-driven
-        * task preemption. We must enable interrupts only after we're done
-        * with using rsp_scratch:
-        */
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi       %r11                    /* pt_regs->flags */
-       pushq_cfi       $__USER_CS              /* pt_regs->cs */
-       pushq_cfi       %rcx                    /* pt_regs->ip */
-       CFI_REL_OFFSET rip,0
-       pushq_cfi_reg   rax                     /* pt_regs->orig_ax */
-       pushq_cfi_reg   rdi                     /* pt_regs->di */
-       pushq_cfi_reg   rsi                     /* pt_regs->si */
-       pushq_cfi_reg   rdx                     /* pt_regs->dx */
-       pushq_cfi_reg   rcx                     /* pt_regs->cx */
-       pushq_cfi       $-ENOSYS                /* pt_regs->ax */
-       pushq_cfi_reg   r8                      /* pt_regs->r8 */
-       pushq_cfi_reg   r9                      /* pt_regs->r9 */
-       pushq_cfi_reg   r10                     /* pt_regs->r10 */
-       pushq_cfi_reg   r11                     /* pt_regs->r11 */
-       sub     $(6*8),%rsp /* pt_regs->bp,bx,r12-15 not saved */
-       CFI_ADJUST_CFA_OFFSET 6*8
-
-       testl $_TIF_WORK_SYSCALL_ENTRY, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz tracesys
-system_call_fastpath:
-#if __SYSCALL_MASK == ~0
-       cmpq $__NR_syscall_max,%rax
-#else
-       andl $__SYSCALL_MASK,%eax
-       cmpl $__NR_syscall_max,%eax
-#endif
-       ja      1f      /* return -ENOSYS (already in pt_regs->ax) */
-       movq %r10,%rcx
-       call *sys_call_table(,%rax,8)
-       movq %rax,RAX(%rsp)
-1:
-/*
- * Syscall return path ending with SYSRET (fast path).
- * Has incompletely filled pt_regs.
- */
-       LOCKDEP_SYS_EXIT
-       /*
-        * We do not frame this tiny irq-off block with TRACE_IRQS_OFF/ON,
-        * it is too small to ever cause noticeable irq latency.
-        */
-       DISABLE_INTERRUPTS(CLBR_NONE)
-
-       /*
-        * We must check ti flags with interrupts (or at least preemption)
-        * off because we must *never* return to userspace without
-        * processing exit work that is enqueued if we're preempted here.
-        * In particular, returning to userspace with any of the one-shot
-        * flags (TIF_NOTIFY_RESUME, TIF_USER_RETURN_NOTIFY, etc) set is
-        * very bad.
-        */
-       testl $_TIF_ALLWORK_MASK, ASM_THREAD_INFO(TI_flags, %rsp, SIZEOF_PTREGS)
-       jnz int_ret_from_sys_call_irqs_off      /* Go to the slow path */
-
-       CFI_REMEMBER_STATE
-
-       RESTORE_C_REGS_EXCEPT_RCX_R11
-       movq    RIP(%rsp),%rcx
-       CFI_REGISTER    rip,rcx
-       movq    EFLAGS(%rsp),%r11
-       /*CFI_REGISTER  rflags,r11*/
-       movq    RSP(%rsp),%rsp
-       /*
-        * 64bit SYSRET restores rip from rcx,
-        * rflags from r11 (but RF and VM bits are forced to 0),
-        * cs and ss are loaded from MSRs.
-        * Restoration of rflags re-enables interrupts.
-        *
-        * NB: On AMD CPUs with the X86_BUG_SYSRET_SS_ATTRS bug, the ss
-        * descriptor is not reinitialized.  This means that we should
-        * avoid SYSRET with SS == NULL, which could happen if we schedule,
-        * exit the kernel, and re-enter using an interrupt vector.  (All
-        * interrupt entries on x86_64 set SS to NULL.)  We prevent that
-        * from happening by reloading SS in __switch_to.  (Actually
-        * detecting the failure in 64-bit userspace is tricky but can be
-        * done.)
-        */
-       USERGS_SYSRET64
-
-       CFI_RESTORE_STATE
-
-       /* Do syscall entry tracing */
-tracesys:
-       movq %rsp, %rdi
-       movl $AUDIT_ARCH_X86_64, %esi
-       call syscall_trace_enter_phase1
-       test %rax, %rax
-       jnz tracesys_phase2             /* if needed, run the slow path */
-       RESTORE_C_REGS_EXCEPT_RAX       /* else restore clobbered regs */
-       movq ORIG_RAX(%rsp), %rax
-       jmp system_call_fastpath        /*      and return to the fast path */
-
-tracesys_phase2:
-       SAVE_EXTRA_REGS
-       movq %rsp, %rdi
-       movl $AUDIT_ARCH_X86_64, %esi
-       movq %rax,%rdx
-       call syscall_trace_enter_phase2
-
-       /*
-        * Reload registers from stack in case ptrace changed them.
-        * We don't reload %rax because syscall_trace_enter_phase2() returned
-        * the value it wants us to use in the table lookup.
-        */
-       RESTORE_C_REGS_EXCEPT_RAX
-       RESTORE_EXTRA_REGS
-#if __SYSCALL_MASK == ~0
-       cmpq $__NR_syscall_max,%rax
-#else
-       andl $__SYSCALL_MASK,%eax
-       cmpl $__NR_syscall_max,%eax
-#endif
-       ja      1f      /* return -ENOSYS (already in pt_regs->ax) */
-       movq %r10,%rcx  /* fixup for C */
-       call *sys_call_table(,%rax,8)
-       movq %rax,RAX(%rsp)
-1:
-       /* Use IRET because user could have changed pt_regs->foo */
-
-/*
- * Syscall return path ending with IRET.
- * Has correct iret frame.
- */
-GLOBAL(int_ret_from_sys_call)
-       DISABLE_INTERRUPTS(CLBR_NONE)
-int_ret_from_sys_call_irqs_off: /* jumps come here from the irqs-off SYSRET path */
-       TRACE_IRQS_OFF
-       movl $_TIF_ALLWORK_MASK,%edi
-       /* edi: mask to check */
-GLOBAL(int_with_check)
-       LOCKDEP_SYS_EXIT_IRQ
-       GET_THREAD_INFO(%rcx)
-       movl TI_flags(%rcx),%edx
-       andl %edi,%edx
-       jnz   int_careful
-       andl    $~TS_COMPAT,TI_status(%rcx)
-       jmp     syscall_return
-
-       /* Either reschedule or signal or syscall exit tracking needed. */
-       /* First do a reschedule test. */
-       /* edx: work, edi: workmask */
-int_careful:
-       bt $TIF_NEED_RESCHED,%edx
-       jnc  int_very_careful
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi %rdi
-       SCHEDULE_USER
-       popq_cfi %rdi
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       jmp int_with_check
-
-       /* handle signals and tracing -- both require a full pt_regs */
-int_very_careful:
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       SAVE_EXTRA_REGS
-       /* Check for syscall exit trace */
-       testl $_TIF_WORK_SYSCALL_EXIT,%edx
-       jz int_signal
-       pushq_cfi %rdi
-       leaq 8(%rsp),%rdi       # &ptregs -> arg1
-       call syscall_trace_leave
-       popq_cfi %rdi
-       andl $~(_TIF_WORK_SYSCALL_EXIT|_TIF_SYSCALL_EMU),%edi
-       jmp int_restore_rest
-
-int_signal:
-       testl $_TIF_DO_NOTIFY_MASK,%edx
-       jz 1f
-       movq %rsp,%rdi          # &ptregs -> arg1
-       xorl %esi,%esi          # oldset -> arg2
-       call do_notify_resume
-1:     movl $_TIF_WORK_MASK,%edi
-int_restore_rest:
-       RESTORE_EXTRA_REGS
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       jmp int_with_check
-
-syscall_return:
-       /* The IRETQ could re-enable interrupts: */
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_IRETQ
-
-       /*
-        * Try to use SYSRET instead of IRET if we're returning to
-        * a completely clean 64-bit userspace context.
-        */
-       movq RCX(%rsp),%rcx
-       cmpq %rcx,RIP(%rsp)             /* RCX == RIP */
-       jne opportunistic_sysret_failed
-
-       /*
-        * On Intel CPUs, SYSRET with non-canonical RCX/RIP will #GP
-        * in kernel space.  This essentially lets the user take over
-        * the kernel, since userspace controls RSP.  It's not worth
-        * testing for canonicalness exactly -- this check detects any
-        * of the 17 high bits set, which is true for non-canonical
-        * or kernel addresses.  (This will pessimize vsyscall=native.
-        * Big deal.)
-        *
-        * If virtual addresses ever become wider, this will need
-        * to be updated to remain correct on both old and new CPUs.
-        */
-       .ifne __VIRTUAL_MASK_SHIFT - 47
-       .error "virtual address width changed -- SYSRET checks need update"
-       .endif
-       shr $__VIRTUAL_MASK_SHIFT, %rcx
-       jnz opportunistic_sysret_failed
-
-       cmpq $__USER_CS,CS(%rsp)        /* CS must match SYSRET */
-       jne opportunistic_sysret_failed
-
-       movq R11(%rsp),%r11
-       cmpq %r11,EFLAGS(%rsp)          /* R11 == RFLAGS */
-       jne opportunistic_sysret_failed
-
-       /*
-        * SYSRET can't restore RF.  SYSRET can restore TF, but unlike IRET,
-        * restoring TF results in a trap from userspace immediately after
-        * SYSRET.  This would cause an infinite loop whenever #DB happens
-        * with register state that satisfies the opportunistic SYSRET
-        * conditions.  For example, single-stepping this user code:
-        *
-        *           movq $stuck_here,%rcx
-        *           pushfq
-        *           popq %r11
-        *   stuck_here:
-        *
-        * would never get past 'stuck_here'.
-        */
-       testq $(X86_EFLAGS_RF|X86_EFLAGS_TF), %r11
-       jnz opportunistic_sysret_failed
-
-       /* nothing to check for RSP */
-
-       cmpq $__USER_DS,SS(%rsp)        /* SS must match SYSRET */
-       jne opportunistic_sysret_failed
-
-       /*
-        * We win!  This label is here just for ease of understanding
-        * perf profiles.  Nothing jumps here.
-        */
-syscall_return_via_sysret:
-       CFI_REMEMBER_STATE
-       /* r11 is already restored (see code above) */
-       RESTORE_C_REGS_EXCEPT_R11
-       movq RSP(%rsp),%rsp
-       USERGS_SYSRET64
-       CFI_RESTORE_STATE
-
-opportunistic_sysret_failed:
-       SWAPGS
-       jmp     restore_c_regs_and_iret
-       CFI_ENDPROC
-END(system_call)
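
The shift in the opportunistic-SYSRET path above reduces to a single C test (sketch):

        if (rcx >> __VIRTUAL_MASK_SHIFT)            /* any of bits 47..63 set */
                goto opportunistic_sysret_failed;   /* non-canonical or kernel RIP: use IRET */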
-
-
-       .macro FORK_LIKE func
-ENTRY(stub_\func)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8              /* offset 8: return address */
-       SAVE_EXTRA_REGS 8
-       jmp sys_\func
-       CFI_ENDPROC
-END(stub_\func)
-       .endm
-
-       FORK_LIKE  clone
-       FORK_LIKE  fork
-       FORK_LIKE  vfork
-
-ENTRY(stub_execve)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8
-       call    sys_execve
-return_from_execve:
-       testl   %eax, %eax
-       jz      1f
-       /* exec failed, can use fast SYSRET code path in this case */
-       ret
-1:
-       /* must use IRET code path (pt_regs->cs may have changed) */
-       addq    $8, %rsp
-       CFI_ADJUST_CFA_OFFSET -8
-       ZERO_EXTRA_REGS
-       movq    %rax,RAX(%rsp)
-       jmp     int_ret_from_sys_call
-       CFI_ENDPROC
-END(stub_execve)
-/*
- * Remaining execve stubs are only 7 bytes long.
- * ENTRY() often aligns to 16 bytes, which in this case has no benefits.
- */
-       .align  8
-GLOBAL(stub_execveat)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8
-       call    sys_execveat
-       jmp     return_from_execve
-       CFI_ENDPROC
-END(stub_execveat)
-
-#ifdef CONFIG_X86_X32_ABI
-       .align  8
-GLOBAL(stub_x32_execve)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8
-       call    compat_sys_execve
-       jmp     return_from_execve
-       CFI_ENDPROC
-END(stub_x32_execve)
-       .align  8
-GLOBAL(stub_x32_execveat)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8
-       call    compat_sys_execveat
-       jmp     return_from_execve
-       CFI_ENDPROC
-END(stub_x32_execveat)
-#endif
-
-#ifdef CONFIG_IA32_EMULATION
-       .align  8
-GLOBAL(stub32_execve)
-       CFI_STARTPROC
-       call    compat_sys_execve
-       jmp     return_from_execve
-       CFI_ENDPROC
-END(stub32_execve)
-       .align  8
-GLOBAL(stub32_execveat)
-       CFI_STARTPROC
-       call    compat_sys_execveat
-       jmp     return_from_execve
-       CFI_ENDPROC
-END(stub32_execveat)
-#endif
-
-/*
- * sigreturn is special because it needs to restore all registers on return.
- * This cannot be done with SYSRET, so use the IRET return path instead.
- */
-ENTRY(stub_rt_sigreturn)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8
-       /*
-        * SAVE_EXTRA_REGS result is not normally needed:
-        * sigreturn overwrites all pt_regs->GPREGS.
-        * But sigreturn can fail (!), and there is no easy way to detect that.
-        * To make sure RESTORE_EXTRA_REGS doesn't restore garbage on error,
-        * we SAVE_EXTRA_REGS here.
-        */
-       SAVE_EXTRA_REGS 8
-       call sys_rt_sigreturn
-return_from_stub:
-       addq    $8, %rsp
-       CFI_ADJUST_CFA_OFFSET -8
-       RESTORE_EXTRA_REGS
-       movq %rax,RAX(%rsp)
-       jmp int_ret_from_sys_call
-       CFI_ENDPROC
-END(stub_rt_sigreturn)
-
-#ifdef CONFIG_X86_X32_ABI
-ENTRY(stub_x32_rt_sigreturn)
-       CFI_STARTPROC
-       DEFAULT_FRAME 0, 8
-       SAVE_EXTRA_REGS 8
-       call sys32_x32_rt_sigreturn
-       jmp  return_from_stub
-       CFI_ENDPROC
-END(stub_x32_rt_sigreturn)
-#endif
-
-/*
- * A newly forked process directly context switches into this address.
- *
- * rdi: prev task we switched from
- */
-ENTRY(ret_from_fork)
-       DEFAULT_FRAME
-
-       LOCK ; btr $TIF_FORK,TI_flags(%r8)
-
-       pushq_cfi $0x0002
-       popfq_cfi                               # reset kernel eflags
-
-       call schedule_tail                      # rdi: 'prev' task parameter
-
-       RESTORE_EXTRA_REGS
-
-       testl $3,CS(%rsp)                       # from kernel_thread?
-
-       /*
-        * By the time we get here, we have no idea whether our pt_regs,
-        * ti flags, and ti status came from the 64-bit SYSCALL fast path,
-        * the slow path, or one of the ia32entry paths.
-        * Use IRET code path to return, since it can safely handle
-        * all of the above.
-        */
-       jnz     int_ret_from_sys_call
-
-       /* We came from kernel_thread */
-       /* nb: we depend on RESTORE_EXTRA_REGS above */
-       movq %rbp, %rdi
-       call *%rbx
-       movl $0, RAX(%rsp)
-       RESTORE_EXTRA_REGS
-       jmp int_ret_from_sys_call
-       CFI_ENDPROC
-END(ret_from_fork)
-
-/*
- * Build the entry stubs with some assembler magic.
- * We pack 1 stub into every 8-byte block.
- */
-       .align 8
-ENTRY(irq_entries_start)
-       INTR_FRAME
-    vector=FIRST_EXTERNAL_VECTOR
-    .rept (FIRST_SYSTEM_VECTOR - FIRST_EXTERNAL_VECTOR)
-       pushq_cfi $(~vector+0x80)       /* Note: always in signed byte range */
-    vector=vector+1
-       jmp     common_interrupt
-       CFI_ADJUST_CFA_OFFSET -8
-       .align  8
-    .endr
-       CFI_ENDPROC
-END(irq_entries_start)
-
-/*
- * Interrupt entry/exit.
- *
- * Interrupt entry points save only callee-clobbered registers in the fast path.
- *
- * Entry runs with interrupts off.
- */
-
-/* 0(%rsp): ~(interrupt number) */
-       .macro interrupt func
-       cld
-       /*
-        * Since nothing in interrupt handling code touches r12...r15 members
-        * of "struct pt_regs", and since interrupts can nest, we can save
-        * four stack slots and simultaneously provide
-        * an unwind-friendly stack layout by saving "truncated" pt_regs
-        * exactly up to rbp slot, without these members.
-        */
-       ALLOC_PT_GPREGS_ON_STACK -RBP
-       SAVE_C_REGS -RBP
-       /* this goes to 0(%rsp) for unwinder, not for saving the value: */
-       SAVE_EXTRA_REGS_RBP -RBP
-
-       leaq -RBP(%rsp),%rdi    /* arg1 for \func (pointer to pt_regs) */
-
-       testl $3, CS-RBP(%rsp)
-       je 1f
-       SWAPGS
-1:
-       /*
-        * Save previous stack pointer, optionally switch to interrupt stack.
-        * irq_count is used to check if a CPU is already on an interrupt stack
-        * or not. While this is essentially redundant with preempt_count it is
-        * a little cheaper to use a separate counter in the PDA (short of
-        * moving irq_enter into assembly, which would be too much work)
-        */
-       movq %rsp, %rsi
-       incl PER_CPU_VAR(irq_count)
-       cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-       CFI_DEF_CFA_REGISTER    rsi
-       pushq %rsi
-       /*
-        * For debugger:
-        * "CFA (Current Frame Address) is the value on stack + offset"
-        */
-       CFI_ESCAPE      0x0f /* DW_CFA_def_cfa_expression */, 6, \
-                       0x77 /* DW_OP_breg7 (rsp) */, 0, \
-                       0x06 /* DW_OP_deref */, \
-                       0x08 /* DW_OP_const1u */, SIZEOF_PTREGS-RBP, \
-                       0x22 /* DW_OP_plus */
-       /* We entered an interrupt context - irqs are off: */
-       TRACE_IRQS_OFF
-
-       call \func
-       .endm
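
The incl/cmovzq pair in the interrupt macro above implements this sketch (per-cpu with IRQs off, so no atomicity is needed; the assumption is that irq_count starts at -1, so only the outermost interrupt switches stacks):

        if (++irq_count == 0)           /* zero only on the outermost entry */
                rsp = irq_stack_ptr;    /* per-cpu hardirq stack */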
-
-       /*
-        * The interrupt stubs push (~vector+0x80) onto the stack and
-        * then jump to common_interrupt.
-        */
-       .p2align CONFIG_X86_L1_CACHE_SHIFT
-common_interrupt:
-       XCPT_FRAME
-       ASM_CLAC
-       addq $-0x80,(%rsp)              /* Adjust vector to [-256,-1] range */
-       interrupt do_IRQ
-       /* 0(%rsp): old RSP */
-ret_from_intr:
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       decl PER_CPU_VAR(irq_count)
-
-       /* Restore saved previous stack */
-       popq %rsi
-       CFI_DEF_CFA rsi,SIZEOF_PTREGS-RBP /* reg/off reset after def_cfa_expr */
-       /* return code expects complete pt_regs - adjust rsp accordingly: */
-       leaq -RBP(%rsi),%rsp
-       CFI_DEF_CFA_REGISTER    rsp
-       CFI_ADJUST_CFA_OFFSET   RBP
-
-       testl $3,CS(%rsp)
-       je retint_kernel
-       /* Interrupt came from user space */
-
-       GET_THREAD_INFO(%rcx)
-       /*
-        * %rcx: thread info. Interrupts off.
-        */
-retint_with_reschedule:
-       movl $_TIF_WORK_MASK,%edi
-retint_check:
-       LOCKDEP_SYS_EXIT_IRQ
-       movl TI_flags(%rcx),%edx
-       andl %edi,%edx
-       CFI_REMEMBER_STATE
-       jnz  retint_careful
-
-retint_swapgs:         /* return to user-space */
-       /*
-        * The iretq could re-enable interrupts:
-        */
-       DISABLE_INTERRUPTS(CLBR_ANY)
-       TRACE_IRQS_IRETQ
-
-       SWAPGS
-       jmp     restore_c_regs_and_iret
-
-/* Returning to kernel space */
-retint_kernel:
-#ifdef CONFIG_PREEMPT
-       /* Interrupts are off */
-       /* Check if we need preemption */
-       bt      $9,EFLAGS(%rsp) /* interrupts were off? */
-       jnc     1f
-0:     cmpl    $0,PER_CPU_VAR(__preempt_count)
-       jnz     1f
-       call    preempt_schedule_irq
-       jmp     0b
-1:
-#endif
-       /*
-        * The iretq could re-enable interrupts:
-        */
-       TRACE_IRQS_IRETQ
-
-/*
- * At this label, code paths which return to kernel and to user,
- * which come from interrupts/exception and from syscalls, merge.
- */
-restore_c_regs_and_iret:
-       RESTORE_C_REGS
-       REMOVE_PT_GPREGS_FROM_STACK 8
-
-irq_return:
-       INTERRUPT_RETURN
-
-ENTRY(native_iret)
-       /*
-        * Are we returning to a stack segment from the LDT?  Note: in
-        * 64-bit mode SS:RSP on the exception stack is always valid.
-        */
-#ifdef CONFIG_X86_ESPFIX64
-       testb $4,(SS-RIP)(%rsp)
-       jnz native_irq_return_ldt
-#endif
-
-.global native_irq_return_iret
-native_irq_return_iret:
-       /*
-        * This may fault.  Non-paranoid faults on return to userspace are
-        * handled by fixup_bad_iret.  These include #SS, #GP, and #NP.
-        * Double-faults due to espfix64 are handled in do_double_fault.
-        * Other faults here are fatal.
-        */
-       iretq
-
-#ifdef CONFIG_X86_ESPFIX64
-native_irq_return_ldt:
-       pushq_cfi %rax
-       pushq_cfi %rdi
-       SWAPGS
-       movq PER_CPU_VAR(espfix_waddr),%rdi
-       movq %rax,(0*8)(%rdi)   /* RAX */
-       movq (2*8)(%rsp),%rax   /* RIP */
-       movq %rax,(1*8)(%rdi)
-       movq (3*8)(%rsp),%rax   /* CS */
-       movq %rax,(2*8)(%rdi)
-       movq (4*8)(%rsp),%rax   /* RFLAGS */
-       movq %rax,(3*8)(%rdi)
-       movq (6*8)(%rsp),%rax   /* SS */
-       movq %rax,(5*8)(%rdi)
-       movq (5*8)(%rsp),%rax   /* RSP */
-       movq %rax,(4*8)(%rdi)
-       andl $0xffff0000,%eax
-       popq_cfi %rdi
-       orq PER_CPU_VAR(espfix_stack),%rax
-       SWAPGS
-       movq %rax,%rsp
-       popq_cfi %rax
-       jmp native_irq_return_iret
-#endif
-
-       /* edi: workmask, edx: work */
-retint_careful:
-       CFI_RESTORE_STATE
-       bt    $TIF_NEED_RESCHED,%edx
-       jnc   retint_signal
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       pushq_cfi %rdi
-       SCHEDULE_USER
-       popq_cfi %rdi
-       GET_THREAD_INFO(%rcx)
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       jmp retint_check
-
-retint_signal:
-       testl $_TIF_DO_NOTIFY_MASK,%edx
-       jz    retint_swapgs
-       TRACE_IRQS_ON
-       ENABLE_INTERRUPTS(CLBR_NONE)
-       SAVE_EXTRA_REGS
-       movq $-1,ORIG_RAX(%rsp)
-       xorl %esi,%esi          # oldset
-       movq %rsp,%rdi          # &pt_regs
-       call do_notify_resume
-       RESTORE_EXTRA_REGS
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       GET_THREAD_INFO(%rcx)
-       jmp retint_with_reschedule
-
-       CFI_ENDPROC
-END(common_interrupt)
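
The ~vector+0x80 encoding deserves a worked example: pushq with an 8-bit
immediate sign-extends, and ~vector+0x80 equals 127-vector, so every stub fits
in the short two-byte push; the addq $-0x80 above removes the bias again,
leaving ~vector in the ORIG_RAX slot for do_IRQ to invert. A standalone
user-space check of that arithmetic (not kernel code):

    #include <assert.h>

    int main(void)
    {
            for (long vector = 0; vector < 256; vector++) {
                    long pushed  = ~vector + 0x80;  /* what the stub pushes */
                    long orig_ax = pushed - 0x80;   /* addq $-0x80,(%rsp)   */

                    assert(pushed >= -128 && pushed <= 127); /* imm8 range  */
                    assert(orig_ax == ~vector);     /* do_IRQ recovers it   */
                    assert(orig_ax >= -256 && orig_ax <= -1);
            }
            return 0;
    }
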
-
-/*
- * APIC interrupts.
- */
-.macro apicinterrupt3 num sym do_sym
-ENTRY(\sym)
-       INTR_FRAME
-       ASM_CLAC
-       pushq_cfi $~(\num)
-.Lcommon_\sym:
-       interrupt \do_sym
-       jmp ret_from_intr
-       CFI_ENDPROC
-END(\sym)
-.endm
-
-#ifdef CONFIG_TRACING
-#define trace(sym) trace_##sym
-#define smp_trace(sym) smp_trace_##sym
-
-.macro trace_apicinterrupt num sym
-apicinterrupt3 \num trace(\sym) smp_trace(\sym)
-.endm
-#else
-.macro trace_apicinterrupt num sym do_sym
-.endm
-#endif
-
-.macro apicinterrupt num sym do_sym
-apicinterrupt3 \num \sym \do_sym
-trace_apicinterrupt \num \sym
-.endm
-
-#ifdef CONFIG_SMP
-apicinterrupt3 IRQ_MOVE_CLEANUP_VECTOR \
-       irq_move_cleanup_interrupt smp_irq_move_cleanup_interrupt
-apicinterrupt3 REBOOT_VECTOR \
-       reboot_interrupt smp_reboot_interrupt
-#endif
-
-#ifdef CONFIG_X86_UV
-apicinterrupt3 UV_BAU_MESSAGE \
-       uv_bau_message_intr1 uv_bau_message_interrupt
-#endif
-apicinterrupt LOCAL_TIMER_VECTOR \
-       apic_timer_interrupt smp_apic_timer_interrupt
-apicinterrupt X86_PLATFORM_IPI_VECTOR \
-       x86_platform_ipi smp_x86_platform_ipi
-
-#ifdef CONFIG_HAVE_KVM
-apicinterrupt3 POSTED_INTR_VECTOR \
-       kvm_posted_intr_ipi smp_kvm_posted_intr_ipi
-#endif
-
-#ifdef CONFIG_X86_MCE_THRESHOLD
-apicinterrupt THRESHOLD_APIC_VECTOR \
-       threshold_interrupt smp_threshold_interrupt
-#endif
-
-#ifdef CONFIG_X86_THERMAL_VECTOR
-apicinterrupt THERMAL_APIC_VECTOR \
-       thermal_interrupt smp_thermal_interrupt
-#endif
-
-#ifdef CONFIG_SMP
-apicinterrupt CALL_FUNCTION_SINGLE_VECTOR \
-       call_function_single_interrupt smp_call_function_single_interrupt
-apicinterrupt CALL_FUNCTION_VECTOR \
-       call_function_interrupt smp_call_function_interrupt
-apicinterrupt RESCHEDULE_VECTOR \
-       reschedule_interrupt smp_reschedule_interrupt
-#endif
-
-apicinterrupt ERROR_APIC_VECTOR \
-       error_interrupt smp_error_interrupt
-apicinterrupt SPURIOUS_APIC_VECTOR \
-       spurious_interrupt smp_spurious_interrupt
-
-#ifdef CONFIG_IRQ_WORK
-apicinterrupt IRQ_WORK_VECTOR \
-       irq_work_interrupt smp_irq_work_interrupt
-#endif
-
-/*
- * Exception entry points.
- */
-#define CPU_TSS_IST(x) PER_CPU_VAR(cpu_tss) + (TSS_ist + ((x) - 1) * 8)
-
-.macro idtentry sym do_sym has_error_code:req paranoid=0 shift_ist=-1
-ENTRY(\sym)
-       /* Sanity check */
-       .if \shift_ist != -1 && \paranoid == 0
-       .error "using shift_ist requires paranoid=1"
-       .endif
-
-       .if \has_error_code
-       XCPT_FRAME
-       .else
-       INTR_FRAME
-       .endif
-
-       ASM_CLAC
-       PARAVIRT_ADJUST_EXCEPTION_FRAME
-
-       .ifeq \has_error_code
-       pushq_cfi $-1                   /* ORIG_RAX: no syscall to restart */
-       .endif
-
-       ALLOC_PT_GPREGS_ON_STACK
-
-       .if \paranoid
-       .if \paranoid == 1
-       CFI_REMEMBER_STATE
-       testl $3, CS(%rsp)              /* If coming from userspace, switch */
-       jnz 1f                          /* stacks. */
-       .endif
-       call paranoid_entry
-       .else
-       call error_entry
-       .endif
-       /* returned flag: ebx=0: need swapgs on exit, ebx=1: don't need it */
-
-       DEFAULT_FRAME 0
-
-       .if \paranoid
-       .if \shift_ist != -1
-       TRACE_IRQS_OFF_DEBUG            /* reload IDT in case of recursion */
-       .else
-       TRACE_IRQS_OFF
-       .endif
-       .endif
-
-       movq %rsp,%rdi                  /* pt_regs pointer */
-
-       .if \has_error_code
-       movq ORIG_RAX(%rsp),%rsi        /* get error code */
-       movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
-       .else
-       xorl %esi,%esi                  /* no error code */
-       .endif
-
-       .if \shift_ist != -1
-       subq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
-       .endif
-
-       call \do_sym
-
-       .if \shift_ist != -1
-       addq $EXCEPTION_STKSZ, CPU_TSS_IST(\shift_ist)
-       .endif
-
-       /* these procedures expect "no swapgs" flag in ebx */
-       .if \paranoid
-       jmp paranoid_exit
-       .else
-       jmp error_exit
-       .endif
-
-       .if \paranoid == 1
-       CFI_RESTORE_STATE
-       /*
-        * Paranoid entry from userspace.  Switch stacks and treat it
-        * as a normal entry.  This means that paranoid handlers
-        * run in real process context if user_mode(regs).
-        */
-1:
-       call error_entry
-
-       DEFAULT_FRAME 0
-
-       movq %rsp,%rdi                  /* pt_regs pointer */
-       call sync_regs
-       movq %rax,%rsp                  /* switch stack */
-
-       movq %rsp,%rdi                  /* pt_regs pointer */
-
-       .if \has_error_code
-       movq ORIG_RAX(%rsp),%rsi        /* get error code */
-       movq $-1,ORIG_RAX(%rsp)         /* no syscall to restart */
-       .else
-       xorl %esi,%esi                  /* no error code */
-       .endif
-
-       call \do_sym
-
-       jmp error_exit                  /* %ebx: no swapgs flag */
-       .endif
-
-       CFI_ENDPROC
-END(\sym)
-.endm
-
-#ifdef CONFIG_TRACING
-.macro trace_idtentry sym do_sym has_error_code:req
-idtentry trace(\sym) trace(\do_sym) has_error_code=\has_error_code
-idtentry \sym \do_sym has_error_code=\has_error_code
-.endm
-#else
-.macro trace_idtentry sym do_sym has_error_code:req
-idtentry \sym \do_sym has_error_code=\has_error_code
-.endm
-#endif
-
-idtentry divide_error do_divide_error has_error_code=0
-idtentry overflow do_overflow has_error_code=0
-idtentry bounds do_bounds has_error_code=0
-idtentry invalid_op do_invalid_op has_error_code=0
-idtentry device_not_available do_device_not_available has_error_code=0
-idtentry double_fault do_double_fault has_error_code=1 paranoid=2
-idtentry coprocessor_segment_overrun do_coprocessor_segment_overrun has_error_code=0
-idtentry invalid_TSS do_invalid_TSS has_error_code=1
-idtentry segment_not_present do_segment_not_present has_error_code=1
-idtentry spurious_interrupt_bug do_spurious_interrupt_bug has_error_code=0
-idtentry coprocessor_error do_coprocessor_error has_error_code=0
-idtentry alignment_check do_alignment_check has_error_code=1
-idtentry simd_coprocessor_error do_simd_coprocessor_error has_error_code=0
-
-
-       /* Reload gs selector with exception handling */
-       /* edi:  new selector */
-ENTRY(native_load_gs_index)
-       CFI_STARTPROC
-       pushfq_cfi
-       DISABLE_INTERRUPTS(CLBR_ANY & ~CLBR_RDI)
-       SWAPGS
-gs_change:
-       movl %edi,%gs
-2:     mfence          /* workaround */
-       SWAPGS
-       popfq_cfi
-       ret
-       CFI_ENDPROC
-END(native_load_gs_index)
-
-       _ASM_EXTABLE(gs_change,bad_gs)
-       .section .fixup,"ax"
-       /* running with kernelgs */
-bad_gs:
-       SWAPGS                  /* switch back to user gs */
-       xorl %eax,%eax
-       movl %eax,%gs
-       jmp  2b
-       .previous
-
-/* Call softirq on interrupt stack. Interrupts are off. */
-ENTRY(do_softirq_own_stack)
-       CFI_STARTPROC
-       pushq_cfi %rbp
-       CFI_REL_OFFSET rbp,0
-       mov  %rsp,%rbp
-       CFI_DEF_CFA_REGISTER rbp
-       incl PER_CPU_VAR(irq_count)
-       cmove PER_CPU_VAR(irq_stack_ptr),%rsp
-       push  %rbp                      # backlink for old unwinder
-       call __do_softirq
-       leaveq
-       CFI_RESTORE             rbp
-       CFI_DEF_CFA_REGISTER    rsp
-       CFI_ADJUST_CFA_OFFSET   -8
-       decl PER_CPU_VAR(irq_count)
-       ret
-       CFI_ENDPROC
-END(do_softirq_own_stack)
-
-#ifdef CONFIG_XEN
-idtentry xen_hypervisor_callback xen_do_hypervisor_callback has_error_code=0
-
-/*
- * A note on the "critical region" in our callback handler.
- * We want to avoid stacking callback handlers due to events occurring
- * during handling of the last event. To do this, we keep events disabled
- * until we've done all processing. HOWEVER, we must enable events before
- * popping the stack frame (can't be done atomically) and so it would still
- * be possible to get enough handler activations to overflow the stack.
- * Although unlikely, bugs of that kind are hard to track down, so we'd
- * like to avoid the possibility.
- * So, on entry to the handler we detect whether we interrupted an
- * existing activation in its critical region -- if so, we pop the current
- * activation and restart the handler using the previous one.
- */
-ENTRY(xen_do_hypervisor_callback)   # do_hypervisor_callback(struct pt_regs *)
-       CFI_STARTPROC
-/*
- * Since we don't modify %rdi, evtchn_do_upcall(struct pt_regs *) will
- * see the correct pointer to the pt_regs
- */
-       movq %rdi, %rsp            # we don't return, adjust the stack frame
-       CFI_ENDPROC
-       DEFAULT_FRAME
-11:    incl PER_CPU_VAR(irq_count)
-       movq %rsp,%rbp
-       CFI_DEF_CFA_REGISTER rbp
-       cmovzq PER_CPU_VAR(irq_stack_ptr),%rsp
-       pushq %rbp                      # backlink for old unwinder
-       call xen_evtchn_do_upcall
-       popq %rsp
-       CFI_DEF_CFA_REGISTER rsp
-       decl PER_CPU_VAR(irq_count)
-#ifndef CONFIG_PREEMPT
-       call xen_maybe_preempt_hcall
-#endif
-       jmp  error_exit
-       CFI_ENDPROC
-END(xen_do_hypervisor_callback)
-
-/*
- * Hypervisor uses this for application faults while it executes.
- * We get here for two reasons:
- *  1. Fault while reloading DS, ES, FS or GS
- *  2. Fault while executing IRET
- * Category 1 we do not need to fix up as Xen has already reloaded all segment
- * registers that could be reloaded and zeroed the others.
- * Category 2 we fix up by killing the current process. We cannot use the
- * normal Linux return path in this case because if we use the IRET hypercall
- * to pop the stack frame we end up in an infinite loop of failsafe callbacks.
- * We distinguish between categories by comparing each saved segment register
- * with its current contents: any discrepancy means we are in category 1.
- */
-ENTRY(xen_failsafe_callback)
-       INTR_FRAME 1 (6*8)
-       /*CFI_REL_OFFSET gs,GS*/
-       /*CFI_REL_OFFSET fs,FS*/
-       /*CFI_REL_OFFSET es,ES*/
-       /*CFI_REL_OFFSET ds,DS*/
-       CFI_REL_OFFSET r11,8
-       CFI_REL_OFFSET rcx,0
-       movw %ds,%cx
-       cmpw %cx,0x10(%rsp)
-       CFI_REMEMBER_STATE
-       jne 1f
-       movw %es,%cx
-       cmpw %cx,0x18(%rsp)
-       jne 1f
-       movw %fs,%cx
-       cmpw %cx,0x20(%rsp)
-       jne 1f
-       movw %gs,%cx
-       cmpw %cx,0x28(%rsp)
-       jne 1f
-       /* All segments match their saved values => Category 2 (Bad IRET). */
-       movq (%rsp),%rcx
-       CFI_RESTORE rcx
-       movq 8(%rsp),%r11
-       CFI_RESTORE r11
-       addq $0x30,%rsp
-       CFI_ADJUST_CFA_OFFSET -0x30
-       pushq_cfi $0    /* RIP */
-       pushq_cfi %r11
-       pushq_cfi %rcx
-       jmp general_protection
-       CFI_RESTORE_STATE
-1:     /* Segment mismatch => Category 1 (Bad segment). Retry the IRET. */
-       movq (%rsp),%rcx
-       CFI_RESTORE rcx
-       movq 8(%rsp),%r11
-       CFI_RESTORE r11
-       addq $0x30,%rsp
-       CFI_ADJUST_CFA_OFFSET -0x30
-       pushq_cfi $-1 /* orig_ax = -1 => not a system call */
-       ALLOC_PT_GPREGS_ON_STACK
-       SAVE_C_REGS
-       SAVE_EXTRA_REGS
-       jmp error_exit
-       CFI_ENDPROC
-END(xen_failsafe_callback)
-
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
-       xen_hvm_callback_vector xen_evtchn_do_upcall
-
-#endif /* CONFIG_XEN */
-
-#if IS_ENABLED(CONFIG_HYPERV)
-apicinterrupt3 HYPERVISOR_CALLBACK_VECTOR \
-       hyperv_callback_vector hyperv_vector_handler
-#endif /* CONFIG_HYPERV */
-
-idtentry debug do_debug has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry int3 do_int3 has_error_code=0 paranoid=1 shift_ist=DEBUG_STACK
-idtentry stack_segment do_stack_segment has_error_code=1
-#ifdef CONFIG_XEN
-idtentry xen_debug do_debug has_error_code=0
-idtentry xen_int3 do_int3 has_error_code=0
-idtentry xen_stack_segment do_stack_segment has_error_code=1
-#endif
-idtentry general_protection do_general_protection has_error_code=1
-trace_idtentry page_fault do_page_fault has_error_code=1
-#ifdef CONFIG_KVM_GUEST
-idtentry async_page_fault do_async_page_fault has_error_code=1
-#endif
-#ifdef CONFIG_X86_MCE
-idtentry machine_check has_error_code=0 paranoid=1 do_sym=*machine_check_vector(%rip)
-#endif
-
-/*
- * Save all registers in pt_regs, and switch gs if needed.
- * Use a slow but surefire "are we in kernel?" check.
- * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
- */
-ENTRY(paranoid_entry)
-       XCPT_FRAME 1 15*8
-       cld
-       SAVE_C_REGS 8
-       SAVE_EXTRA_REGS 8
-       movl $1,%ebx
-       movl $MSR_GS_BASE,%ecx
-       rdmsr
-       testl %edx,%edx
-       js 1f   /* negative -> in kernel */
-       SWAPGS
-       xorl %ebx,%ebx
-1:     ret
-       CFI_ENDPROC
-END(paranoid_entry)
-
-/*
- * "Paranoid" exit path from exception stack.  This is invoked
- * only on return from non-NMI IST interrupts that came
- * from kernel space.
- *
- * We may be returning to very strange contexts (e.g. very early
- * in syscall entry), so checking for preemption here would
- * be complicated.  Fortunately, there's no good reason
- * to try to handle preemption here.
- */
-/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
-ENTRY(paranoid_exit)
-       DEFAULT_FRAME
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF_DEBUG
-       testl %ebx,%ebx                         /* swapgs needed? */
-       jnz paranoid_exit_no_swapgs
-       TRACE_IRQS_IRETQ
-       SWAPGS_UNSAFE_STACK
-       jmp paranoid_exit_restore
-paranoid_exit_no_swapgs:
-       TRACE_IRQS_IRETQ_DEBUG
-paranoid_exit_restore:
-       RESTORE_EXTRA_REGS
-       RESTORE_C_REGS
-       REMOVE_PT_GPREGS_FROM_STACK 8
-       INTERRUPT_RETURN
-       CFI_ENDPROC
-END(paranoid_exit)
-
-/*
- * Save all registers in pt_regs, and switch gs if needed.
- * Return: ebx=0: need swapgs on exit, ebx=1: otherwise
- */
-ENTRY(error_entry)
-       XCPT_FRAME 1 15*8
-       cld
-       SAVE_C_REGS 8
-       SAVE_EXTRA_REGS 8
-       xorl %ebx,%ebx
-       testl $3,CS+8(%rsp)
-       je error_kernelspace
-error_swapgs:
-       SWAPGS
-error_sti:
-       TRACE_IRQS_OFF
-       ret
-
-       /*
-        * There are two places in the kernel that can potentially fault with
-        * usergs. Handle them here.  B stepping K8s sometimes report a
-        * truncated RIP for IRET exceptions returning to compat mode. Check
-        * for these here too.
-        */
-error_kernelspace:
-       CFI_REL_OFFSET rcx, RCX+8
-       incl %ebx
-       leaq native_irq_return_iret(%rip),%rcx
-       cmpq %rcx,RIP+8(%rsp)
-       je error_bad_iret
-       movl %ecx,%eax  /* zero extend */
-       cmpq %rax,RIP+8(%rsp)
-       je bstep_iret
-       cmpq $gs_change,RIP+8(%rsp)
-       je error_swapgs
-       jmp error_sti
-
-bstep_iret:
-       /* Fix truncated RIP */
-       movq %rcx,RIP+8(%rsp)
-       /* fall through */
-
-error_bad_iret:
-       SWAPGS
-       mov %rsp,%rdi
-       call fixup_bad_iret
-       mov %rax,%rsp
-       decl %ebx       /* Return to usergs */
-       jmp error_sti
-       CFI_ENDPROC
-END(error_entry)
-
-
-/* On entry, ebx is "no swapgs" flag (1: don't need swapgs, 0: need it) */
-ENTRY(error_exit)
-       DEFAULT_FRAME
-       movl %ebx,%eax
-       RESTORE_EXTRA_REGS
-       DISABLE_INTERRUPTS(CLBR_NONE)
-       TRACE_IRQS_OFF
-       GET_THREAD_INFO(%rcx)
-       testl %eax,%eax
-       jne retint_kernel
-       LOCKDEP_SYS_EXIT_IRQ
-       movl TI_flags(%rcx),%edx
-       movl $_TIF_WORK_MASK,%edi
-       andl %edi,%edx
-       jnz retint_careful
-       jmp retint_swapgs
-       CFI_ENDPROC
-END(error_exit)
-
-/* Runs on exception stack */
-ENTRY(nmi)
-       INTR_FRAME
-       PARAVIRT_ADJUST_EXCEPTION_FRAME
-       /*
-        * We allow breakpoints in NMIs. If a breakpoint occurs, then
-        * the iretq it performs will take us out of NMI context.
-        * This means that we can have nested NMIs where the next
-        * NMI is using the top of the stack of the previous NMI. We
-        * can't let it execute because the nested NMI will corrupt the
-        * stack of the previous NMI. NMI handlers are not re-entrant
-        * anyway.
-        *
-        * To handle this case we do the following:
-        *  Check a special location on the stack that contains
-        *  a variable that is set when NMIs are executing.
-        *  The interrupted task's stack is also checked to see if it
-        *  is an NMI stack.
-        *  If the variable is not set and the stack is not the NMI
-        *  stack then:
-        *    o Set the special variable on the stack
-        *    o Copy the interrupt frame into a "saved" location on the stack
-        *    o Copy the interrupt frame into a "copy" location on the stack
-        *    o Continue processing the NMI
-        *  If the variable is set or the previous stack is the NMI stack:
-        *    o Modify the "copy" location to jump to the repeate_nmi
-        *    o return back to the first NMI
-        *
-        * Now on exit of the first NMI, we first clear the stack variable.
-        * The NMI stack will tell any nested NMIs at that point that it is
-        * nested. Then we pop the stack normally with iret, and if there was
-        * a nested NMI that updated the copy interrupt stack frame, a
-        * jump will be made to the repeat_nmi code that will handle the second
-        * NMI.
-        */
-
-       /* Use %rdx as our temp variable throughout */
-       pushq_cfi %rdx
-       CFI_REL_OFFSET rdx, 0
-
-       /*
-        * If %cs was not the kernel segment, then the NMI triggered in user
-        * space, which means it is definitely not nested.
-        */
-       cmpl $__KERNEL_CS, 16(%rsp)
-       jne first_nmi
-
-       /*
-        * Check the special variable on the stack to see if NMIs are
-        * executing.
-        */
-       cmpl $1, -8(%rsp)
-       je nested_nmi
-
-       /*
-        * Now test if the previous stack was an NMI stack.
-        * We need the double check: checking the NMI stack covers the race
-        * where the first NMI clears the variable before returning.
-        * We check the variable because the first NMI could be in a
-        * breakpoint routine using a breakpoint stack.
-        */
-       lea     6*8(%rsp), %rdx
-       /* Compare the NMI stack (rdx) with the stack we came from (4*8(%rsp)) */
-       cmpq    %rdx, 4*8(%rsp)
-       /* If the stack pointer is above the NMI stack, this is a normal NMI */
-       ja      first_nmi
-       subq    $EXCEPTION_STKSZ, %rdx
-       cmpq    %rdx, 4*8(%rsp)
-       /* If it is below the NMI stack, it is a normal NMI */
-       jb      first_nmi
-       /* Ah, it is within the NMI stack, treat it as nested */
-
-       CFI_REMEMBER_STATE
-
-nested_nmi:
-       /*
-        * Do nothing if we interrupted the fixup in repeat_nmi.
-        * It's about to repeat the NMI handler, so we are fine
-        * with ignoring this one.
-        */
-       movq $repeat_nmi, %rdx
-       cmpq 8(%rsp), %rdx
-       ja 1f
-       movq $end_repeat_nmi, %rdx
-       cmpq 8(%rsp), %rdx
-       ja nested_nmi_out
-
-1:
-       /* Set up the interrupted NMI's stack to jump to repeat_nmi */
-       leaq -1*8(%rsp), %rdx
-       movq %rdx, %rsp
-       CFI_ADJUST_CFA_OFFSET 1*8
-       leaq -10*8(%rsp), %rdx
-       pushq_cfi $__KERNEL_DS
-       pushq_cfi %rdx
-       pushfq_cfi
-       pushq_cfi $__KERNEL_CS
-       pushq_cfi $repeat_nmi
-
-       /* Put stack back */
-       addq $(6*8), %rsp
-       CFI_ADJUST_CFA_OFFSET -6*8
-
-nested_nmi_out:
-       popq_cfi %rdx
-       CFI_RESTORE rdx
-
-       /* No need to check faults here */
-       INTERRUPT_RETURN
-
-       CFI_RESTORE_STATE
-first_nmi:
-       /*
-        * Because nested NMIs will use the pushed location that we
-        * stored in rdx, we must keep that space available.
-        * Here's what our stack frame will look like:
-        * +-------------------------+
-        * | original SS             |
-        * | original Return RSP     |
-        * | original RFLAGS         |
-        * | original CS             |
-        * | original RIP            |
-        * +-------------------------+
-        * | temp storage for rdx    |
-        * +-------------------------+
-        * | NMI executing variable  |
-        * +-------------------------+
-        * | copied SS               |
-        * | copied Return RSP       |
-        * | copied RFLAGS           |
-        * | copied CS               |
-        * | copied RIP              |
-        * +-------------------------+
-        * | Saved SS                |
-        * | Saved Return RSP        |
-        * | Saved RFLAGS            |
-        * | Saved CS                |
-        * | Saved RIP               |
-        * +-------------------------+
-        * | pt_regs                 |
-        * +-------------------------+
-        *
-        * The saved stack frame is used to fix up the copied stack frame
-        * that a nested NMI may change to make the interrupted NMI's iret
-        * jump to repeat_nmi. The original stack frame and the temp storage
-        * are also used by nested NMIs and cannot be trusted on exit.
-        */
-       /* Do not pop rdx, nested NMIs will corrupt that part of the stack */
-       movq (%rsp), %rdx
-       CFI_RESTORE rdx
-
-       /* Set the NMI executing variable on the stack. */
-       pushq_cfi $1
-
-       /*
-        * Leave room for the "copied" frame
-        */
-       subq $(5*8), %rsp
-       CFI_ADJUST_CFA_OFFSET 5*8
-
-       /* Copy the stack frame to the Saved frame */
-       .rept 5
-       pushq_cfi 11*8(%rsp)
-       .endr
-       CFI_DEF_CFA_OFFSET 5*8
-
-       /* Everything up to here is safe from nested NMIs */
-
-       /*
-        * If there was a nested NMI, the first NMI's iret will return
-        * here. But NMIs are still enabled and we can take another
-        * nested NMI. The nested NMI checks the interrupted RIP to see
-        * if it is between repeat_nmi and end_repeat_nmi, and if so
-        * it will just return, as we are about to repeat an NMI anyway.
-        * This makes it safe to copy to the stack frame that a nested
-        * NMI will update.
-        */
-repeat_nmi:
-       /*
-        * Update the stack variable to say we are still in NMI (the update
-        * is benign for the non-repeat case, where 1 was pushed just above
-        * to this very stack slot).
-        */
-       movq $1, 10*8(%rsp)
-
-       /* Make another copy, this one may be modified by nested NMIs */
-       addq $(10*8), %rsp
-       CFI_ADJUST_CFA_OFFSET -10*8
-       .rept 5
-       pushq_cfi -6*8(%rsp)
-       .endr
-       subq $(5*8), %rsp
-       CFI_DEF_CFA_OFFSET 5*8
-end_repeat_nmi:
-
-       /*
-        * Everything below this point can be preempted by a nested
-        * NMI if the first NMI took an exception and reset our iret stack
-        * so that we repeat another NMI.
-        */
-       pushq_cfi $-1           /* ORIG_RAX: no syscall to restart */
-       ALLOC_PT_GPREGS_ON_STACK
-
-       /*
-        * Use paranoid_entry to handle SWAPGS, but no need to use paranoid_exit
-        * as we should not be calling schedule in NMI context,
-        * even with normal interrupts enabled. An NMI should not be
-        * setting NEED_RESCHED or anything that normal interrupts and
-        * exceptions might do.
-        */
-       call paranoid_entry
-       DEFAULT_FRAME 0
-
-       /*
-        * Save off the CR2 register. If we take a page fault in the NMI then
-        * it could corrupt the CR2 value. If the NMI preempts a page fault
-        * handler before it was able to read the CR2 register, and then the
-        * NMI itself takes a page fault, the page fault that was preempted
-        * will read the information from the NMI page fault and not the
-        * original fault. Save it off and restore it if it changes.
-        * Use the r12 callee-saved register.
-        */
-       movq %cr2, %r12
-
-       /* paranoidentry do_nmi, 0; without TRACE_IRQS_OFF */
-       movq %rsp,%rdi
-       movq $-1,%rsi
-       call do_nmi
-
-       /* Did the NMI take a page fault? Restore cr2 if it did */
-       movq %cr2, %rcx
-       cmpq %rcx, %r12
-       je 1f
-       movq %r12, %cr2
-1:
-
-       testl %ebx,%ebx                         /* swapgs needed? */
-       jnz nmi_restore
-nmi_swapgs:
-       SWAPGS_UNSAFE_STACK
-nmi_restore:
-       RESTORE_EXTRA_REGS
-       RESTORE_C_REGS
-       /* Pop the extra iret frame at once */
-       REMOVE_PT_GPREGS_FROM_STACK 6*8
-
-       /* Clear the NMI executing stack variable */
-       movq $0, 5*8(%rsp)
-       jmp irq_return
-       CFI_ENDPROC
-END(nmi)
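
The nesting rules from the long comment at the top of ENTRY(nmi) condense into
a small decision procedure. A C sketch (names invented for illustration; both
the variable and the stack bounds must be checked, since the first NMI may be
off on a breakpoint stack, or may have already cleared the variable on its way
out):

    enum nmi_kind { FIRST_NMI, NESTED_NMI };

    static enum nmi_kind classify_nmi(unsigned long cs, unsigned long sp,
                                      int nmi_executing,
                                      unsigned long nmi_stack_top,
                                      unsigned long nmi_stack_size,
                                      unsigned long kernel_cs)
    {
            if (cs != kernel_cs)
                    return FIRST_NMI;   /* from user space: never nested  */
            if (nmi_executing)
                    return NESTED_NMI;  /* first NMI still in progress:
                                           redirect its copied iret frame
                                           to repeat_nmi                  */
            if (sp <= nmi_stack_top && sp > nmi_stack_top - nmi_stack_size)
                    return NESTED_NMI;  /* interrupted frame lives on the
                                           NMI stack itself               */
            return FIRST_NMI;
    }
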
-
-ENTRY(ignore_sysret)
-       CFI_STARTPROC
-       mov $-ENOSYS,%eax
-       sysret
-       CFI_ENDPROC
-END(ignore_sysret)
-
diff --git a/arch/x86/kernel/fpu/Makefile b/arch/x86/kernel/fpu/Makefile
new file mode 100644 (file)
index 0000000..68279ef
--- /dev/null
@@ -0,0 +1,5 @@
+#
+# Build rules for the FPU support code:
+#
+
+obj-y                          += init.o bugs.o core.o regset.o signal.o xstate.o
diff --git a/arch/x86/kernel/fpu/bugs.c b/arch/x86/kernel/fpu/bugs.c
new file mode 100644 (file)
index 0000000..dd9ca9b
--- /dev/null
@@ -0,0 +1,71 @@
+/*
+ * x86 FPU bug checks:
+ */
+#include <asm/fpu/internal.h>
+
+/*
+ * Boot time CPU/FPU FDIV bug detection code:
+ */
+
+static double __initdata x = 4195835.0;
+static double __initdata y = 3145727.0;
+
+/*
+ * This used to check for exceptions..
+ * However, it turns out that to support that,
+ * the XMM trap handlers basically had to
+ * be buggy. So let's have a correct XMM trap
+ * handler, and forget about printing out
+ * some status at boot.
+ *
+ * We should really only care about bugs here
+ * anyway. Not features.
+ */
+static void __init check_fpu(void)
+{
+       u32 cr0_saved;
+       s32 fdiv_bug;
+
+       /* We might have CR0::TS set already, clear it: */
+       cr0_saved = read_cr0();
+       write_cr0(cr0_saved & ~X86_CR0_TS);
+
+       kernel_fpu_begin();
+
+       /*
+        * trap_init() enabled FXSR and company _before_ testing for FP
+        * problems here.
+        *
+        * Test for the FDIV bug: http://en.wikipedia.org/wiki/Fdiv_bug
+        */
+       __asm__("fninit\n\t"
+               "fldl %1\n\t"
+               "fdivl %2\n\t"
+               "fmull %2\n\t"
+               "fldl %1\n\t"
+               "fsubp %%st,%%st(1)\n\t"
+               "fistpl %0\n\t"
+               "fwait\n\t"
+               "fninit"
+               : "=m" (*&fdiv_bug)
+               : "m" (*&x), "m" (*&y));
+
+       kernel_fpu_end();
+
+       write_cr0(cr0_saved);
+
+       if (fdiv_bug) {
+               set_cpu_bug(&boot_cpu_data, X86_BUG_FDIV);
+               pr_warn("Hmm, FPU with FDIV bug\n");
+       }
+}
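
The probe above can be reproduced from user space: on a correct FPU the residue
x - (x/y)*y for this operand pair is exactly 0.0, while the flawed early
Pentiums return 256. A sketch (assuming the compiler really keeps the division
on the x87 unit, e.g. a 32-bit build with -mfpmath=387 -O0):

    #include <stdio.h>

    int main(void)
    {
            volatile double x = 4195835.0, y = 3145727.0;
            double residue = x - (x / y) * y;   /* 0.0 on a correct FPU */

            printf("fdiv residue: %f -> %s\n",
                   residue, residue ? "FDIV bug" : "ok");
            return 0;
    }
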
+
+void __init fpu__init_check_bugs(void)
+{
+       /*
+        * kernel_fpu_begin/end() in check_fpu() relies on the patched
+        * alternative instructions.
+        */
+       if (cpu_has_fpu)
+               check_fpu();
+}
diff --git a/arch/x86/kernel/fpu/core.c b/arch/x86/kernel/fpu/core.c
new file mode 100644 (file)
index 0000000..79de954
--- /dev/null
@@ -0,0 +1,523 @@
+/*
+ *  Copyright (C) 1994 Linus Torvalds
+ *
+ *  Pentium III FXSR, SSE support
+ *  General FPU state handling cleanups
+ *     Gareth Hughes <gareth@valinux.com>, May 2000
+ */
+#include <asm/fpu/internal.h>
+#include <asm/fpu/regset.h>
+#include <asm/fpu/signal.h>
+#include <asm/traps.h>
+
+#include <linux/hardirq.h>
+
+/*
+ * Represents the initial FPU state. It's mostly (but not completely) zeroes,
+ * depending on the FPU hardware format:
+ */
+union fpregs_state init_fpstate __read_mostly;
+
+/*
+ * Track whether the kernel is using the FPU state
+ * currently.
+ *
+ * This flag is used:
+ *
+ *   - by IRQ context code to potentially use the FPU
+ *     if it's unused.
+ *
+ *   - to debug kernel_fpu_begin()/end() correctness
+ */
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+
+/*
+ * Track which context is using the FPU on the CPU:
+ */
+DEFINE_PER_CPU(struct fpu *, fpu_fpregs_owner_ctx);
+
+static void kernel_fpu_disable(void)
+{
+       WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
+       this_cpu_write(in_kernel_fpu, true);
+}
+
+static void kernel_fpu_enable(void)
+{
+       WARN_ON_FPU(!this_cpu_read(in_kernel_fpu));
+       this_cpu_write(in_kernel_fpu, false);
+}
+
+static bool kernel_fpu_disabled(void)
+{
+       return this_cpu_read(in_kernel_fpu);
+}
+
+/*
+ * Were we in an interrupt that interrupted kernel mode?
+ *
+ * In the lazy-FPU case we can do a kernel_fpu_begin/end() pair *ONLY* if that
+ * pair does nothing at all: the thread must not have fpu (so
+ * that we don't try to save the FPU state), and TS must
+ * be set (so that the clts/stts pair does nothing that is
+ * visible in the interrupted kernel thread).
+ *
+ * The eagerfpu case is the exception, where we return true: in the
+ * likely case the thread has FPU state loaded, but we are not going
+ * to set/clear TS.
+ */
+static bool interrupted_kernel_fpu_idle(void)
+{
+       if (kernel_fpu_disabled())
+               return false;
+
+       if (use_eager_fpu())
+               return true;
+
+       return !current->thread.fpu.fpregs_active && (read_cr0() & X86_CR0_TS);
+}
+
+/*
+ * Were we in user mode (or vm86 mode) when we were
+ * interrupted?
+ *
+ * Doing kernel_fpu_begin/end() is ok if we are running
+ * in an interrupt context from user mode - we'll just
+ * save the FPU state as required.
+ */
+static bool interrupted_user_mode(void)
+{
+       struct pt_regs *regs = get_irq_regs();
+       return regs && user_mode(regs);
+}
+
+/*
+ * Can we use the FPU in kernel mode with the
+ * whole "kernel_fpu_begin/end()" sequence?
+ *
+ * It's always ok in process context (ie "not interrupt")
+ * but it is sometimes ok even from an irq.
+ */
+bool irq_fpu_usable(void)
+{
+       return !in_interrupt() ||
+               interrupted_user_mode() ||
+               interrupted_kernel_fpu_idle();
+}
+EXPORT_SYMBOL(irq_fpu_usable);
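
A typical call pattern for the API above, e.g. a driver that wants SIMD only
when it is safe (crc_sse() and crc_generic() are hypothetical routines,
invented for the sketch):

    void crc_generic(const void *data, unsigned long len);  /* hypothetical */
    void crc_sse(const void *data, unsigned long len);      /* hypothetical */

    static void crc_update(const void *data, unsigned long len)
    {
            if (!irq_fpu_usable()) {
                    crc_generic(data, len);    /* scalar fallback           */
                    return;
            }
            kernel_fpu_begin();                /* saves live user FPU state */
            crc_sse(data, len);                /* free to clobber XMM regs  */
            kernel_fpu_end();
    }
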
+
+void __kernel_fpu_begin(void)
+{
+       struct fpu *fpu = &current->thread.fpu;
+
+       WARN_ON_FPU(!irq_fpu_usable());
+
+       kernel_fpu_disable();
+
+       if (fpu->fpregs_active) {
+               copy_fpregs_to_fpstate(fpu);
+       } else {
+               this_cpu_write(fpu_fpregs_owner_ctx, NULL);
+               __fpregs_activate_hw();
+       }
+}
+EXPORT_SYMBOL(__kernel_fpu_begin);
+
+void __kernel_fpu_end(void)
+{
+       struct fpu *fpu = &current->thread.fpu;
+
+       if (fpu->fpregs_active)
+               copy_kernel_to_fpregs(&fpu->state);
+       else
+               __fpregs_deactivate_hw();
+
+       kernel_fpu_enable();
+}
+EXPORT_SYMBOL(__kernel_fpu_end);
+
+void kernel_fpu_begin(void)
+{
+       preempt_disable();
+       __kernel_fpu_begin();
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+
+void kernel_fpu_end(void)
+{
+       __kernel_fpu_end();
+       preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
+/*
+ * CR0::TS save/restore functions:
+ */
+int irq_ts_save(void)
+{
+       /*
+        * If in process context and not atomic, we can take a spurious DNA fault.
+        * Otherwise, doing clts() in process context requires disabling preemption
+        * or some heavy lifting like kernel_fpu_begin().
+        */
+       if (!in_atomic())
+               return 0;
+
+       if (read_cr0() & X86_CR0_TS) {
+               clts();
+               return 1;
+       }
+
+       return 0;
+}
+EXPORT_SYMBOL_GPL(irq_ts_save);
+
+void irq_ts_restore(int TS_state)
+{
+       if (TS_state)
+               stts();
+}
+EXPORT_SYMBOL_GPL(irq_ts_restore);
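
The intended pairing, going by the comments above (a sketch; the middle stands
for any short FPU-touching operation):

    static void short_fpu_section(void)
    {
            int ts = irq_ts_save(); /* clears CR0.TS, but only if it was set
                                       and only in atomic context            */

            /* ... brief FPU access goes here ... */

            irq_ts_restore(ts);     /* re-sets TS iff irq_ts_save() cleared it */
    }
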
+
+/*
+ * Save the FPU state (mark it for reload if necessary):
+ *
+ * This only ever gets called for the current task.
+ */
+void fpu__save(struct fpu *fpu)
+{
+       WARN_ON_FPU(fpu != &current->thread.fpu);
+
+       preempt_disable();
+       if (fpu->fpregs_active) {
+               if (!copy_fpregs_to_fpstate(fpu))
+                       fpregs_deactivate(fpu);
+       }
+       preempt_enable();
+}
+EXPORT_SYMBOL_GPL(fpu__save);
+
+/*
+ * Legacy x87 fpstate state init:
+ */
+static inline void fpstate_init_fstate(struct fregs_state *fp)
+{
+       fp->cwd = 0xffff037fu;
+       fp->swd = 0xffff0000u;
+       fp->twd = 0xffffffffu;
+       fp->fos = 0xffff0000u;
+}
+
+void fpstate_init(union fpregs_state *state)
+{
+       if (!cpu_has_fpu) {
+               fpstate_init_soft(&state->soft);
+               return;
+       }
+
+       memset(state, 0, xstate_size);
+
+       if (cpu_has_fxsr)
+               fpstate_init_fxstate(&state->fxsave);
+       else
+               fpstate_init_fstate(&state->fsave);
+}
+EXPORT_SYMBOL_GPL(fpstate_init);
+
+/*
+ * Copy the current task's FPU state to a new task's FPU context.
+ *
+ * In both the 'eager' and the 'lazy' case we save hardware registers
+ * directly to the destination buffer.
+ */
+static void fpu_copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+{
+       WARN_ON_FPU(src_fpu != &current->thread.fpu);
+
+       /*
+        * Don't let 'init optimized' areas of the XSAVE area
+        * leak into the child task:
+        */
+       if (use_eager_fpu())
+               memset(&dst_fpu->state.xsave, 0, xstate_size);
+
+       /*
+        * Save current FPU registers directly into the child
+        * FPU context, without any memory-to-memory copying.
+        *
+        * If the FPU context got destroyed in the process (FNSAVE
+        * done on old CPUs) then copy it back into the source
+        * context and mark the current task for lazy restore.
+        *
+        * We have to do all this with preemption disabled,
+        * mostly because of the FNSAVE case, because in that
+        * case we must not allow preemption in the window
+        * between the FNSAVE and us marking the context lazy.
+        *
+        * It shouldn't be an issue as even FNSAVE is plenty
+        * fast in terms of critical section length.
+        */
+       preempt_disable();
+       if (!copy_fpregs_to_fpstate(dst_fpu)) {
+               memcpy(&src_fpu->state, &dst_fpu->state, xstate_size);
+               fpregs_deactivate(src_fpu);
+       }
+       preempt_enable();
+}
+
+int fpu__copy(struct fpu *dst_fpu, struct fpu *src_fpu)
+{
+       dst_fpu->counter = 0;
+       dst_fpu->fpregs_active = 0;
+       dst_fpu->last_cpu = -1;
+
+       if (src_fpu->fpstate_active)
+               fpu_copy(dst_fpu, src_fpu);
+
+       return 0;
+}
+
+/*
+ * Activate the current task's in-memory FPU context,
+ * if it has not been used before:
+ */
+void fpu__activate_curr(struct fpu *fpu)
+{
+       WARN_ON_FPU(fpu != &current->thread.fpu);
+
+       if (!fpu->fpstate_active) {
+               fpstate_init(&fpu->state);
+
+               /* Safe to do for the current task: */
+               fpu->fpstate_active = 1;
+       }
+}
+EXPORT_SYMBOL_GPL(fpu__activate_curr);
+
+/*
+ * This function must be called before we read a task's fpstate.
+ *
+ * If the task has not used the FPU before then initialize its
+ * fpstate.
+ *
+ * If the task has used the FPU before then save it.
+ */
+void fpu__activate_fpstate_read(struct fpu *fpu)
+{
+       /*
+        * If fpregs are active (in the current CPU), then
+        * copy them to the fpstate:
+        */
+       if (fpu->fpregs_active) {
+               fpu__save(fpu);
+       } else {
+               if (!fpu->fpstate_active) {
+                       fpstate_init(&fpu->state);
+
+                       /* Safe to do for current and for stopped child tasks: */
+                       fpu->fpstate_active = 1;
+               }
+       }
+}
+
+/*
+ * This function must be called before we write a task's fpstate.
+ *
+ * If the task has used the FPU before then unlazy it.
+ * If the task has not used the FPU before then initialize its fpstate.
+ *
+ * After this function call, after registers in the fpstate are
+ * modified and the child task has woken up, the child task will
+ * restore the modified FPU state from the modified context. If we
+ * didn't clear its lazy status here then the lazy in-registers
+ * state pending on its former CPU could be restored, corrupting
+ * the modifications.
+ */
+void fpu__activate_fpstate_write(struct fpu *fpu)
+{
+       /*
+        * Only stopped child tasks can be used to modify the FPU
+        * state in the fpstate buffer:
+        */
+       WARN_ON_FPU(fpu == &current->thread.fpu);
+
+       if (fpu->fpstate_active) {
+               /* Invalidate any lazy state: */
+               fpu->last_cpu = -1;
+       } else {
+               fpstate_init(&fpu->state);
+
+               /* Safe to do for stopped child tasks: */
+               fpu->fpstate_active = 1;
+       }
+}
+
+/*
+ * 'fpu__restore()' is called to copy FPU registers from
+ * the FPU fpstate to the live hw registers and to activate
+ * access to the hardware registers, so that FPU instructions
+ * can be used afterwards.
+ *
+ * Must be called with kernel preemption disabled (for example
+ * with local interrupts disabled, as it is in the case of
+ * do_device_not_available()).
+ */
+void fpu__restore(struct fpu *fpu)
+{
+       fpu__activate_curr(fpu);
+
+       /* Avoid __kernel_fpu_begin() right after fpregs_activate() */
+       kernel_fpu_disable();
+       fpregs_activate(fpu);
+       copy_kernel_to_fpregs(&fpu->state);
+       fpu->counter++;
+       kernel_fpu_enable();
+}
+EXPORT_SYMBOL_GPL(fpu__restore);
+
+/*
+ * Drops current FPU state: deactivates the fpregs and
+ * the fpstate. NOTE: it still leaves previous contents
+ * in the fpregs in the eager-FPU case.
+ *
+ * This function can be used in cases where we know that
+ * a state-restore is coming: either an explicit one,
+ * or a reschedule.
+ */
+void fpu__drop(struct fpu *fpu)
+{
+       preempt_disable();
+       fpu->counter = 0;
+
+       if (fpu->fpregs_active) {
+               /* Ignore delayed exceptions from user space */
+               asm volatile("1: fwait\n"
+                            "2:\n"
+                            _ASM_EXTABLE(1b, 2b));
+               fpregs_deactivate(fpu);
+       }
+
+       fpu->fpstate_active = 0;
+
+       preempt_enable();
+}
+
+/*
+ * Clear FPU registers by setting them up from
+ * the init fpstate:
+ */
+static inline void copy_init_fpstate_to_fpregs(void)
+{
+       if (use_xsave())
+               copy_kernel_to_xregs(&init_fpstate.xsave, -1);
+       else
+               copy_kernel_to_fxregs(&init_fpstate.fxsave);
+}
+
+/*
+ * Clear the FPU state back to init state.
+ *
+ * Called by sys_execve(), by the signal handler code and by various
+ * error paths.
+ */
+void fpu__clear(struct fpu *fpu)
+{
+       WARN_ON_FPU(fpu != &current->thread.fpu); /* Almost certainly an anomaly */
+
+       if (!use_eager_fpu()) {
+               /* FPU state will be reallocated lazily at the first use. */
+               fpu__drop(fpu);
+       } else {
+               if (!fpu->fpstate_active) {
+                       fpu__activate_curr(fpu);
+                       user_fpu_begin();
+               }
+               copy_init_fpstate_to_fpregs();
+       }
+}
+
+/*
+ * x87 math exception handling:
+ */
+
+static inline unsigned short get_fpu_cwd(struct fpu *fpu)
+{
+       if (cpu_has_fxsr) {
+               return fpu->state.fxsave.cwd;
+       } else {
+               return (unsigned short)fpu->state.fsave.cwd;
+       }
+}
+
+static inline unsigned short get_fpu_swd(struct fpu *fpu)
+{
+       if (cpu_has_fxsr) {
+               return fpu->state.fxsave.swd;
+       } else {
+               return (unsigned short)fpu->state.fsave.swd;
+       }
+}
+
+static inline unsigned short get_fpu_mxcsr(struct fpu *fpu)
+{
+       if (cpu_has_xmm) {
+               return fpu->state.fxsave.mxcsr;
+       } else {
+               return MXCSR_DEFAULT;
+       }
+}
+
+int fpu__exception_code(struct fpu *fpu, int trap_nr)
+{
+       int err;
+
+       if (trap_nr == X86_TRAP_MF) {
+               unsigned short cwd, swd;
+               /*
+                * (~cwd & swd) will mask out exceptions that are not set to unmasked
+                * status.  0x3f are the exception bits in these regs, 0x200 is the
+                * C1 bit you need in case of a stack fault, 0x040 is the stack
+                * fault bit.  We should only be taking one exception at a time,
+                * so if this combination doesn't produce any single exception,
+                * then we have a bad program that isn't synchronizing its FPU usage
+                * and it will suffer the consequences since we won't be able to
+                * fully reproduce the context of the exception
+                */
+               cwd = get_fpu_cwd(fpu);
+               swd = get_fpu_swd(fpu);
+
+               err = swd & ~cwd;
+       } else {
+               /*
+                * The SIMD FPU exceptions are handled a little differently, as there
+                * is only a single status/control register.  Thus, to determine which
+                * unmasked exception was caught we must mask the exception mask bits
+                * at 0x1f80, and then use these to mask the exception bits at 0x3f.
+                */
+               unsigned short mxcsr = get_fpu_mxcsr(fpu);
+               err = ~(mxcsr >> 7) & mxcsr;
+       }
+
+       if (err & 0x001) {      /* Invalid op */
+               /*
+                * swd & 0x240 == 0x040: Stack Underflow
+                * swd & 0x240 == 0x240: Stack Overflow
+                * User must clear the SF bit (0x40) if set
+                */
+               return FPE_FLTINV;
+       } else if (err & 0x004) { /* Divide by Zero */
+               return FPE_FLTDIV;
+       } else if (err & 0x008) { /* Overflow */
+               return FPE_FLTOVF;
+       } else if (err & 0x012) { /* Denormal, Underflow */
+               return FPE_FLTUND;
+       } else if (err & 0x020) { /* Precision */
+               return FPE_FLTRES;
+       }
+
+       /*
+        * If we're using IRQ 13, or supposedly even some X86_TRAP_MF
+        * implementations, it's possible we get a spurious trap, which
+        * is not an error.
+        */
+       return 0;
+}
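
The >>7 in the SIMD branch lines the MXCSR mask bits (bits 7-12) up with the
status bits (bits 0-5), so ~(mxcsr >> 7) & mxcsr keeps exactly the exceptions
that are both raised and unmasked. A standalone worked example (hypothetical
register value: divide-by-zero unmasked and raised, all other masks left at
the 0x1f80 default):

    #include <assert.h>

    int main(void)
    {
            unsigned short mxcsr = (0x1f80 & ~0x200) | 0x004;  /* = 0x1d84 */
            unsigned short err   = ~(mxcsr >> 7) & mxcsr;

            assert(err & 0x004);    /* #Z survives: raised and unmasked */
            assert(!(err & 0x001)); /* invalid-op doesn't: still masked */
            return 0;
    }
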
diff --git a/arch/x86/kernel/fpu/init.c b/arch/x86/kernel/fpu/init.c
new file mode 100644 (file)
index 0000000..fc878fe
--- /dev/null
@@ -0,0 +1,354 @@
+/*
+ * x86 FPU boot time init code:
+ */
+#include <asm/fpu/internal.h>
+#include <asm/tlbflush.h>
+
+/*
+ * Initialize the TS bit in CR0 according to the style of context-switches
+ * we are using:
+ */
+static void fpu__init_cpu_ctx_switch(void)
+{
+       if (!cpu_has_eager_fpu)
+               stts();
+       else
+               clts();
+}
+
+/*
+ * Initialize the registers found in all CPUs, CR0 and CR4:
+ */
+static void fpu__init_cpu_generic(void)
+{
+       unsigned long cr0;
+       unsigned long cr4_mask = 0;
+
+       if (cpu_has_fxsr)
+               cr4_mask |= X86_CR4_OSFXSR;
+       if (cpu_has_xmm)
+               cr4_mask |= X86_CR4_OSXMMEXCPT;
+       if (cr4_mask)
+               cr4_set_bits(cr4_mask);
+
+       cr0 = read_cr0();
+       cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
+       if (!cpu_has_fpu)
+               cr0 |= X86_CR0_EM;
+       write_cr0(cr0);
+
+       /* Flush out any pending x87 state: */
+       asm volatile ("fninit");
+}
+
+/*
+ * Enable all supported FPU features. Called when a CPU is brought online:
+ */
+void fpu__init_cpu(void)
+{
+       fpu__init_cpu_generic();
+       fpu__init_cpu_xstate();
+       fpu__init_cpu_ctx_switch();
+}
+
+/*
+ * The earliest FPU detection code.
+ *
+ * Set the X86_FEATURE_FPU CPU-capability bit based on
+ * trying to execute an actual sequence of FPU instructions:
+ */
+static void fpu__init_system_early_generic(struct cpuinfo_x86 *c)
+{
+       unsigned long cr0;
+       u16 fsw, fcw;
+
+       fsw = fcw = 0xffff;
+
+       cr0 = read_cr0();
+       cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
+       write_cr0(cr0);
+
+       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
+                    : "+m" (fsw), "+m" (fcw));
+
+       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
+               set_cpu_cap(c, X86_FEATURE_FPU);
+       else
+               clear_cpu_cap(c, X86_FEATURE_FPU);
+
+#ifndef CONFIG_MATH_EMULATION
+       if (!cpu_has_fpu) {
+               pr_emerg("x86/fpu: Giving up, no FPU found and no math emulation present\n");
+               for (;;)
+                       asm volatile("hlt");
+       }
+#endif
+}
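
The magic constants in the probe follow from the architecture: FNINIT loads
FCW with 0x037f and clears FSW, so a working FPU leaves
(fcw & 0x103f) == 0x003f, while the 0xffff the probe variables are seeded with
fails the test. A quick standalone check of that arithmetic:

    #include <assert.h>

    int main(void)
    {
            unsigned short fcw_fpu = 0x037f;  /* FCW after a real FNINIT     */
            unsigned short fcw_bad = 0xffff;  /* seed value: no FPU wrote it */

            assert((fcw_fpu & 0x103f) == 0x003f);
            assert((fcw_bad & 0x103f) != 0x003f);
            return 0;
    }
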
+
+/*
+ * Boot time FPU feature detection code:
+ */
+unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
+
+static void __init fpu__init_system_mxcsr(void)
+{
+       unsigned int mask = 0;
+
+       if (cpu_has_fxsr) {
+               struct fxregs_state fx_tmp __aligned(32) = { };
+
+               asm volatile("fxsave %0" : "+m" (fx_tmp));
+
+               mask = fx_tmp.mxcsr_mask;
+
+               /*
+                * If zero then use the default features mask,
+                * which has all features set, except the
+                * denormals-are-zero feature bit:
+                */
+               if (mask == 0)
+                       mask = 0x0000ffbf;
+       }
+       mxcsr_feature_mask &= mask;
+}
+
+/*
+ * Once per bootup FPU initialization sequences that will run on most x86 CPUs:
+ */
+static void __init fpu__init_system_generic(void)
+{
+       /*
+        * Set up the legacy init FPU context. (xstate init might overwrite this
+        * with a more modern format, if the CPU supports it.)
+        */
+       fpstate_init_fxstate(&init_fpstate.fxsave);
+
+       fpu__init_system_mxcsr();
+}
+
+/*
+ * Size of the FPU context state. All tasks in the system use the
+ * same context size, regardless of what portion they use.
+ * This is inherent to the XSAVE architecture which puts all state
+ * components into a single, contiguous memory block:
+ */
+unsigned int xstate_size;
+EXPORT_SYMBOL_GPL(xstate_size);
+
+/*
+ * Set up the xstate_size based on the legacy FPU context size.
+ *
+ * We set this up first, and later it will be overwritten by
+ * fpu__init_system_xstate() if the CPU knows about xstates.
+ */
+static void __init fpu__init_system_xstate_size_legacy(void)
+{
+       static int on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
+       /*
+        * Note that xstate_size might be overwritten later during
+        * fpu__init_system_xstate().
+        */
+
+       if (!cpu_has_fpu) {
+               /*
+                * Disable xsave as we do not support it if i387
+                * emulation is enabled.
+                */
+               setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+               setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+               xstate_size = sizeof(struct swregs_state);
+       } else {
+               if (cpu_has_fxsr)
+                       xstate_size = sizeof(struct fxregs_state);
+               else
+                       xstate_size = sizeof(struct fregs_state);
+       }
+       /*
+        * Quirk: we don't yet handle the XSAVES* instructions
+        * correctly, as we don't correctly convert between
+        * standard and compacted format when interfacing
+        * with user-space - so disable it for now.
+        *
+        * The difference is small: with recent CPUs the
+        * compacted format is only marginally smaller than
+        * the standard FPU state format.
+        *
+        * ( This is easy to backport while we are fixing
+        *   XSAVES* support. )
+        */
+       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+}
+
+/*
+ * FPU context switching strategies:
+ *
+ * Contrary to popular belief, we don't do lazy FPU saves, due to the
+ * task migration complications it brings on SMP - we only do
+ * lazy FPU restores.
+ *
+ * 'lazy' is the traditional strategy, which is based on setting
+ * CR0::TS to 1 during context-switch (instead of doing a full
+ * restore of the FPU state), which causes the first FPU instruction
+ * after the context switch (whenever it is executed) to fault - at
+ * which point we lazily restore the FPU state into FPU registers.
+ *
+ * Tasks are of course under no obligation to execute FPU instructions,
+ * so it can easily happen that another context-switch occurs without
+ * a single FPU instruction being executed. If we eventually switch
+ * back to the original task (that still owns the FPU) then we have
+ * not only avoided the restores along the way, but we also have the
+ * FPU ready to be used by the original task.
+ *
+ * 'eager' switching is used on modern CPUs, there we switch the FPU
+ * state during every context switch, regardless of whether the task
+ * has used FPU instructions in that time slice or not. This is done
+ * because modern FPU context saving instructions are able to optimize
+ * state saving and restoration in hardware: they can detect both
+ * unused and untouched FPU state and optimize accordingly.
+ *
+ * [ Note that even in 'lazy' mode we might optimize context switches
+ *   to use 'eager' restores, if we detect that a task is using the FPU
+ *   frequently. See the fpu->counter logic in fpu/internal.h for that. ]
+ */
+static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
+
+static int __init eager_fpu_setup(char *s)
+{
+       if (!strcmp(s, "on"))
+               eagerfpu = ENABLE;
+       else if (!strcmp(s, "off"))
+               eagerfpu = DISABLE;
+       else if (!strcmp(s, "auto"))
+               eagerfpu = AUTO;
+       return 1;
+}
+__setup("eagerfpu=", eager_fpu_setup);
+
+/*
+ * Pick the FPU context switching strategy:
+ */
+static void __init fpu__init_system_ctx_switch(void)
+{
+       static bool on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
+       WARN_ON_FPU(current->thread.fpu.fpstate_active);
+       current_thread_info()->status = 0;
+
+       /* Auto enable eagerfpu for xsaveopt */
+       if (cpu_has_xsaveopt && eagerfpu != DISABLE)
+               eagerfpu = ENABLE;
+
+       if (xfeatures_mask & XSTATE_EAGER) {
+               if (eagerfpu == DISABLE) {
+                       pr_err("x86/fpu: eagerfpu switching disabled, disabling the following xstate features: 0x%llx.\n",
+                              xfeatures_mask & XSTATE_EAGER);
+                       xfeatures_mask &= ~XSTATE_EAGER;
+               } else {
+                       eagerfpu = ENABLE;
+               }
+       }
+
+       if (eagerfpu == ENABLE)
+               setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
+
+       printk(KERN_INFO "x86/fpu: Using '%s' FPU context switches.\n", eagerfpu == ENABLE ? "eager" : "lazy");
+}
+
+/*
+ * Called on the boot CPU once per system bootup, to set up the initial
+ * FPU state that is later cloned into all processes:
+ */
+void __init fpu__init_system(struct cpuinfo_x86 *c)
+{
+       fpu__init_system_early_generic(c);
+
+       /*
+        * The FPU has to be operational for some of the
+        * later FPU init activities:
+        */
+       fpu__init_cpu();
+
+       /*
+        * But don't leave CR0::TS set yet, as some of the FPU setup
+        * methods depend on being able to execute FPU instructions
+        * that will fault on a set TS, such as the FXSAVE in
+        * fpu__init_system_mxcsr().
+        */
+       clts();
+
+       fpu__init_system_generic();
+       fpu__init_system_xstate_size_legacy();
+       fpu__init_system_xstate();
+
+       fpu__init_system_ctx_switch();
+}
+
+/*
+ * Boot parameter to turn off FPU support and fall back to math-emu:
+ */
+static int __init no_387(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_FPU);
+       return 1;
+}
+__setup("no387", no_387);
+
+/*
+ * Disable all xstate CPU features:
+ */
+static int __init x86_noxsave_setup(char *s)
+{
+       if (strlen(s))
+               return 0;
+
+       setup_clear_cpu_cap(X86_FEATURE_XSAVE);
+       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+       setup_clear_cpu_cap(X86_FEATURE_AVX);
+       setup_clear_cpu_cap(X86_FEATURE_AVX2);
+
+       return 1;
+}
+__setup("noxsave", x86_noxsave_setup);
+
+/*
+ * Disable the XSAVEOPT instruction specifically:
+ */
+static int __init x86_noxsaveopt_setup(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
+
+       return 1;
+}
+__setup("noxsaveopt", x86_noxsaveopt_setup);
+
+/*
+ * Disable the XSAVES instruction:
+ */
+static int __init x86_noxsaves_setup(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_XSAVES);
+
+       return 1;
+}
+__setup("noxsaves", x86_noxsaves_setup);
+
+/*
+ * Disable FX save/restore and SSE support:
+ */
+static int __init x86_nofxsr_setup(char *s)
+{
+       setup_clear_cpu_cap(X86_FEATURE_FXSR);
+       setup_clear_cpu_cap(X86_FEATURE_FXSR_OPT);
+       setup_clear_cpu_cap(X86_FEATURE_XMM);
+
+       return 1;
+}
+__setup("nofxsr", x86_nofxsr_setup);
diff --git a/arch/x86/kernel/fpu/regset.c b/arch/x86/kernel/fpu/regset.c
new file mode 100644 (file)
index 0000000..dc60810
--- /dev/null
@@ -0,0 +1,356 @@
+/*
+ * FPU register's regset abstraction, for ptrace, core dumps, etc.
+ */
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
+#include <asm/fpu/regset.h>
+
+/*
+ * The xstateregs_active() routine is the same as the regset_fpregs_active() routine,
+ * as the "regset->n" for the xstate regset will be updated based on the feature
+ * capabilities supported by xsave.
+ */
+int regset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+       struct fpu *target_fpu = &target->thread.fpu;
+
+       return target_fpu->fpstate_active ? regset->n : 0;
+}
+
+int regset_xregset_fpregs_active(struct task_struct *target, const struct user_regset *regset)
+{
+       struct fpu *target_fpu = &target->thread.fpu;
+
+       return (cpu_has_fxsr && target_fpu->fpstate_active) ? regset->n : 0;
+}
+
+int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
+               unsigned int pos, unsigned int count,
+               void *kbuf, void __user *ubuf)
+{
+       struct fpu *fpu = &target->thread.fpu;
+
+       if (!cpu_has_fxsr)
+               return -ENODEV;
+
+       fpu__activate_fpstate_read(fpu);
+       fpstate_sanitize_xstate(fpu);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                  &fpu->state.fxsave, 0, -1);
+}
+
+int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
+               unsigned int pos, unsigned int count,
+               const void *kbuf, const void __user *ubuf)
+{
+       struct fpu *fpu = &target->thread.fpu;
+       int ret;
+
+       if (!cpu_has_fxsr)
+               return -ENODEV;
+
+       fpu__activate_fpstate_write(fpu);
+       fpstate_sanitize_xstate(fpu);
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                &fpu->state.fxsave, 0, -1);
+
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+        */
+       fpu->state.fxsave.mxcsr &= mxcsr_feature_mask;
+
+       /*
+        * update the header bits in the xsave header, indicating the
+        * presence of FP and SSE state.
+        */
+       if (cpu_has_xsave)
+               fpu->state.xsave.header.xfeatures |= XSTATE_FPSSE;
+
+       return ret;
+}
+
+int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
+               unsigned int pos, unsigned int count,
+               void *kbuf, void __user *ubuf)
+{
+       struct fpu *fpu = &target->thread.fpu;
+       struct xregs_state *xsave;
+       int ret;
+
+       if (!cpu_has_xsave)
+               return -ENODEV;
+
+       fpu__activate_fpstate_read(fpu);
+
+       xsave = &fpu->state.xsave;
+
+       /*
+        * Copy the 48 bytes defined by the software first into the xstate
+        * memory layout in the thread struct, so that we can copy the entire
+        * xstateregs to the user using one user_regset_copyout().
+        */
+       memcpy(&xsave->i387.sw_reserved,
+               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
+       /*
+        * Copy the xstate memory layout.
+        */
+       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       return ret;
+}
+
+int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
+                 unsigned int pos, unsigned int count,
+                 const void *kbuf, const void __user *ubuf)
+{
+       struct fpu *fpu = &target->thread.fpu;
+       struct xregs_state *xsave;
+       int ret;
+
+       if (!cpu_has_xsave)
+               return -ENODEV;
+
+       fpu__activate_fpstate_write(fpu);
+
+       xsave = &fpu->state.xsave;
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
+       /*
+        * mxcsr reserved bits must be masked to zero for security reasons.
+        */
+       xsave->i387.mxcsr &= mxcsr_feature_mask;
+       xsave->header.xfeatures &= xfeatures_mask;
+       /*
+        * These bits must be zero.
+        */
+       memset(&xsave->header.reserved, 0, 48);
+
+       return ret;
+}
+
+#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
+
+/*
+ * FPU tag word conversions.
+ */
+
+static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
+{
+       unsigned int tmp; /* to avoid 16 bit prefixes in the code */
+
+       /* Transform each pair of bits into 01 (valid) or 00 (empty) */
+       tmp = ~twd;
+       tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
+       /* and move the valid bits to the lower byte. */
+       tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
+       tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
+       tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
+
+       return tmp;
+}
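A worked example of the bit-folding above (editorial, not part of the patch): an i387 tag word of 0xfffe marks st(0) as "special" (tag 10) and st(1)..st(7) as "empty" (tag 11), so the FXSR tag byte must end up with only bit 0 set:

/*
 * twd                        = 0xfffe   st(0) = 10, st(1..7) = 11 (empty)
 * tmp = ~twd                 = 0x0001
 * (tmp | tmp >> 1) & 0x5555  = 0x0001   one "in use" bit per 2-bit pair
 * (tmp | tmp >> 1) & 0x3333  = 0x0001
 * (tmp | tmp >> 2) & 0x0f0f  = 0x0001
 * (tmp | tmp >> 4) & 0x00ff  = 0x01     FXSR tag byte: only st(0) in use
 */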
+
+#define FPREG_ADDR(f, n)       ((void *)&(f)->st_space + (n) * 16)
+#define FP_EXP_TAG_VALID       0
+#define FP_EXP_TAG_ZERO                1
+#define FP_EXP_TAG_SPECIAL     2
+#define FP_EXP_TAG_EMPTY       3
+
+static inline u32 twd_fxsr_to_i387(struct fxregs_state *fxsave)
+{
+       struct _fpxreg *st;
+       u32 tos = (fxsave->swd >> 11) & 7;
+       u32 twd = (unsigned long) fxsave->twd;
+       u32 tag;
+       u32 ret = 0xffff0000u;
+       int i;
+
+       for (i = 0; i < 8; i++, twd >>= 1) {
+               if (twd & 0x1) {
+                       st = FPREG_ADDR(fxsave, (i - tos) & 7);
+
+                       switch (st->exponent & 0x7fff) {
+                       case 0x7fff:
+                               tag = FP_EXP_TAG_SPECIAL;
+                               break;
+                       case 0x0000:
+                               if (!st->significand[0] &&
+                                   !st->significand[1] &&
+                                   !st->significand[2] &&
+                                   !st->significand[3])
+                                       tag = FP_EXP_TAG_ZERO;
+                               else
+                                       tag = FP_EXP_TAG_SPECIAL;
+                               break;
+                       default:
+                               if (st->significand[3] & 0x8000)
+                                       tag = FP_EXP_TAG_VALID;
+                               else
+                                       tag = FP_EXP_TAG_SPECIAL;
+                               break;
+                       }
+               } else {
+                       tag = FP_EXP_TAG_EMPTY;
+               }
+               ret |= tag << (2 * i);
+       }
+       return ret;
+}
+
+/*
+ * FXSR floating point environment conversions.
+ */
+
+void
+convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
+{
+       struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
+       struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
+       struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
+       int i;
+
+       env->cwd = fxsave->cwd | 0xffff0000u;
+       env->swd = fxsave->swd | 0xffff0000u;
+       env->twd = twd_fxsr_to_i387(fxsave);
+
+#ifdef CONFIG_X86_64
+       env->fip = fxsave->rip;
+       env->foo = fxsave->rdp;
+       /*
+        * These should actually be ds/cs at FPU exception time, but
+        * that information is not available in 64-bit mode.
+        */
+       env->fcs = task_pt_regs(tsk)->cs;
+       if (tsk == current) {
+               savesegment(ds, env->fos);
+       } else {
+               env->fos = tsk->thread.ds;
+       }
+       env->fos |= 0xffff0000;
+#else
+       env->fip = fxsave->fip;
+       env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
+       env->foo = fxsave->foo;
+       env->fos = fxsave->fos;
+#endif
+
+       for (i = 0; i < 8; ++i)
+               memcpy(&to[i], &from[i], sizeof(to[0]));
+}
+
+void convert_to_fxsr(struct task_struct *tsk,
+                    const struct user_i387_ia32_struct *env)
+
+{
+       struct fxregs_state *fxsave = &tsk->thread.fpu.state.fxsave;
+       struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
+       struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
+       int i;
+
+       fxsave->cwd = env->cwd;
+       fxsave->swd = env->swd;
+       fxsave->twd = twd_i387_to_fxsr(env->twd);
+       fxsave->fop = (u16) ((u32) env->fcs >> 16);
+#ifdef CONFIG_X86_64
+       fxsave->rip = env->fip;
+       fxsave->rdp = env->foo;
+       /* cs and ds ignored */
+#else
+       fxsave->fip = env->fip;
+       fxsave->fcs = (env->fcs & 0xffff);
+       fxsave->foo = env->foo;
+       fxsave->fos = env->fos;
+#endif
+
+       for (i = 0; i < 8; ++i)
+               memcpy(&to[i], &from[i], sizeof(from[0]));
+}
+
+int fpregs_get(struct task_struct *target, const struct user_regset *regset,
+              unsigned int pos, unsigned int count,
+              void *kbuf, void __user *ubuf)
+{
+       struct fpu *fpu = &target->thread.fpu;
+       struct user_i387_ia32_struct env;
+
+       fpu__activate_fpstate_read(fpu);
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
+
+       if (!cpu_has_fxsr)
+               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
+                                          &fpu->state.fsave, 0,
+                                          -1);
+
+       fpstate_sanitize_xstate(fpu);
+
+       if (kbuf && pos == 0 && count == sizeof(env)) {
+               convert_from_fxsr(kbuf, target);
+               return 0;
+       }
+
+       convert_from_fxsr(&env, target);
+
+       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+}
+
+int fpregs_set(struct task_struct *target, const struct user_regset *regset,
+              unsigned int pos, unsigned int count,
+              const void *kbuf, const void __user *ubuf)
+{
+       struct fpu *fpu = &target->thread.fpu;
+       struct user_i387_ia32_struct env;
+       int ret;
+
+       fpu__activate_fpstate_write(fpu);
+       fpstate_sanitize_xstate(fpu);
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
+
+       if (!cpu_has_fxsr)
+               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+                                         &fpu->state.fsave, 0,
+                                         -1);
+
+       if (pos > 0 || count < sizeof(env))
+               convert_from_fxsr(&env, target);
+
+       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
+       if (!ret)
+               convert_to_fxsr(target, &env);
+
+       /*
+        * update the header bit in the xsave header, indicating the
+        * presence of FP.
+        */
+       if (cpu_has_xsave)
+               fpu->state.xsave.header.xfeatures |= XSTATE_FP;
+       return ret;
+}
+
+/*
+ * FPU state for core dumps.
+ * This is only used for a.out dumps now.
+ * It is declared generically using elf_fpregset_t (which is
+ * struct user_i387_struct) but is in fact only used for 32-bit
+ * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
+ */
+int dump_fpu(struct pt_regs *regs, struct user_i387_struct *ufpu)
+{
+       struct task_struct *tsk = current;
+       struct fpu *fpu = &tsk->thread.fpu;
+       int fpvalid;
+
+       fpvalid = fpu->fpstate_active;
+       if (fpvalid)
+               fpvalid = !fpregs_get(tsk, NULL,
+                                     0, sizeof(struct user_i387_ia32_struct),
+                                     ufpu, NULL);
+
+       return fpvalid;
+}
+EXPORT_SYMBOL(dump_fpu);
+
+#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
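From user space these regset handlers are reached through ptrace. A hedged sketch of fetching a stopped tracee's xstate buffer, which lands in xstateregs_get(); the 4096-byte buffer is an assumption that comfortably exceeds xstate_size on current CPUs:

#include <elf.h>		/* NT_X86_XSTATE */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

/* Sketch: read the xstate regset of an already-stopped tracee. */
static int dump_xstate(pid_t pid)
{
	static uint8_t buf[4096];
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_X86_XSTATE, &iov) == -1) {
		perror("PTRACE_GETREGSET");
		return -1;
	}

	/* The kernel shrinks iov_len to the number of bytes it copied out. */
	printf("xstate regset: %zu bytes\n", iov.iov_len);
	return 0;
}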
diff --git a/arch/x86/kernel/fpu/signal.c b/arch/x86/kernel/fpu/signal.c
new file mode 100644 (file)
index 0000000..50ec9af
--- /dev/null
@@ -0,0 +1,404 @@
+/*
+ * FPU signal frame handling routines.
+ */
+
+#include <linux/compat.h>
+#include <linux/cpu.h>
+
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
+#include <asm/fpu/regset.h>
+
+#include <asm/sigframe.h>
+
+static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
+
+/*
+ * Check for the presence of extended state information in the
+ * user fpstate area pointed to by the fpstate pointer in the sigcontext.
+ */
+static inline int check_for_xstate(struct fxregs_state __user *buf,
+                                  void __user *fpstate,
+                                  struct _fpx_sw_bytes *fx_sw)
+{
+       int min_xstate_size = sizeof(struct fxregs_state) +
+                             sizeof(struct xstate_header);
+       unsigned int magic2;
+
+       if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
+               return -1;
+
+       /* Check for the first magic field and other error scenarios. */
+       if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
+           fx_sw->xstate_size < min_xstate_size ||
+           fx_sw->xstate_size > xstate_size ||
+           fx_sw->xstate_size > fx_sw->extended_size)
+               return -1;
+
+       /*
+        * Check for the presence of second magic word at the end of memory
+        * layout. This detects the case where the user just copied the legacy
+        * fpstate layout without copying the extended state information
+        * in the memory layout.
+        */
+       if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
+           || magic2 != FP_XSTATE_MAGIC2)
+               return -1;
+
+       return 0;
+}
+
+/*
+ * Signal frame handlers.
+ */
+static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
+{
+       if (use_fxsr()) {
+               struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+               struct user_i387_ia32_struct env;
+               struct _fpstate_ia32 __user *fp = buf;
+
+               convert_from_fxsr(&env, tsk);
+
+               if (__copy_to_user(buf, &env, sizeof(env)) ||
+                   __put_user(xsave->i387.swd, &fp->status) ||
+                   __put_user(X86_FXSR_MAGIC, &fp->magic))
+                       return -1;
+       } else {
+               struct fregs_state __user *fp = buf;
+               u32 swd;
+               if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
+                       return -1;
+       }
+
+       return 0;
+}
+
+static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
+{
+       struct xregs_state __user *x = buf;
+       struct _fpx_sw_bytes *sw_bytes;
+       u32 xfeatures;
+       int err;
+
+       /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
+       sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
+       err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
+
+       if (!use_xsave())
+               return err;
+
+       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
+
+       /*
+        * Read the xfeatures which we copied (directly from the cpu or
+        * from the state in task struct) to the user buffers.
+        */
+       err |= __get_user(xfeatures, (__u32 *)&x->header.xfeatures);
+
+       /*
+        * For legacy compatibility, we always set the FP/SSE bits in the
+        * bit vector while saving the state to the user context. This
+        * enables us to capture any changes (made during sigreturn) to
+        * the FP/SSE bits by legacy applications which don't touch
+        * xfeatures in the xsave header.
+        *
+        * XSAVE-aware apps can change the xfeatures in the xsave
+        * header as well as change any contents in the memory layout.
+        * XRSTOR as part of sigreturn will capture all the changes.
+        */
+       xfeatures |= XSTATE_FPSSE;
+
+       err |= __put_user(xfeatures, (__u32 *)&x->header.xfeatures);
+
+       return err;
+}
+
+static inline int copy_fpregs_to_sigframe(struct xregs_state __user *buf)
+{
+       int err;
+
+       if (use_xsave())
+               err = copy_xregs_to_user(buf);
+       else if (use_fxsr())
+               err = copy_fxregs_to_user((struct fxregs_state __user *) buf);
+       else
+               err = copy_fregs_to_user((struct fregs_state __user *) buf);
+
+       if (unlikely(err) && __clear_user(buf, xstate_size))
+               err = -EFAULT;
+       return err;
+}
+
+/*
+ * Save the fpu, extended register state to the user signal frame.
+ *
+ * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
+ *  state is copied.
+ *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
+ *
+ *     buf == buf_fx for 64-bit frames and 32-bit fsave frame.
+ *     buf != buf_fx for 32-bit frames with fxstate.
+ *
+ * If the fpu, extended register state is live, save the state directly
+ * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
+ * copy the thread's fpu state to the user frame starting at 'buf_fx'.
+ *
+ * If this is a 32-bit frame with fxstate, put a fsave header before
+ * the aligned state at 'buf_fx'.
+ *
+ * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
+ * indicating the absence/presence of the extended state to the user.
+ */
+int copy_fpstate_to_sigframe(void __user *buf, void __user *buf_fx, int size)
+{
+       struct xregs_state *xsave = &current->thread.fpu.state.xsave;
+       struct task_struct *tsk = current;
+       int ia32_fxstate = (buf != buf_fx);
+
+       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
+                        config_enabled(CONFIG_IA32_EMULATION));
+
+       if (!access_ok(VERIFY_WRITE, buf, size))
+               return -EACCES;
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_get(current, NULL, 0,
+                       sizeof(struct user_i387_ia32_struct), NULL,
+                       (struct _fpstate_ia32 __user *) buf) ? -1 : 1;
+
+       if (fpregs_active()) {
+               /* Save the live register state to the user directly. */
+               if (copy_fpregs_to_sigframe(buf_fx))
+                       return -1;
+               /* Update the thread's fxstate to save the fsave header. */
+               if (ia32_fxstate)
+                       copy_fxregs_to_kernel(&tsk->thread.fpu);
+       } else {
+               fpstate_sanitize_xstate(&tsk->thread.fpu);
+               if (__copy_to_user(buf_fx, xsave, xstate_size))
+                       return -1;
+       }
+
+       /* Save the fsave header for the 32-bit frames. */
+       if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
+               return -1;
+
+       if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
+               return -1;
+
+       return 0;
+}
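An editorial sketch of the two layouts distinguished above:

/*
 * 64-bit frames and 32-bit fsave frames (buf == buf_fx):
 *
 *	buf/buf_fx -> [ [f|fx|x]save image ]
 *
 * 32-bit frames with fxstate (buf != buf_fx):
 *
 *	buf    -> [ fsave header (struct fregs_state) ]
 *	buf_fx -> [ 64-byte aligned fx/xsave image ]
 *
 * For xsave frames, FP_XSTATE_MAGIC1 sits in the sw_reserved bytes of
 * the image and FP_XSTATE_MAGIC2 follows it at buf_fx + xstate_size;
 * check_for_xstate() verifies both on the sigreturn path.
 */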
+
+static inline void
+sanitize_restored_xstate(struct task_struct *tsk,
+                        struct user_i387_ia32_struct *ia32_env,
+                        u64 xfeatures, int fx_only)
+{
+       struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
+       struct xstate_header *header = &xsave->header;
+
+       if (use_xsave()) {
+               /* These bits must be zero. */
+               memset(header->reserved, 0, 48);
+
+               /*
+                * Init the state that is not present in the memory
+                * layout and not enabled by the OS.
+                */
+               if (fx_only)
+                       header->xfeatures = XSTATE_FPSSE;
+               else
+                       header->xfeatures &= (xfeatures_mask & xfeatures);
+       }
+
+       if (use_fxsr()) {
+               /*
+                * mscsr reserved bits must be masked to zero for security
+                * reasons.
+                */
+               xsave->i387.mxcsr &= mxcsr_feature_mask;
+
+               convert_to_fxsr(tsk, ia32_env);
+       }
+}
+
+/*
+ * Restore the extended state if present. Otherwise, restore the FP/SSE state.
+ */
+static inline int copy_user_to_fpregs_zeroing(void __user *buf, u64 xbv, int fx_only)
+{
+       if (use_xsave()) {
+               if ((unsigned long)buf % 64 || fx_only) {
+                       u64 init_bv = xfeatures_mask & ~XSTATE_FPSSE;
+                       copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                       return copy_user_to_fxregs(buf);
+               } else {
+                       u64 init_bv = xfeatures_mask & ~xbv;
+                       if (unlikely(init_bv))
+                               copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
+                       return copy_user_to_xregs(buf, xbv);
+               }
+       } else if (use_fxsr()) {
+               return copy_user_to_fxregs(buf);
+       } else
+               return copy_user_to_fregs(buf);
+}
+
+static int __fpu__restore_sig(void __user *buf, void __user *buf_fx, int size)
+{
+       int ia32_fxstate = (buf != buf_fx);
+       struct task_struct *tsk = current;
+       struct fpu *fpu = &tsk->thread.fpu;
+       int state_size = xstate_size;
+       u64 xfeatures = 0;
+       int fx_only = 0;
+
+       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
+                        config_enabled(CONFIG_IA32_EMULATION));
+
+       if (!buf) {
+               fpu__clear(fpu);
+               return 0;
+       }
+
+       if (!access_ok(VERIFY_READ, buf, size))
+               return -EACCES;
+
+       fpu__activate_curr(fpu);
+
+       if (!static_cpu_has(X86_FEATURE_FPU))
+               return fpregs_soft_set(current, NULL,
+                                      0, sizeof(struct user_i387_ia32_struct),
+                                      NULL, buf) != 0;
+
+       if (use_xsave()) {
+               struct _fpx_sw_bytes fx_sw_user;
+               if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
+                       /*
+                        * Couldn't find the extended state information in the
+                        * memory layout. Restore just the FP/SSE and init all
+                        * the other extended state.
+                        */
+                       state_size = sizeof(struct fxregs_state);
+                       fx_only = 1;
+               } else {
+                       state_size = fx_sw_user.xstate_size;
+                       xfeatures = fx_sw_user.xfeatures;
+               }
+       }
+
+       if (ia32_fxstate) {
+               /*
+                * For 32-bit frames with fxstate, copy the user state to the
+                * thread's fpu state and reconstruct the fxstate from the
+                * fsave header, then sanitize the copied state.
+                */
+               struct fpu *fpu = &tsk->thread.fpu;
+               struct user_i387_ia32_struct env;
+               int err = 0;
+
+               /*
+                * Drop the current fpu, which clears fpu->fpstate_active. This
+                * ensures that a context switch during the copy of the new state
+                * cannot save/restore the half-copied intermediate state and
+                * thereby corrupt it. We are ready to restore/save the state
+                * only once fpu->fpstate_active is set again.
+                */
+               fpu__drop(fpu);
+
+               if (__copy_from_user(&fpu->state.xsave, buf_fx, state_size) ||
+                   __copy_from_user(&env, buf, sizeof(env))) {
+                       fpstate_init(&fpu->state);
+                       err = -1;
+               } else {
+                       sanitize_restored_xstate(tsk, &env, xfeatures, fx_only);
+               }
+
+               fpu->fpstate_active = 1;
+               if (use_eager_fpu()) {
+                       preempt_disable();
+                       fpu__restore(fpu);
+                       preempt_enable();
+               }
+
+               return err;
+       } else {
+               /*
+                * For 64-bit frames and 32-bit fsave frames, restore the user
+                * state to the registers directly (with exceptions handled).
+                */
+               user_fpu_begin();
+               if (copy_user_to_fpregs_zeroing(buf_fx, xfeatures, fx_only)) {
+                       fpu__clear(fpu);
+                       return -1;
+               }
+       }
+
+       return 0;
+}
+
+static inline int xstate_sigframe_size(void)
+{
+       return use_xsave() ? xstate_size + FP_XSTATE_MAGIC2_SIZE : xstate_size;
+}
+
+/*
+ * Restore FPU state from a sigframe:
+ */
+int fpu__restore_sig(void __user *buf, int ia32_frame)
+{
+       void __user *buf_fx = buf;
+       int size = xstate_sigframe_size();
+
+       if (ia32_frame && use_fxsr()) {
+               buf_fx = buf + sizeof(struct fregs_state);
+               size += sizeof(struct fregs_state);
+       }
+
+       return __fpu__restore_sig(buf, buf_fx, size);
+}
+
+unsigned long
+fpu__alloc_mathframe(unsigned long sp, int ia32_frame,
+                    unsigned long *buf_fx, unsigned long *size)
+{
+       unsigned long frame_size = xstate_sigframe_size();
+
+       *buf_fx = sp = round_down(sp - frame_size, 64);
+       if (ia32_frame && use_fxsr()) {
+               frame_size += sizeof(struct fregs_state);
+               sp -= sizeof(struct fregs_state);
+       }
+
+       *size = frame_size;
+
+       return sp;
+}
+/*
+ * Prepare the SW reserved portion of the fxsave memory layout, indicating
+ * the presence of the extended state information in the memory layout
+ * pointed to by the fpstate pointer in the sigcontext.
+ * This will be saved whenever the FP and extended state context is
+ * saved on the user stack during signal delivery to the user.
+ */
+void fpu__init_prepare_fx_sw_frame(void)
+{
+       int fsave_header_size = sizeof(struct fregs_state);
+       int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
+
+       if (config_enabled(CONFIG_X86_32))
+               size += fsave_header_size;
+
+       fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
+       fx_sw_reserved.extended_size = size;
+       fx_sw_reserved.xfeatures = xfeatures_mask;
+       fx_sw_reserved.xstate_size = xstate_size;
+
+       if (config_enabled(CONFIG_IA32_EMULATION)) {
+               fx_sw_reserved_ia32 = fx_sw_reserved;
+               fx_sw_reserved_ia32.extended_size += fsave_header_size;
+       }
+}
+
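The user-space counterpart, as a hedged sketch: the handler probes for FP_XSTATE_MAGIC1 in the sw_reserved area, assuming the x86-64 layout in which sw_reserved occupies the last 48 bytes of the 512-byte fxsave image (byte offset 464):

#include <signal.h>
#include <stdint.h>
#include <string.h>
#include <ucontext.h>
#include <unistd.h>

#define FP_XSTATE_MAGIC1	0x46505853U	/* from asm/sigcontext.h */

/* Sketch: detect whether a delivered signal frame carries xsave state. */
static void handler(int sig, siginfo_t *si, void *ctx)
{
	ucontext_t *uc = ctx;
	void *fp = uc->uc_mcontext.fpregs;	/* x86-64 mcontext assumed */
	uint32_t magic1;

	if (!fp)
		return;

	memcpy(&magic1, (uint8_t *)fp + 464, sizeof(magic1));
	if (magic1 == FP_XSTATE_MAGIC1)
		write(STDOUT_FILENO, "xstate frame\n", 13);
}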
diff --git a/arch/x86/kernel/fpu/xstate.c b/arch/x86/kernel/fpu/xstate.c
new file mode 100644 (file)
index 0000000..62fc001
--- /dev/null
@@ -0,0 +1,461 @@
+/*
+ * xsave/xrstor support.
+ *
+ * Author: Suresh Siddha <suresh.b.siddha@intel.com>
+ */
+#include <linux/compat.h>
+#include <linux/cpu.h>
+
+#include <asm/fpu/api.h>
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
+#include <asm/fpu/regset.h>
+
+#include <asm/tlbflush.h>
+
+static const char *xfeature_names[] =
+{
+       "x87 floating point registers"  ,
+       "SSE registers"                 ,
+       "AVX registers"                 ,
+       "MPX bounds registers"          ,
+       "MPX CSR"                       ,
+       "AVX-512 opmask"                ,
+       "AVX-512 Hi256"                 ,
+       "AVX-512 ZMM_Hi256"             ,
+       "unknown xstate feature"        ,
+};
+
+/*
+ * Mask of xstate features supported by the CPU and the kernel:
+ */
+u64 xfeatures_mask __read_mostly;
+
+static unsigned int xstate_offsets[XFEATURES_NR_MAX] = { [ 0 ... XFEATURES_NR_MAX - 1] = -1};
+static unsigned int xstate_sizes[XFEATURES_NR_MAX]   = { [ 0 ... XFEATURES_NR_MAX - 1] = -1};
+static unsigned int xstate_comp_offsets[sizeof(xfeatures_mask)*8];
+
+/* The number of supported xfeatures in xfeatures_mask: */
+static unsigned int xfeatures_nr;
+
+/*
+ * Return whether the system supports a given xfeature.
+ *
+ * Also return the name of the (most advanced) feature that the caller requested:
+ */
+int cpu_has_xfeatures(u64 xfeatures_needed, const char **feature_name)
+{
+       u64 xfeatures_missing = xfeatures_needed & ~xfeatures_mask;
+
+       if (unlikely(feature_name)) {
+               long xfeature_idx, max_idx;
+               u64 xfeatures_print;
+               /*
+                * We use fls64() here to be able to print the most advanced
+                * feature that was requested but is missing. So if a driver
+                * asks about "XSTATE_SSE | XSTATE_YMM" we'll print the
+                * missing AVX feature - this is the most informative message
+                * to users:
+                */
+               if (xfeatures_missing)
+                       xfeatures_print = xfeatures_missing;
+               else
+                       xfeatures_print = xfeatures_needed;
+
+               xfeature_idx = fls64(xfeatures_print)-1;
+               max_idx = ARRAY_SIZE(xfeature_names)-1;
+               xfeature_idx = min(xfeature_idx, max_idx);
+
+               *feature_name = xfeature_names[xfeature_idx];
+       }
+
+       if (xfeatures_missing)
+               return 0;
+
+       return 1;
+}
+EXPORT_SYMBOL_GPL(cpu_has_xfeatures);
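A driver-side usage sketch for the XSTATE_SSE | XSTATE_YMM case the comment above mentions:

/* Sketch: a driver bailing out when AVX xstate is not supported. */
static int example_driver_init(void)
{
	const char *feature_name;

	if (!cpu_has_xfeatures(XSTATE_SSE | XSTATE_YMM, &feature_name)) {
		pr_info("example: no '%s' xstate support\n", feature_name);
		return -ENODEV;
	}

	return 0;
}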
+
+/*
+ * When executing XSAVEOPT (or other optimized XSAVE instructions), if
+ * a processor implementation detects that an FPU state component is still
+ * (or is again) in its initialized state, it may clear the corresponding
+ * bit in the header.xfeatures field, and can skip the writeout of registers
+ * to the corresponding memory layout.
+ *
+ * This means that when the bit is zero, the state component might still contain
+ * some previous, non-initialized register state.
+ *
+ * Before writing xstate information to user-space we sanitize those components,
+ * so that the memory layout of a feature is always in the init state
+ * if the corresponding header bit is zero. This ensures that user-space doesn't
+ * see some stale state in the memory layout during signal handling, debugging etc.
+ */
+void fpstate_sanitize_xstate(struct fpu *fpu)
+{
+       struct fxregs_state *fx = &fpu->state.fxsave;
+       int feature_bit;
+       u64 xfeatures;
+
+       if (!use_xsaveopt())
+               return;
+
+       xfeatures = fpu->state.xsave.header.xfeatures;
+
+       /*
+        * If none of the feature bits is in init state, there is nothing
+        * to do for us, as the memory layout is already up to date.
+        */
+       if ((xfeatures & xfeatures_mask) == xfeatures_mask)
+               return;
+
+       /*
+        * FP is in init state
+        */
+       if (!(xfeatures & XSTATE_FP)) {
+               fx->cwd = 0x37f;
+               fx->swd = 0;
+               fx->twd = 0;
+               fx->fop = 0;
+               fx->rip = 0;
+               fx->rdp = 0;
+               memset(&fx->st_space[0], 0, 128);
+       }
+
+       /*
+        * SSE is in init state
+        */
+       if (!(xfeatures & XSTATE_SSE))
+               memset(&fx->xmm_space[0], 0, 256);
+
+       /*
+        * First two features are FPU and SSE, which above we handled
+        * in a special way already:
+        */
+       feature_bit = 0x2;
+       xfeatures = (xfeatures_mask & ~xfeatures) >> 2;
+
+       /*
+        * Update all the remaining memory layouts according to their
+        * standard xstate layout, if their header bit is in the init
+        * state:
+        */
+       while (xfeatures) {
+               if (xfeatures & 0x1) {
+                       int offset = xstate_offsets[feature_bit];
+                       int size = xstate_sizes[feature_bit];
+
+                       memcpy((void *)fx + offset,
+                              (void *)&init_fpstate.xsave + offset,
+                              size);
+               }
+
+               xfeatures >>= 1;
+               feature_bit++;
+       }
+}
+
+/*
+ * Enable the extended processor state save/restore feature.
+ * Called once per CPU onlining.
+ */
+void fpu__init_cpu_xstate(void)
+{
+       if (!cpu_has_xsave || !xfeatures_mask)
+               return;
+
+       cr4_set_bits(X86_CR4_OSXSAVE);
+       xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
+}
+
+/*
+ * Record the offsets and sizes of various xstates contained
+ * in the XSAVE state memory layout.
+ *
+ * ( Note that certain features might be non-present, for them
+ *   we'll have 0 offset and 0 size. )
+ */
+static void __init setup_xstate_features(void)
+{
+       u32 eax, ebx, ecx, edx, leaf;
+
+       xfeatures_nr = fls64(xfeatures_mask);
+
+       for (leaf = 2; leaf < xfeatures_nr; leaf++) {
+               cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
+
+               xstate_offsets[leaf] = ebx;
+               xstate_sizes[leaf] = eax;
+
+               printk(KERN_INFO "x86/fpu: xstate_offset[%d]: %04x, xstate_sizes[%d]: %04x\n", leaf, ebx, leaf, eax);
+       }
+}
+
+static void __init print_xstate_feature(u64 xstate_mask)
+{
+       const char *feature_name;
+
+       if (cpu_has_xfeatures(xstate_mask, &feature_name))
+               pr_info("x86/fpu: Supporting XSAVE feature 0x%02Lx: '%s'\n", xstate_mask, feature_name);
+}
+
+/*
+ * Print out all the supported xstate features:
+ */
+static void __init print_xstate_features(void)
+{
+       print_xstate_feature(XSTATE_FP);
+       print_xstate_feature(XSTATE_SSE);
+       print_xstate_feature(XSTATE_YMM);
+       print_xstate_feature(XSTATE_BNDREGS);
+       print_xstate_feature(XSTATE_BNDCSR);
+       print_xstate_feature(XSTATE_OPMASK);
+       print_xstate_feature(XSTATE_ZMM_Hi256);
+       print_xstate_feature(XSTATE_Hi16_ZMM);
+}
+
+/*
+ * This function sets up the offsets and sizes of all extended states in
+ * the xsave area. It supports both the standard format and the compacted
+ * format of the xsave area.
+ */
+static void __init setup_xstate_comp(void)
+{
+       unsigned int xstate_comp_sizes[sizeof(xfeatures_mask)*8];
+       int i;
+
+       /*
+        * The FP xstates and SSE xstates are legacy states. They are always
+        * in the fixed offsets in the xsave area in either compacted form
+        * or standard form.
+        */
+       xstate_comp_offsets[0] = 0;
+       xstate_comp_offsets[1] = offsetof(struct fxregs_state, xmm_space);
+
+       if (!cpu_has_xsaves) {
+               for (i = 2; i < xfeatures_nr; i++) {
+                       if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
+                               xstate_comp_offsets[i] = xstate_offsets[i];
+                               xstate_comp_sizes[i] = xstate_sizes[i];
+                       }
+               }
+               return;
+       }
+
+       xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+
+       for (i = 2; i < xfeatures_nr; i++) {
+               if (test_bit(i, (unsigned long *)&xfeatures_mask))
+                       xstate_comp_sizes[i] = xstate_sizes[i];
+               else
+                       xstate_comp_sizes[i] = 0;
+
+               if (i > 2)
+                       xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
+                                       + xstate_comp_sizes[i-1];
+
+       }
+}
+
+/*
+ * setup the xstate image representing the init state
+ */
+static void __init setup_init_fpu_buf(void)
+{
+       static int on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
+       if (!cpu_has_xsave)
+               return;
+
+       setup_xstate_features();
+       print_xstate_features();
+
+       if (cpu_has_xsaves) {
+               init_fpstate.xsave.header.xcomp_bv = (u64)1 << 63 | xfeatures_mask;
+               init_fpstate.xsave.header.xfeatures = xfeatures_mask;
+       }
+
+       /*
+        * Init all the features' state with header.xfeatures being 0x0
+        */
+       copy_kernel_to_xregs_booting(&init_fpstate.xsave);
+
+       /*
+        * Dump the init state again. This is to identify the init state
+        * of any feature which is not represented by all zeros.
+        */
+       copy_xregs_to_kernel_booting(&init_fpstate.xsave);
+}
+
+/*
+ * Calculate total size of enabled xstates in XCR0/xfeatures_mask.
+ */
+static void __init init_xstate_size(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       int i;
+
+       if (!cpu_has_xsaves) {
+               cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+               xstate_size = ebx;
+               return;
+       }
+
+       xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
+       for (i = 2; i < 64; i++) {
+               if (test_bit(i, (unsigned long *)&xfeatures_mask)) {
+                       cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
+                       xstate_size += eax;
+               }
+       }
+}
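User space can derive the same sizes straight from CPUID leaf 0xD; a hedged sketch using GCC's <cpuid.h>, assuming an XSAVE-capable CPU:

#include <cpuid.h>
#include <stdio.h>

/*
 * CPUID leaf 0xD, sub-leaf 0: EBX is the standard-format xsave area
 * size for the features currently enabled in XCR0, ECX the maximum
 * size needed for all features the CPU supports.
 */
int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	__cpuid_count(0x0d, 0, eax, ebx, ecx, edx);
	printf("xsave area: %u bytes enabled, %u bytes max\n", ebx, ecx);

	return 0;
}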
+
+/*
+ * Enable and initialize the xsave feature.
+ * Called once per system bootup.
+ */
+void __init fpu__init_system_xstate(void)
+{
+       unsigned int eax, ebx, ecx, edx;
+       static int on_boot_cpu = 1;
+
+       WARN_ON_FPU(!on_boot_cpu);
+       on_boot_cpu = 0;
+
+       if (!cpu_has_xsave) {
+               pr_info("x86/fpu: Legacy x87 FPU detected.\n");
+               return;
+       }
+
+       if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
+               WARN_ON_FPU(1);
+               return;
+       }
+
+       cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
+       xfeatures_mask = eax + ((u64)edx << 32);
+
+       if ((xfeatures_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
+               pr_err("x86/fpu: FP/SSE not present amongst the CPU's xstate features: 0x%llx.\n", xfeatures_mask);
+               BUG();
+       }
+
+       /* Support only the state known to the OS: */
+       xfeatures_mask = xfeatures_mask & XCNTXT_MASK;
+
+       /* Enable xstate instructions to be able to continue with initialization: */
+       fpu__init_cpu_xstate();
+
+       /* Recompute the context size for enabled features: */
+       init_xstate_size();
+
+       update_regset_xstate_info(xstate_size, xfeatures_mask);
+       fpu__init_prepare_fx_sw_frame();
+       setup_init_fpu_buf();
+       setup_xstate_comp();
+
+       pr_info("x86/fpu: Enabled xstate features 0x%llx, context size is 0x%x bytes, using '%s' format.\n",
+               xfeatures_mask,
+               xstate_size,
+               cpu_has_xsaves ? "compacted" : "standard");
+}
+
+/*
+ * Restore minimal FPU state after suspend:
+ */
+void fpu__resume_cpu(void)
+{
+       /*
+        * Restore XCR0 on xsave capable CPUs:
+        */
+       if (cpu_has_xsave)
+               xsetbv(XCR_XFEATURE_ENABLED_MASK, xfeatures_mask);
+}
+
+/*
+ * Given the xsave area and a state inside, this function returns the
+ * address of the state.
+ *
+ * This is the API that is called to get xstate address in either
+ * standard format or compacted format of the xsave area.
+ *
+ * Note that if there is no data for the field in the xsave buffer
+ * this will return NULL.
+ *
+ * Inputs:
+ *     xstate: the thread's storage area for all FPU data
+ *     xstate_feature: state which is defined in xsave.h (e.g.
+ *     XSTATE_FP, XSTATE_SSE, etc...)
+ * Output:
+ *     address of the state in the xsave area, or NULL if the
+ *     field is not present in the xsave buffer.
+ */
+void *get_xsave_addr(struct xregs_state *xsave, int xstate_feature)
+{
+       int feature_nr = fls64(xstate_feature) - 1;
+       /*
+        * Do we even *have* xsave state?
+        */
+       if (!boot_cpu_has(X86_FEATURE_XSAVE))
+               return NULL;
+
+       xsave = &current->thread.fpu.state.xsave;
+       /*
+        * We should not ever be requesting features that we
+        * have not enabled.  Remember that xfeatures_mask is
+        * what we write to the XCR0 register.
+        */
+       WARN_ONCE(!(xfeatures_mask & xstate_feature),
+                 "get of unsupported state");
+       /*
+        * This assumes the last 'xsave*' instruction to
+        * have requested that 'xstate_feature' be saved.
+        * If it did not, we might be seeing an old value
+        * of the field in the buffer.
+        *
+        * This can happen because the last 'xsave' did not
+        * request that this feature be saved (unlikely)
+        * or because the "init optimization" caused it
+        * to not be saved.
+        */
+       if (!(xsave->header.xfeatures & xstate_feature))
+               return NULL;
+
+       return (void *)xsave + xstate_comp_offsets[feature_nr];
+}
+EXPORT_SYMBOL_GPL(get_xsave_addr);
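A kernel-side usage sketch, with XSTATE_BNDCSR picked arbitrarily as the example feature:

/* Sketch: look up a task's MPX bounds-config image, if present. */
static void example_inspect_bndcsr(struct fpu *fpu)
{
	void *bndcsr = get_xsave_addr(&fpu->state.xsave, XSTATE_BNDCSR);

	if (!bndcsr)
		return;	/* feature disabled, or still in its init state */

	/* ... the BNDCFGU/BNDSTATUS image starts at 'bndcsr' ... */
}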
+
+/*
+ * This wraps up the common operations that need to occur when retrieving
+ * data from xsave state.  It first ensures that the current task was
+ * using the FPU and retrieves the data into a buffer.  It then calculates
+ * the offset of the requested field in the buffer.
+ *
+ * This function is safe to call whether the FPU is in use or not.
+ *
+ * Note that this only works on the current task.
+ *
+ * Inputs:
+ *     @xsave_state: state which is defined in xsave.h (e.g. XSTATE_FP,
+ *     XSTATE_SSE, etc...)
+ * Output:
+ *     address of the state in the xsave area or NULL if the state
+ *     is not present or is in its 'init state'.
+ */
+const void *get_xsave_field_ptr(int xsave_state)
+{
+       struct fpu *fpu = &current->thread.fpu;
+
+       if (!fpu->fpstate_active)
+               return NULL;
+       /*
+        * fpu__save() takes the CPU's xstate registers
+        * and saves them off to the 'fpu' memory buffer.
+        */
+       fpu__save(fpu);
+
+       return get_xsave_addr(&fpu->state.xsave, xsave_state);
+}
index 2b55ee6db053c79fbe91a6119e613075be54111b..5a4668136e9892b6b8695d1d82edf86afbdea0a0 100644 (file)
@@ -167,7 +167,7 @@ asmlinkage __visible void __init x86_64_start_kernel(char * real_mode_data)
        clear_bss();
 
        for (i = 0; i < NUM_EXCEPTION_VECTORS; i++)
-               set_intr_gate(i, early_idt_handlers[i]);
+               set_intr_gate(i, early_idt_handler_array[i]);
        load_idt((const struct desc_ptr *)&idt_descr);
 
        copy_bootdata(__va(real_mode_data));
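The early_idt_handlers -> early_idt_handler_array rename reflects that each per-vector stub is now padded to a fixed EARLY_IDT_HANDLER_SIZE bytes (see the .fill directives in the head_32.S and head_64.S hunks below), so the i-th stub is found by plain array indexing, which is exactly what the loop above relies on. A sketch of the addressing:

/*
 * Sketch: with fixed-size stubs, vector i's early handler is just an
 * array element rather than "base plus accumulated increments".
 */
extern const char
early_idt_handler_array[NUM_EXCEPTION_VECTORS][EARLY_IDT_HANDLER_SIZE];

static const void *early_idt_entry(int i)
{
	return early_idt_handler_array[i];
}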
index d031bad9e07eadf3a80bc69a449cd13a44ed8080..0e2d96ffd158d0e5f4c1d355040cd9b285ef84d6 100644 (file)
 #define PAGE_TABLE_SIZE(pages) ((pages) / PTRS_PER_PGD)
 #endif
 
-/* Number of possible pages in the lowmem region */
-LOWMEM_PAGES = (((1<<32) - __PAGE_OFFSET) >> PAGE_SHIFT)
-       
+/*
+ * Number of possible pages in the lowmem region.
+ *
+ * We shift 2 by 31 instead of 1 by 32 to the left in order to avoid a
+ * gas warning about overflowing shift count when gas has been compiled
+ * with only a host target support using a 32-bit type for internal
+ * representation.
+ */
+LOWMEM_PAGES = (((2<<31) - __PAGE_OFFSET) >> PAGE_SHIFT)
+
 /* Enough space to fit pagetables for the low memory linear map */
 MAPPING_BEYOND_END = PAGE_TABLE_SIZE(LOWMEM_PAGES) << PAGE_SHIFT
 
@@ -478,21 +485,22 @@ is486:
 __INIT
 setup_once:
        /*
-        * Set up a idt with 256 entries pointing to ignore_int,
-        * interrupt gates. It doesn't actually load idt - that needs
-        * to be done on each CPU. Interrupts are enabled elsewhere,
-        * when we can be relatively sure everything is ok.
+        * Set up an idt with 256 interrupt gates that push zero if there
+        * is no error code and then jump to early_idt_handler_common.
+        * It doesn't actually load the idt - that needs to be done on
+        * each CPU. Interrupts are enabled elsewhere, when we can be
+        * relatively sure everything is ok.
         */
 
        movl $idt_table,%edi
-       movl $early_idt_handlers,%eax
+       movl $early_idt_handler_array,%eax
        movl $NUM_EXCEPTION_VECTORS,%ecx
 1:
        movl %eax,(%edi)
        movl %eax,4(%edi)
        /* interrupt gate, dpl=0, present */
        movl $(0x8E000000 + __KERNEL_CS),2(%edi)
-       addl $9,%eax
+       addl $EARLY_IDT_HANDLER_SIZE,%eax
        addl $8,%edi
        loop 1b
 
@@ -524,30 +532,32 @@ setup_once:
        andl $0,setup_once_ref  /* Once is enough, thanks */
        ret
 
-ENTRY(early_idt_handlers)
+ENTRY(early_idt_handler_array)
        # 36(%esp) %eflags
        # 32(%esp) %cs
        # 28(%esp) %eip
        # 24(%rsp) error code
        i = 0
        .rept NUM_EXCEPTION_VECTORS
-       .if (EXCEPTION_ERRCODE_MASK >> i) & 1
-       ASM_NOP2
-       .else
+       .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
        pushl $0                # Dummy error code, to make stack frame uniform
        .endif
        pushl $i                # 20(%esp) Vector number
-       jmp early_idt_handler
+       jmp early_idt_handler_common
        i = i + 1
+       .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
-ENDPROC(early_idt_handlers)
+ENDPROC(early_idt_handler_array)
        
-       /* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+       /*
+        * The stack is the hardware frame, an error code or zero, and the
+        * vector number.
+        */
        cld
 
        cmpl $2,(%esp)          # X86_TRAP_NMI
-       je is_nmi               # Ignore NMI
+       je .Lis_nmi             # Ignore NMI
 
        cmpl $2,%ss:early_recursion_flag
        je hlt_loop
@@ -600,10 +610,10 @@ ex_entry:
        pop %ecx
        pop %eax
        decl %ss:early_recursion_flag
-is_nmi:
+.Lis_nmi:
        addl $8,%esp            /* drop vector number and error code */
        iret
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
 /* This is the default interrupt "handler" :-) */
        ALIGN
index ae6588b301c248b3c281a1e072802e6764e9ac44..e5c27f729a3840b2755533cb597fbf702de07621 100644 (file)
@@ -321,30 +321,32 @@ bad_address:
        jmp bad_address
 
        __INIT
-       .globl early_idt_handlers
-early_idt_handlers:
+ENTRY(early_idt_handler_array)
        # 104(%rsp) %rflags
        #  96(%rsp) %cs
        #  88(%rsp) %rip
        #  80(%rsp) error code
        i = 0
        .rept NUM_EXCEPTION_VECTORS
-       .if (EXCEPTION_ERRCODE_MASK >> i) & 1
-       ASM_NOP2
-       .else
+       .ifeq (EXCEPTION_ERRCODE_MASK >> i) & 1
        pushq $0                # Dummy error code, to make stack frame uniform
        .endif
        pushq $i                # 72(%rsp) Vector number
-       jmp early_idt_handler
+       jmp early_idt_handler_common
        i = i + 1
+       .fill early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
        .endr
+ENDPROC(early_idt_handler_array)
 
-/* This is global to keep gas from relaxing the jumps */
-ENTRY(early_idt_handler)
+early_idt_handler_common:
+       /*
+        * The stack is the hardware frame, an error code or zero, and the
+        * vector number.
+        */
        cld
 
        cmpl $2,(%rsp)          # X86_TRAP_NMI
-       je is_nmi               # Ignore NMI
+       je .Lis_nmi             # Ignore NMI
 
        cmpl $2,early_recursion_flag(%rip)
        jz  1f
@@ -409,10 +411,10 @@ ENTRY(early_idt_handler)
        popq %rcx
        popq %rax
        decl early_recursion_flag(%rip)
-is_nmi:
+.Lis_nmi:
        addq $16,%rsp           # drop vector number and error code
        INTERRUPT_RETURN
-ENDPROC(early_idt_handler)
+ENDPROC(early_idt_handler_common)
 
        __INITDATA
 
index 3acbff4716b088ded2e1d237106872d5d1d1a7f0..10757d0a3fcf438e43ffbfc634e7daa8dc110a2e 100644 (file)
@@ -12,6 +12,7 @@
 #include <linux/pm.h>
 #include <linux/io.h>
 
+#include <asm/irqdomain.h>
 #include <asm/fixmap.h>
 #include <asm/hpet.h>
 #include <asm/time.h>
@@ -305,8 +306,6 @@ static void hpet_legacy_clockevent_register(void)
        printk(KERN_DEBUG "hpet clockevent registered\n");
 }
 
-static int hpet_setup_msi_irq(unsigned int irq);
-
 static void hpet_set_mode(enum clock_event_mode mode,
                          struct clock_event_device *evt, int timer)
 {
@@ -357,7 +356,7 @@ static void hpet_set_mode(enum clock_event_mode mode,
                        hpet_enable_legacy_int();
                } else {
                        struct hpet_dev *hdev = EVT_TO_HPET_DEV(evt);
-                       hpet_setup_msi_irq(hdev->irq);
+                       irq_domain_activate_irq(irq_get_irq_data(hdev->irq));
                        disable_irq(hdev->irq);
                        irq_set_affinity(hdev->irq, cpumask_of(hdev->cpu));
                        enable_irq(hdev->irq);
@@ -423,6 +422,7 @@ static int hpet_legacy_next_event(unsigned long delta,
 
 static DEFINE_PER_CPU(struct hpet_dev *, cpu_hpet_dev);
 static struct hpet_dev *hpet_devs;
+static struct irq_domain *hpet_domain;
 
 void hpet_msi_unmask(struct irq_data *data)
 {
@@ -473,31 +473,6 @@ static int hpet_msi_next_event(unsigned long delta,
        return hpet_next_event(delta, evt, hdev->num);
 }
 
-static int hpet_setup_msi_irq(unsigned int irq)
-{
-       if (x86_msi.setup_hpet_msi(irq, hpet_blockid)) {
-               irq_free_hwirq(irq);
-               return -EINVAL;
-       }
-       return 0;
-}
-
-static int hpet_assign_irq(struct hpet_dev *dev)
-{
-       unsigned int irq = irq_alloc_hwirq(-1);
-
-       if (!irq)
-               return -EINVAL;
-
-       irq_set_handler_data(irq, dev);
-
-       if (hpet_setup_msi_irq(irq))
-               return -EINVAL;
-
-       dev->irq = irq;
-       return 0;
-}
-
 static irqreturn_t hpet_interrupt_handler(int irq, void *data)
 {
        struct hpet_dev *dev = (struct hpet_dev *)data;
@@ -540,9 +515,6 @@ static void init_one_hpet_msi_clockevent(struct hpet_dev *hdev, int cpu)
        if (!(hdev->flags & HPET_DEV_VALID))
                return;
 
-       if (hpet_setup_msi_irq(hdev->irq))
-               return;
-
        hdev->cpu = cpu;
        per_cpu(cpu_hpet_dev, cpu) = hdev;
        evt->name = hdev->name;
@@ -574,7 +546,7 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
        unsigned int id;
        unsigned int num_timers;
        unsigned int num_timers_used = 0;
-       int i;
+       int i, irq;
 
        if (hpet_msi_disable)
                return;
@@ -587,6 +559,10 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
        num_timers++; /* Value read out starts from 0 */
        hpet_print_config();
 
+       hpet_domain = hpet_create_irq_domain(hpet_blockid);
+       if (!hpet_domain)
+               return;
+
        hpet_devs = kzalloc(sizeof(struct hpet_dev) * num_timers, GFP_KERNEL);
        if (!hpet_devs)
                return;
@@ -604,12 +580,14 @@ static void hpet_msi_capability_lookup(unsigned int start_timer)
                hdev->flags = 0;
                if (cfg & HPET_TN_PERIODIC_CAP)
                        hdev->flags |= HPET_DEV_PERI_CAP;
+               sprintf(hdev->name, "hpet%d", i);
                hdev->num = i;
 
-               sprintf(hdev->name, "hpet%d", i);
-               if (hpet_assign_irq(hdev))
+               irq = hpet_assign_irq(hpet_domain, hdev, hdev->num);
+               if (irq <= 0)
                        continue;
 
+               hdev->irq = irq;
                hdev->flags |= HPET_DEV_FSB_CAP;
                hdev->flags |= HPET_DEV_VALID;
                num_timers_used++;
@@ -709,10 +687,6 @@ static int hpet_cpuhp_notify(struct notifier_block *n,
 }
 #else
 
-static int hpet_setup_msi_irq(unsigned int irq)
-{
-       return 0;
-}
 static void hpet_msi_capability_lookup(unsigned int start_timer)
 {
        return;
index 05fd74f537d62122ade73f53dad17c97346c7a80..64341aa485ae1ad6ab62c07984c9a70dadd44c64 100644 (file)
@@ -40,7 +40,5 @@ EXPORT_SYMBOL(empty_zero_page);
 
 #ifdef CONFIG_PREEMPT
 EXPORT_SYMBOL(___preempt_schedule);
-#ifdef CONFIG_CONTEXT_TRACKING
-EXPORT_SYMBOL(___preempt_schedule_context);
-#endif
+EXPORT_SYMBOL(___preempt_schedule_notrace);
 #endif
diff --git a/arch/x86/kernel/i387.c b/arch/x86/kernel/i387.c
deleted file mode 100644 (file)
index 0091832..0000000
+++ /dev/null
@@ -1,656 +0,0 @@
-/*
- *  Copyright (C) 1994 Linus Torvalds
- *
- *  Pentium III FXSR, SSE support
- *  General FPU state handling cleanups
- *     Gareth Hughes <gareth@valinux.com>, May 2000
- */
-#include <linux/module.h>
-#include <linux/regset.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-
-#include <asm/sigcontext.h>
-#include <asm/processor.h>
-#include <asm/math_emu.h>
-#include <asm/tlbflush.h>
-#include <asm/uaccess.h>
-#include <asm/ptrace.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
-#include <asm/user.h>
-
-static DEFINE_PER_CPU(bool, in_kernel_fpu);
-
-void kernel_fpu_disable(void)
-{
-       WARN_ON(this_cpu_read(in_kernel_fpu));
-       this_cpu_write(in_kernel_fpu, true);
-}
-
-void kernel_fpu_enable(void)
-{
-       this_cpu_write(in_kernel_fpu, false);
-}
-
-/*
- * Were we in an interrupt that interrupted kernel mode?
- *
- * On others, we can do a kernel_fpu_begin/end() pair *ONLY* if that
- * pair does nothing at all: the thread must not have fpu (so
- * that we don't try to save the FPU state), and TS must
- * be set (so that the clts/stts pair does nothing that is
- * visible in the interrupted kernel thread).
- *
- * Except for the eagerfpu case when we return true; in the likely case
- * the thread has FPU but we are not going to set/clear TS.
- */
-static inline bool interrupted_kernel_fpu_idle(void)
-{
-       if (this_cpu_read(in_kernel_fpu))
-               return false;
-
-       if (use_eager_fpu())
-               return true;
-
-       return !__thread_has_fpu(current) &&
-               (read_cr0() & X86_CR0_TS);
-}
-
-/*
- * Were we in user mode (or vm86 mode) when we were
- * interrupted?
- *
- * Doing kernel_fpu_begin/end() is ok if we are running
- * in an interrupt context from user mode - we'll just
- * save the FPU state as required.
- */
-static inline bool interrupted_user_mode(void)
-{
-       struct pt_regs *regs = get_irq_regs();
-       return regs && user_mode(regs);
-}
-
-/*
- * Can we use the FPU in kernel mode with the
- * whole "kernel_fpu_begin/end()" sequence?
- *
- * It's always ok in process context (ie "not interrupt")
- * but it is sometimes ok even from an irq.
- */
-bool irq_fpu_usable(void)
-{
-       return !in_interrupt() ||
-               interrupted_user_mode() ||
-               interrupted_kernel_fpu_idle();
-}
-EXPORT_SYMBOL(irq_fpu_usable);
-
-void __kernel_fpu_begin(void)
-{
-       struct task_struct *me = current;
-
-       this_cpu_write(in_kernel_fpu, true);
-
-       if (__thread_has_fpu(me)) {
-               __save_init_fpu(me);
-       } else {
-               this_cpu_write(fpu_owner_task, NULL);
-               if (!use_eager_fpu())
-                       clts();
-       }
-}
-EXPORT_SYMBOL(__kernel_fpu_begin);
-
-void __kernel_fpu_end(void)
-{
-       struct task_struct *me = current;
-
-       if (__thread_has_fpu(me)) {
-               if (WARN_ON(restore_fpu_checking(me)))
-                       fpu_reset_state(me);
-       } else if (!use_eager_fpu()) {
-               stts();
-       }
-
-       this_cpu_write(in_kernel_fpu, false);
-}
-EXPORT_SYMBOL(__kernel_fpu_end);
-
-void unlazy_fpu(struct task_struct *tsk)
-{
-       preempt_disable();
-       if (__thread_has_fpu(tsk)) {
-               if (use_eager_fpu()) {
-                       __save_fpu(tsk);
-               } else {
-                       __save_init_fpu(tsk);
-                       __thread_fpu_end(tsk);
-               }
-       }
-       preempt_enable();
-}
-EXPORT_SYMBOL(unlazy_fpu);
-
-unsigned int mxcsr_feature_mask __read_mostly = 0xffffffffu;
-unsigned int xstate_size;
-EXPORT_SYMBOL_GPL(xstate_size);
-static struct i387_fxsave_struct fx_scratch;
-
-static void mxcsr_feature_mask_init(void)
-{
-       unsigned long mask = 0;
-
-       if (cpu_has_fxsr) {
-               memset(&fx_scratch, 0, sizeof(struct i387_fxsave_struct));
-               asm volatile("fxsave %0" : "+m" (fx_scratch));
-               mask = fx_scratch.mxcsr_mask;
-               if (mask == 0)
-                       mask = 0x0000ffbf;
-       }
-       mxcsr_feature_mask &= mask;
-}
-
-static void init_thread_xstate(void)
-{
-       /*
-        * Note that xstate_size might be overwriten later during
-        * xsave_init().
-        */
-
-       if (!cpu_has_fpu) {
-               /*
-                * Disable xsave as we do not support it if i387
-                * emulation is enabled.
-                */
-               setup_clear_cpu_cap(X86_FEATURE_XSAVE);
-               setup_clear_cpu_cap(X86_FEATURE_XSAVEOPT);
-               xstate_size = sizeof(struct i387_soft_struct);
-               return;
-       }
-
-       if (cpu_has_fxsr)
-               xstate_size = sizeof(struct i387_fxsave_struct);
-       else
-               xstate_size = sizeof(struct i387_fsave_struct);
-}
-
-/*
- * Called at bootup to set up the initial FPU state that is later cloned
- * into all processes.
- */
-
-void fpu_init(void)
-{
-       unsigned long cr0;
-       unsigned long cr4_mask = 0;
-
-#ifndef CONFIG_MATH_EMULATION
-       if (!cpu_has_fpu) {
-               pr_emerg("No FPU found and no math emulation present\n");
-               pr_emerg("Giving up\n");
-               for (;;)
-                       asm volatile("hlt");
-       }
-#endif
-       if (cpu_has_fxsr)
-               cr4_mask |= X86_CR4_OSFXSR;
-       if (cpu_has_xmm)
-               cr4_mask |= X86_CR4_OSXMMEXCPT;
-       if (cr4_mask)
-               cr4_set_bits(cr4_mask);
-
-       cr0 = read_cr0();
-       cr0 &= ~(X86_CR0_TS|X86_CR0_EM); /* clear TS and EM */
-       if (!cpu_has_fpu)
-               cr0 |= X86_CR0_EM;
-       write_cr0(cr0);
-
-       /*
-        * init_thread_xstate is only called once to avoid overriding
-        * xstate_size during boot time or during CPU hotplug.
-        */
-       if (xstate_size == 0)
-               init_thread_xstate();
-
-       mxcsr_feature_mask_init();
-       xsave_init();
-       eager_fpu_init();
-}
-
-void fpu_finit(struct fpu *fpu)
-{
-       if (!cpu_has_fpu) {
-               finit_soft_fpu(&fpu->state->soft);
-               return;
-       }
-
-       memset(fpu->state, 0, xstate_size);
-
-       if (cpu_has_fxsr) {
-               fx_finit(&fpu->state->fxsave);
-       } else {
-               struct i387_fsave_struct *fp = &fpu->state->fsave;
-               fp->cwd = 0xffff037fu;
-               fp->swd = 0xffff0000u;
-               fp->twd = 0xffffffffu;
-               fp->fos = 0xffff0000u;
-       }
-}
-EXPORT_SYMBOL_GPL(fpu_finit);
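
The fsave defaults above are the x87 power-on values with the unused upper 16 bits of each 32-bit field set, matching what fnsave leaves there. A small user-space decoding aid, purely illustrative:

    #include <stdio.h>

    int main(void)
    {
            unsigned int cwd = 0xffff037fu, twd = 0xffffffffu;

            /* 0x037f: all exceptions masked, extended precision, round-to-nearest */
            printf("cwd 0x%04x\n", cwd & 0xffff);
            /* 0xffff: every st(i) tag pair is 11, i.e. empty */
            printf("twd 0x%04x\n", twd & 0xffff);
            return 0;
    }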
-
-/*
- * The _current_ task is using the FPU for the first time, so
- * initialize it, set the mxcsr to its default reset value if we
- * support XMM instructions, and then remember that the current
- * task has used the FPU.
- */
-int init_fpu(struct task_struct *tsk)
-{
-       int ret;
-
-       if (tsk_used_math(tsk)) {
-               if (cpu_has_fpu && tsk == current)
-                       unlazy_fpu(tsk);
-               task_disable_lazy_fpu_restore(tsk);
-               return 0;
-       }
-
-       /*
-        * Memory allocation at the first usage of the FPU and other state.
-        */
-       ret = fpu_alloc(&tsk->thread.fpu);
-       if (ret)
-               return ret;
-
-       fpu_finit(&tsk->thread.fpu);
-
-       set_stopped_child_used_math(tsk);
-       return 0;
-}
-EXPORT_SYMBOL_GPL(init_fpu);
-
-/*
- * The xstateregs_active() routine is the same as the fpregs_active() routine,
- * as the "regset->n" for the xstate regset will be updated based on the feature
- * capabilities supported by the xsave.
- */
-int fpregs_active(struct task_struct *target, const struct user_regset *regset)
-{
-       return tsk_used_math(target) ? regset->n : 0;
-}
-
-int xfpregs_active(struct task_struct *target, const struct user_regset *regset)
-{
-       return (cpu_has_fxsr && tsk_used_math(target)) ? regset->n : 0;
-}
-
-int xfpregs_get(struct task_struct *target, const struct user_regset *regset,
-               unsigned int pos, unsigned int count,
-               void *kbuf, void __user *ubuf)
-{
-       int ret;
-
-       if (!cpu_has_fxsr)
-               return -ENODEV;
-
-       ret = init_fpu(target);
-       if (ret)
-               return ret;
-
-       sanitize_i387_state(target);
-
-       return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                  &target->thread.fpu.state->fxsave, 0, -1);
-}
-
-int xfpregs_set(struct task_struct *target, const struct user_regset *regset,
-               unsigned int pos, unsigned int count,
-               const void *kbuf, const void __user *ubuf)
-{
-       int ret;
-
-       if (!cpu_has_fxsr)
-               return -ENODEV;
-
-       ret = init_fpu(target);
-       if (ret)
-               return ret;
-
-       sanitize_i387_state(target);
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                &target->thread.fpu.state->fxsave, 0, -1);
-
-       /*
-        * mxcsr reserved bits must be masked to zero for security reasons.
-        */
-       target->thread.fpu.state->fxsave.mxcsr &= mxcsr_feature_mask;
-
-       /*
-        * update the header bits in the xsave header, indicating the
-        * presence of FP and SSE state.
-        */
-       if (cpu_has_xsave)
-               target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FPSSE;
-
-       return ret;
-}
-
-int xstateregs_get(struct task_struct *target, const struct user_regset *regset,
-               unsigned int pos, unsigned int count,
-               void *kbuf, void __user *ubuf)
-{
-       struct xsave_struct *xsave;
-       int ret;
-
-       if (!cpu_has_xsave)
-               return -ENODEV;
-
-       ret = init_fpu(target);
-       if (ret)
-               return ret;
-
-       xsave = &target->thread.fpu.state->xsave;
-
-       /*
-        * Copy the 48 bytes defined by the software first into the xstate
-        * memory layout in the thread struct, so that we can copy the entire
-        * xstateregs to the user using one user_regset_copyout().
-        */
-       memcpy(&xsave->i387.sw_reserved,
-               xstate_fx_sw_bytes, sizeof(xstate_fx_sw_bytes));
-       /*
-        * Copy the xstate memory layout.
-        */
-       ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-       return ret;
-}
-
-int xstateregs_set(struct task_struct *target, const struct user_regset *regset,
-                 unsigned int pos, unsigned int count,
-                 const void *kbuf, const void __user *ubuf)
-{
-       struct xsave_struct *xsave;
-       int ret;
-
-       if (!cpu_has_xsave)
-               return -ENODEV;
-
-       ret = init_fpu(target);
-       if (ret)
-               return ret;
-
-       xsave = &target->thread.fpu.state->xsave;
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, xsave, 0, -1);
-       /*
-        * mxcsr reserved bits must be masked to zero for security reasons.
-        */
-       xsave->i387.mxcsr &= mxcsr_feature_mask;
-       xsave->xsave_hdr.xstate_bv &= pcntxt_mask;
-       /*
-        * These bits must be zero.
-        */
-       memset(&xsave->xsave_hdr.reserved, 0, 48);
-       return ret;
-}
-
-#if defined CONFIG_X86_32 || defined CONFIG_IA32_EMULATION
-
-/*
- * FPU tag word conversions.
- */
-
-static inline unsigned short twd_i387_to_fxsr(unsigned short twd)
-{
-       unsigned int tmp; /* to avoid 16 bit prefixes in the code */
-
-       /* Transform each pair of bits into 01 (valid) or 00 (empty) */
-       tmp = ~twd;
-       tmp = (tmp | (tmp>>1)) & 0x5555; /* 0V0V0V0V0V0V0V0V */
-       /* and move the valid bits to the lower byte. */
-       tmp = (tmp | (tmp >> 1)) & 0x3333; /* 00VV00VV00VV00VV */
-       tmp = (tmp | (tmp >> 2)) & 0x0f0f; /* 0000VVVV0000VVVV */
-       tmp = (tmp | (tmp >> 4)) & 0x00ff; /* 00000000VVVVVVVV */
-
-       return tmp;
-}
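
The shift cascade folds each 2-bit i387 tag (00 valid, 01 zero, 10 special, 11 empty) down to the FXSR convention of one bit per register: 1 for any non-empty tag, 0 for empty. A stand-alone harness for the same transform, with two worked inputs (the harness is illustrative):

    #include <stdio.h>

    /* Identical transform to twd_i387_to_fxsr() above. */
    static unsigned short twd_i387_to_fxsr(unsigned short twd)
    {
            unsigned int tmp;

            tmp = ~twd;
            tmp = (tmp | (tmp >> 1)) & 0x5555;
            tmp = (tmp | (tmp >> 1)) & 0x3333;
            tmp = (tmp | (tmp >> 2)) & 0x0f0f;
            tmp = (tmp | (tmp >> 4)) & 0x00ff;
            return tmp;
    }

    int main(void)
    {
            /* All eight registers tagged empty (11): expect 0x00. */
            printf("0x%02x\n", twd_i387_to_fxsr(0xffff));
            /* st(0) tagged valid (00), rest empty: expect 0x01. */
            printf("0x%02x\n", twd_i387_to_fxsr(0xfffc));
            return 0;
    }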
-
-#define FPREG_ADDR(f, n)       ((void *)&(f)->st_space + (n) * 16)
-#define FP_EXP_TAG_VALID       0
-#define FP_EXP_TAG_ZERO                1
-#define FP_EXP_TAG_SPECIAL     2
-#define FP_EXP_TAG_EMPTY       3
-
-static inline u32 twd_fxsr_to_i387(struct i387_fxsave_struct *fxsave)
-{
-       struct _fpxreg *st;
-       u32 tos = (fxsave->swd >> 11) & 7;
-       u32 twd = (unsigned long) fxsave->twd;
-       u32 tag;
-       u32 ret = 0xffff0000u;
-       int i;
-
-       for (i = 0; i < 8; i++, twd >>= 1) {
-               if (twd & 0x1) {
-                       st = FPREG_ADDR(fxsave, (i - tos) & 7);
-
-                       switch (st->exponent & 0x7fff) {
-                       case 0x7fff:
-                               tag = FP_EXP_TAG_SPECIAL;
-                               break;
-                       case 0x0000:
-                               if (!st->significand[0] &&
-                                   !st->significand[1] &&
-                                   !st->significand[2] &&
-                                   !st->significand[3])
-                                       tag = FP_EXP_TAG_ZERO;
-                               else
-                                       tag = FP_EXP_TAG_SPECIAL;
-                               break;
-                       default:
-                               if (st->significand[3] & 0x8000)
-                                       tag = FP_EXP_TAG_VALID;
-                               else
-                                       tag = FP_EXP_TAG_SPECIAL;
-                               break;
-                       }
-               } else {
-                       tag = FP_EXP_TAG_EMPTY;
-               }
-               ret |= tag << (2 * i);
-       }
-       return ret;
-}
-
-/*
- * FXSR floating point environment conversions.
- */
-
-void
-convert_from_fxsr(struct user_i387_ia32_struct *env, struct task_struct *tsk)
-{
-       struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
-       struct _fpreg *to = (struct _fpreg *) &env->st_space[0];
-       struct _fpxreg *from = (struct _fpxreg *) &fxsave->st_space[0];
-       int i;
-
-       env->cwd = fxsave->cwd | 0xffff0000u;
-       env->swd = fxsave->swd | 0xffff0000u;
-       env->twd = twd_fxsr_to_i387(fxsave);
-
-#ifdef CONFIG_X86_64
-       env->fip = fxsave->rip;
-       env->foo = fxsave->rdp;
-       /*
-        * These should actually be ds/cs at FPU exception time, but
-        * that information is not available in 64-bit mode.
-        */
-       env->fcs = task_pt_regs(tsk)->cs;
-       if (tsk == current) {
-               savesegment(ds, env->fos);
-       } else {
-               env->fos = tsk->thread.ds;
-       }
-       env->fos |= 0xffff0000;
-#else
-       env->fip = fxsave->fip;
-       env->fcs = (u16) fxsave->fcs | ((u32) fxsave->fop << 16);
-       env->foo = fxsave->foo;
-       env->fos = fxsave->fos;
-#endif
-
-       for (i = 0; i < 8; ++i)
-               memcpy(&to[i], &from[i], sizeof(to[0]));
-}
-
-void convert_to_fxsr(struct task_struct *tsk,
-                    const struct user_i387_ia32_struct *env)
-
-{
-       struct i387_fxsave_struct *fxsave = &tsk->thread.fpu.state->fxsave;
-       struct _fpreg *from = (struct _fpreg *) &env->st_space[0];
-       struct _fpxreg *to = (struct _fpxreg *) &fxsave->st_space[0];
-       int i;
-
-       fxsave->cwd = env->cwd;
-       fxsave->swd = env->swd;
-       fxsave->twd = twd_i387_to_fxsr(env->twd);
-       fxsave->fop = (u16) ((u32) env->fcs >> 16);
-#ifdef CONFIG_X86_64
-       fxsave->rip = env->fip;
-       fxsave->rdp = env->foo;
-       /* cs and ds ignored */
-#else
-       fxsave->fip = env->fip;
-       fxsave->fcs = (env->fcs & 0xffff);
-       fxsave->foo = env->foo;
-       fxsave->fos = env->fos;
-#endif
-
-       for (i = 0; i < 8; ++i)
-               memcpy(&to[i], &from[i], sizeof(from[0]));
-}
-
-int fpregs_get(struct task_struct *target, const struct user_regset *regset,
-              unsigned int pos, unsigned int count,
-              void *kbuf, void __user *ubuf)
-{
-       struct user_i387_ia32_struct env;
-       int ret;
-
-       ret = init_fpu(target);
-       if (ret)
-               return ret;
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_get(target, regset, pos, count, kbuf, ubuf);
-
-       if (!cpu_has_fxsr)
-               return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
-                                          &target->thread.fpu.state->fsave, 0,
-                                          -1);
-
-       sanitize_i387_state(target);
-
-       if (kbuf && pos == 0 && count == sizeof(env)) {
-               convert_from_fxsr(kbuf, target);
-               return 0;
-       }
-
-       convert_from_fxsr(&env, target);
-
-       return user_regset_copyout(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
-}
-
-int fpregs_set(struct task_struct *target, const struct user_regset *regset,
-              unsigned int pos, unsigned int count,
-              const void *kbuf, const void __user *ubuf)
-{
-       struct user_i387_ia32_struct env;
-       int ret;
-
-       ret = init_fpu(target);
-       if (ret)
-               return ret;
-
-       sanitize_i387_state(target);
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_set(target, regset, pos, count, kbuf, ubuf);
-
-       if (!cpu_has_fxsr)
-               return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
-                                         &target->thread.fpu.state->fsave, 0,
-                                         -1);
-
-       if (pos > 0 || count < sizeof(env))
-               convert_from_fxsr(&env, target);
-
-       ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &env, 0, -1);
-       if (!ret)
-               convert_to_fxsr(target, &env);
-
-       /*
-        * update the header bit in the xsave header, indicating the
-        * presence of FP.
-        */
-       if (cpu_has_xsave)
-               target->thread.fpu.state->xsave.xsave_hdr.xstate_bv |= XSTATE_FP;
-       return ret;
-}
-
-/*
- * FPU state for core dumps.
- * This is only used for a.out dumps now.
- * It is declared generically using elf_fpregset_t (which is
- * struct user_i387_struct) but is in fact only used for 32-bit
- * dumps, so on 64-bit it is really struct user_i387_ia32_struct.
- */
-int dump_fpu(struct pt_regs *regs, struct user_i387_struct *fpu)
-{
-       struct task_struct *tsk = current;
-       int fpvalid;
-
-       fpvalid = !!used_math();
-       if (fpvalid)
-               fpvalid = !fpregs_get(tsk, NULL,
-                                     0, sizeof(struct user_i387_ia32_struct),
-                                     fpu, NULL);
-
-       return fpvalid;
-}
-EXPORT_SYMBOL(dump_fpu);
-
-#endif /* CONFIG_X86_32 || CONFIG_IA32_EMULATION */
-
-static int __init no_387(char *s)
-{
-       setup_clear_cpu_cap(X86_FEATURE_FPU);
-       return 1;
-}
-
-__setup("no387", no_387);
-
-void fpu_detect(struct cpuinfo_x86 *c)
-{
-       unsigned long cr0;
-       u16 fsw, fcw;
-
-       fsw = fcw = 0xffff;
-
-       cr0 = read_cr0();
-       cr0 &= ~(X86_CR0_TS | X86_CR0_EM);
-       write_cr0(cr0);
-
-       asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
-                    : "+m" (fsw), "+m" (fcw));
-
-       if (fsw == 0 && (fcw & 0x103f) == 0x003f)
-               set_cpu_cap(c, X86_FEATURE_FPU);
-       else
-               clear_cpu_cap(c, X86_FEATURE_FPU);
-
-       /* The final cr0 value is set in fpu_init() */
-}
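
The probe relies on architectural fninit behaviour: it clears the status word and loads 0x037f into the control word, so a zero fsw plus the masked fcw compare distinguishes a working x87 from floating bus garbage. The same check can be re-run from user space on x86, purely as an illustration:

    #include <stdio.h>

    int main(void)       /* x86 only: uses the same inline asm as above */
    {
            unsigned short fsw = 0xffff, fcw = 0xffff;

            asm volatile("fninit ; fnstsw %0 ; fnstcw %1"
                         : "+m" (fsw), "+m" (fcw));

            printf("fsw=%#x fcw=%#x -> %s\n", fsw, fcw,
                   (fsw == 0 && (fcw & 0x103f) == 0x003f)
                           ? "FPU present" : "no FPU");
            return 0;
    }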
index e7cc5370cd2fcade87dc1cecae2ab85184f62d27..16cb827a5b27745d1f571d49a5b45443babb2c8c 100644 (file)
@@ -329,8 +329,8 @@ static void init_8259A(int auto_eoi)
         */
        outb_pic(0x11, PIC_MASTER_CMD); /* ICW1: select 8259A-1 init */
 
-       /* ICW2: 8259A-1 IR0-7 mapped to 0x30-0x37 */
-       outb_pic(IRQ0_VECTOR, PIC_MASTER_IMR);
+       /* ICW2: 8259A-1 IR0-7 mapped to ISA_IRQ_VECTOR(0) */
+       outb_pic(ISA_IRQ_VECTOR(0), PIC_MASTER_IMR);
 
        /* 8259A-1 (the master) has a slave on IR2 */
        outb_pic(1U << PIC_CASCADE_IR, PIC_MASTER_IMR);
@@ -342,8 +342,8 @@ static void init_8259A(int auto_eoi)
 
        outb_pic(0x11, PIC_SLAVE_CMD);  /* ICW1: select 8259A-2 init */
 
-       /* ICW2: 8259A-2 IR0-7 mapped to IRQ8_VECTOR */
-       outb_pic(IRQ8_VECTOR, PIC_SLAVE_IMR);
+       /* ICW2: 8259A-2 IR0-7 mapped to ISA_IRQ_VECTOR(8) */
+       outb_pic(ISA_IRQ_VECTOR(8), PIC_SLAVE_IMR);
        /* 8259A-2 is a slave on master's IR2 */
        outb_pic(PIC_CASCADE_IR, PIC_SLAVE_IMR);
        /* (slave's support for AEOI in flat mode is to be investigated) */
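
ICW2 takes the base IDT vector for a chip's eight inputs, so the master (IR0-7) and slave (IR8-15) are programmed eight vectors apart. The ISA_IRQ_VECTOR() definition itself is outside this hunk; one plausible sketch, assuming the conventional x86 layout where external vectors start at 0x20 and the legacy block sits on the next 16-aligned boundary, reproduces the 0x30-0x37 mapping the old comment cited:

    #include <stdio.h>

    #define FIRST_EXTERNAL_VECTOR 0x20
    /* Assumed definition for illustration, not copied from the kernel. */
    #define ISA_IRQ_VECTOR(irq) (((FIRST_EXTERNAL_VECTOR + 16) & ~15) + (irq))

    int main(void)
    {
            printf("ISA_IRQ_VECTOR(0) = %#x\n", ISA_IRQ_VECTOR(0)); /* 0x30 */
            printf("ISA_IRQ_VECTOR(8) = %#x\n", ISA_IRQ_VECTOR(8)); /* 0x38 */
            return 0;
    }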
index e5952c22553241e2ceea5d5fd6f1f7b758cc960e..88b366487b0e44b613e83d412febfad74381ca92 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <asm/trace/irq_vectors.h>
 
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+DEFINE_PER_CPU(struct pt_regs *, irq_regs);
+EXPORT_PER_CPU_SYMBOL(irq_regs);
+
 atomic_t irq_err_count;
 
 /* Function pointer for generic interrupt vector handling */
@@ -116,6 +122,12 @@ int arch_show_interrupts(struct seq_file *p, int prec)
                seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
        seq_puts(p, "  Threshold APIC interrupts\n");
 #endif
+#ifdef CONFIG_X86_MCE_AMD
+       seq_printf(p, "%*s: ", prec, "DFR");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
+       seq_puts(p, "  Deferred Error APIC interrupts\n");
+#endif
 #ifdef CONFIG_X86_MCE
        seq_printf(p, "%*s: ", prec, "MCE");
        for_each_online_cpu(j)
@@ -135,6 +147,18 @@ int arch_show_interrupts(struct seq_file *p, int prec)
        seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
 #if defined(CONFIG_X86_IO_APIC)
        seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
+#endif
+#ifdef CONFIG_HAVE_KVM
+       seq_printf(p, "%*s: ", prec, "PIN");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
+       seq_puts(p, "  Posted-interrupt notification event\n");
+
+       seq_printf(p, "%*s: ", prec, "PIW");
+       for_each_online_cpu(j)
+               seq_printf(p, "%10u ",
+                          irq_stats(j)->kvm_posted_intr_wakeup_ipis);
+       seq_puts(p, "  Posted-interrupt wakeup event\n");
 #endif
        return 0;
 }
@@ -192,8 +216,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
        unsigned vector = ~regs->orig_ax;
        unsigned irq;
 
-       irq_enter();
-       exit_idle();
+       entering_irq();
 
        irq = __this_cpu_read(vector_irq[vector]);
 
@@ -209,7 +232,7 @@ __visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
                }
        }
 
-       irq_exit();
+       exiting_irq();
 
        set_irq_regs(old_regs);
        return 1;
@@ -237,6 +260,18 @@ __visible void smp_x86_platform_ipi(struct pt_regs *regs)
 }
 
 #ifdef CONFIG_HAVE_KVM
+static void dummy_handler(void) {}
+static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;
+
+void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
+{
+       if (handler)
+               kvm_posted_intr_wakeup_handler = handler;
+       else
+               kvm_posted_intr_wakeup_handler = dummy_handler;
+}
+EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
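
The helper keeps a no-op handler installed whenever no real one is registered, so the interrupt path can always call through the pointer without a NULL check. The same pattern in user-space miniature (all names here are illustrative, not kernel API):

    #include <stddef.h>
    #include <stdio.h>

    static void dummy_handler(void) {}
    static void (*wakeup_handler)(void) = dummy_handler;

    static void set_wakeup_handler(void (*handler)(void))
    {
            wakeup_handler = handler ? handler : dummy_handler;
    }

    static void my_wakeup(void) { puts("wakeup!"); }

    int main(void)
    {
            wakeup_handler();               /* safe no-op before registration */
            set_wakeup_handler(my_wakeup);
            wakeup_handler();               /* prints "wakeup!" */
            set_wakeup_handler(NULL);       /* unregister: back to the no-op */
            wakeup_handler();
            return 0;
    }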
+
 /*
  * Handler for POSTED_INTERRUPT_VECTOR.
  */
@@ -244,16 +279,23 @@ __visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
 {
        struct pt_regs *old_regs = set_irq_regs(regs);
 
-       ack_APIC_irq();
-
-       irq_enter();
-
-       exit_idle();
-
+       entering_ack_irq();
        inc_irq_stat(kvm_posted_intr_ipis);
+       exiting_irq();
+       set_irq_regs(old_regs);
+}
 
-       irq_exit();
+/*
+ * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
+ */
+__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
+{
+       struct pt_regs *old_regs = set_irq_regs(regs);
 
+       entering_ack_irq();
+       inc_irq_stat(kvm_posted_intr_wakeup_ipis);
+       kvm_posted_intr_wakeup_handler();
+       exiting_irq();
        set_irq_regs(old_regs);
 }
 #endif
index f9fd86a7fcc7d1bc8c2cc920fc5b5037f5859336..cd74f5978ab97a4c7d2ee072d23d764ab49ca6dd 100644 (file)
 
 #include <asm/apic.h>
 
-DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
-EXPORT_PER_CPU_SYMBOL(irq_stat);
-
-DEFINE_PER_CPU(struct pt_regs *, irq_regs);
-EXPORT_PER_CPU_SYMBOL(irq_regs);
-
 #ifdef CONFIG_DEBUG_STACKOVERFLOW
 
 int sysctl_panic_on_stackoverflow __read_mostly;
index 394e643d7830fc01d4da516bd79cab1dc0aa6962..bc4604e500a3284ab3b2f5903b4fc1d506c0b367 100644 (file)
 #include <asm/idle.h>
 #include <asm/apic.h>
 
-DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
-EXPORT_PER_CPU_SYMBOL(irq_stat);
-
-DEFINE_PER_CPU(struct pt_regs *, irq_regs);
-EXPORT_PER_CPU_SYMBOL(irq_regs);
-
 int sysctl_panic_on_stackoverflow;
 
 /*
index 15d741ddfeeb7c4497e28052e1ad7ea08e68c252..dc5fa6a1e8d640aa8fc407ee3035feb0a1778451 100644 (file)
 #include <asm/apic.h>
 #include <asm/trace/irq_vectors.h>
 
-static inline void irq_work_entering_irq(void)
-{
-       irq_enter();
-       ack_APIC_irq();
-}
-
 static inline void __smp_irq_work_interrupt(void)
 {
        inc_irq_stat(apic_irq_work_irqs);
@@ -24,14 +18,14 @@ static inline void __smp_irq_work_interrupt(void)
 
 __visible void smp_irq_work_interrupt(struct pt_regs *regs)
 {
-       irq_work_entering_irq();
+       ipi_entering_ack_irq();
        __smp_irq_work_interrupt();
        exiting_irq();
 }
 
 __visible void smp_trace_irq_work_interrupt(struct pt_regs *regs)
 {
-       irq_work_entering_irq();
+       ipi_entering_ack_irq();
        trace_irq_work_entry(IRQ_WORK_VECTOR);
        __smp_irq_work_interrupt();
        trace_irq_work_exit(IRQ_WORK_VECTOR);
index cd10a64372647c3579ba6717db49c6cd63c6353a..a3a5e158ed69553eaa623678d3812e008a4e24c7 100644 (file)
@@ -86,7 +86,7 @@ void __init init_IRQ(void)
        int i;
 
        /*
+        * On cpu 0, assign ISA_IRQ_VECTOR(irq) to IRQ 0..15.
+        * On cpu 0, Assign ISA_IRQ_VECTOR(irq) to IRQ 0..15.
         * If these IRQ's are handled by legacy interrupt-controllers like PIC,
         * then this configuration will likely be static after the boot. If
         * these IRQ's are handled by more modern controllers like IO-APIC,
@@ -94,7 +94,7 @@ void __init init_IRQ(void)
         * irq's migrate etc.
         */
        for (i = 0; i < nr_legacy_irqs(); i++)
-               per_cpu(vector_irq, 0)[IRQ0_VECTOR + i] = i;
+               per_cpu(vector_irq, 0)[ISA_IRQ_VECTOR(i)] = i;
 
        x86_init.irqs.intr_init();
 }
@@ -135,6 +135,10 @@ static void __init apic_intr_init(void)
        alloc_intr_gate(THRESHOLD_APIC_VECTOR, threshold_interrupt);
 #endif
 
+#ifdef CONFIG_X86_MCE_AMD
+       alloc_intr_gate(DEFERRED_ERROR_VECTOR, deferred_error_interrupt);
+#endif
+
 #ifdef CONFIG_X86_LOCAL_APIC
        /* self generated IPI for local APIC timer */
        alloc_intr_gate(LOCAL_TIMER_VECTOR, apic_timer_interrupt);
@@ -144,6 +148,8 @@ static void __init apic_intr_init(void)
 #ifdef CONFIG_HAVE_KVM
        /* IPI for KVM to deliver posted interrupt */
        alloc_intr_gate(POSTED_INTR_VECTOR, kvm_posted_intr_ipi);
+       /* IPI for KVM to deliver interrupt to wake up tasks */
+       alloc_intr_gate(POSTED_INTR_WAKEUP_VECTOR, kvm_posted_intr_wakeup_ipi);
 #endif
 
        /* IPI vectors for APIC spurious and error interrupts */
index 9435620062df30e549d1510baaf9c4c72ab90290..1681504e44a4c3479d26fd1c9d10f1fbee264d1f 100644 (file)
@@ -584,6 +584,39 @@ static void kvm_kick_cpu(int cpu)
        kvm_hypercall2(KVM_HC_KICK_CPU, flags, apicid);
 }
 
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+#include <asm/qspinlock.h>
+
+static void kvm_wait(u8 *ptr, u8 val)
+{
+       unsigned long flags;
+
+       if (in_nmi())
+               return;
+
+       local_irq_save(flags);
+
+       if (READ_ONCE(*ptr) != val)
+               goto out;
+
+       /*
+        * Halt until it's our turn and we are kicked. Note that we do a
+        * safe halt for the irq-enabled case, to avoid hanging when the
+        * lock info is overwritten in the irq spinlock slowpath and no
+        * spurious interrupt occurs to save us.
+        */
+       if (arch_irqs_disabled_flags(flags))
+               halt();
+       else
+               safe_halt();
+
+out:
+       local_irq_restore(flags);
+}
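
kvm_wait() re-reads the lock byte with interrupts disabled before halting: if the holder has already changed it, the kick raced ahead and blocking would mean sleeping through our wakeup. A user-space analogue of that check-then-block rule, with the halt and the KVM_HC_KICK_CPU hypercall reduced to comments (everything here is illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    static _Atomic unsigned char lock_byte;

    static void wait_sketch(unsigned char val)
    {
            /* "Interrupts disabled" here: no kick can slip in below. */
            if (atomic_load(&lock_byte) != val)
                    return;         /* kick already happened: don't block */
            /* ... halt()/safe_halt() until kicked ... */
    }

    int main(void)
    {
            atomic_store(&lock_byte, 1);
            atomic_store(&lock_byte, 2);    /* the "kick" side moves the state */
            wait_sketch(1);                 /* sees stale val, returns at once */
            puts("no lost wakeup");
            return 0;
    }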
+
+#else /* !CONFIG_QUEUED_SPINLOCKS */
+
 enum kvm_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
@@ -817,6 +850,8 @@ static void kvm_unlock_kick(struct arch_spinlock *lock, __ticket_t ticket)
        }
 }
 
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
+
 /*
  * Setup pv_lock_ops to exploit KVM_FEATURE_PV_UNHALT if present.
  */
@@ -828,8 +863,16 @@ void __init kvm_spinlock_init(void)
        if (!kvm_para_has_feature(KVM_FEATURE_PV_UNHALT))
                return;
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+       __pv_init_lock_hash();
+       pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+       pv_lock_ops.wait = kvm_wait;
+       pv_lock_ops.kick = kvm_kick_cpu;
+#else /* !CONFIG_QUEUED_SPINLOCKS */
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(kvm_lock_spinning);
        pv_lock_ops.unlock_kick = kvm_unlock_kick;
+#endif
 }
 
 static __init int kvm_spinlock_init_jump(void)
index 415480d3ea848bcf95e7ae26b56a5261add1cc8c..11546b462fa6d93782a9bd164e9f7f163fbb28f0 100644 (file)
@@ -17,6 +17,7 @@
 #include <linux/ftrace.h>
 #include <linux/io.h>
 #include <linux/suspend.h>
+#include <linux/vmalloc.h>
 
 #include <asm/init.h>
 #include <asm/pgtable.h>
index 2d2a237f2c73698a4dd2819800edd6179239904b..30ca7607cbbbbcae4793aa5c14d8f73bbd784d71 100644 (file)
@@ -19,8 +19,8 @@
 #include <linux/module.h>
 #include <linux/smp.h>
 #include <linux/pci.h>
-#include <linux/irqdomain.h>
 
+#include <asm/irqdomain.h>
 #include <asm/mtrr.h>
 #include <asm/mpspec.h>
 #include <asm/pgalloc.h>
@@ -113,11 +113,6 @@ static void __init MP_bus_info(struct mpc_bus *m)
                pr_warn("Unknown bustype %s - ignoring\n", str);
 }
 
-static struct irq_domain_ops mp_ioapic_irqdomain_ops = {
-       .map = mp_irqdomain_map,
-       .unmap = mp_irqdomain_unmap,
-};
-
 static void __init MP_ioapic_info(struct mpc_ioapic *m)
 {
        struct ioapic_domain_cfg cfg = {
index bbb6c7316341f806dc3cb3a5bdce52cbb28de2a3..33ee3e0efd65bccc9ca049b07a97e81428ead282 100644 (file)
@@ -8,11 +8,33 @@
 
 #include <asm/paravirt.h>
 
+#ifdef CONFIG_QUEUED_SPINLOCKS
+__visible void __native_queued_spin_unlock(struct qspinlock *lock)
+{
+       native_queued_spin_unlock(lock);
+}
+
+PV_CALLEE_SAVE_REGS_THUNK(__native_queued_spin_unlock);
+
+bool pv_is_native_spin_unlock(void)
+{
+       return pv_lock_ops.queued_spin_unlock.func ==
+               __raw_callee_save___native_queued_spin_unlock;
+}
+#endif
+
 struct pv_lock_ops pv_lock_ops = {
 #ifdef CONFIG_SMP
+#ifdef CONFIG_QUEUED_SPINLOCKS
+       .queued_spin_lock_slowpath = native_queued_spin_lock_slowpath,
+       .queued_spin_unlock = PV_CALLEE_SAVE(__native_queued_spin_unlock),
+       .wait = paravirt_nop,
+       .kick = paravirt_nop,
+#else /* !CONFIG_QUEUED_SPINLOCKS */
        .lock_spinning = __PV_IS_CALLEE_SAVE(paravirt_nop),
        .unlock_kick = paravirt_nop,
-#endif
+#endif /* !CONFIG_QUEUED_SPINLOCKS */
+#endif /* SMP */
 };
 EXPORT_SYMBOL(pv_lock_ops);
 
index c614dd492f5f720058346a33883f5a67d4689b94..58bcfb67c01f1f1f9b60a6e87bf5fd11b80bd281 100644 (file)
@@ -154,7 +154,9 @@ unsigned paravirt_patch_default(u8 type, u16 clobbers, void *insnbuf,
                ret = paravirt_patch_ident_64(insnbuf, len);
 
        else if (type == PARAVIRT_PATCH(pv_cpu_ops.iret) ||
+#ifdef CONFIG_X86_32
                 type == PARAVIRT_PATCH(pv_cpu_ops.irq_enable_sysexit) ||
+#endif
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret32) ||
                 type == PARAVIRT_PATCH(pv_cpu_ops.usergs_sysret64))
                /* If operation requires a jmp, then jmp */
@@ -371,7 +373,7 @@ __visible struct pv_cpu_ops pv_cpu_ops = {
 
        .load_sp0 = native_load_sp0,
 
-#if defined(CONFIG_X86_32) || defined(CONFIG_IA32_EMULATION)
+#if defined(CONFIG_X86_32)
        .irq_enable_sysexit = native_irq_enable_sysexit,
 #endif
 #ifdef CONFIG_X86_64
index d9f32e6d6ab65476be60c34d06c522215b83a26b..e1b013696dde586ba1a80f23535ac07b1bee3e81 100644 (file)
@@ -12,6 +12,10 @@ DEF_NATIVE(pv_mmu_ops, read_cr3, "mov %cr3, %eax");
 DEF_NATIVE(pv_cpu_ops, clts, "clts");
 DEF_NATIVE(pv_cpu_ops, read_tsc, "rdtsc");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%eax)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
        /* arg in %eax, return in %eax */
@@ -24,6 +28,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
        return 0;
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
@@ -47,14 +53,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_mmu_ops, write_cr3);
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_cpu_ops, read_tsc);
-
-       patch_site:
-               ret = paravirt_patch_insns(ibuf, len, start, end);
-               break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+               case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+                       if (pv_is_native_spin_unlock()) {
+                               start = start_pv_lock_ops_queued_spin_unlock;
+                               end   = end_pv_lock_ops_queued_spin_unlock;
+                               goto patch_site;
+                       }
+#endif
 
        default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;
+
+patch_site:
+               ret = paravirt_patch_insns(ibuf, len, start, end);
+               break;
        }
 #undef PATCH_SITE
        return ret;
index a1da6737ba5b80c4ee636204d49d4813348ef903..8aa05583bc42dec102b3fffb63d2fa6a828eee4c 100644 (file)
@@ -21,6 +21,10 @@ DEF_NATIVE(pv_cpu_ops, swapgs, "swapgs");
 DEF_NATIVE(, mov32, "mov %edi, %eax");
 DEF_NATIVE(, mov64, "mov %rdi, %rax");
 
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+DEF_NATIVE(pv_lock_ops, queued_spin_unlock, "movb $0, (%rdi)");
+#endif
+
 unsigned paravirt_patch_ident_32(void *insnbuf, unsigned len)
 {
        return paravirt_patch_insns(insnbuf, len,
@@ -33,6 +37,8 @@ unsigned paravirt_patch_ident_64(void *insnbuf, unsigned len)
                                    start__mov64, end__mov64);
 }
 
+extern bool pv_is_native_spin_unlock(void);
+
 unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                      unsigned long addr, unsigned len)
 {
@@ -49,7 +55,6 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_irq_ops, save_fl);
                PATCH_SITE(pv_irq_ops, irq_enable);
                PATCH_SITE(pv_irq_ops, irq_disable);
-               PATCH_SITE(pv_cpu_ops, irq_enable_sysexit);
                PATCH_SITE(pv_cpu_ops, usergs_sysret32);
                PATCH_SITE(pv_cpu_ops, usergs_sysret64);
                PATCH_SITE(pv_cpu_ops, swapgs);
@@ -59,14 +64,22 @@ unsigned native_patch(u8 type, u16 clobbers, void *ibuf,
                PATCH_SITE(pv_cpu_ops, clts);
                PATCH_SITE(pv_mmu_ops, flush_tlb_single);
                PATCH_SITE(pv_cpu_ops, wbinvd);
-
-       patch_site:
-               ret = paravirt_patch_insns(ibuf, len, start, end);
-               break;
+#if defined(CONFIG_PARAVIRT_SPINLOCKS) && defined(CONFIG_QUEUED_SPINLOCKS)
+               case PARAVIRT_PATCH(pv_lock_ops.queued_spin_unlock):
+                       if (pv_is_native_spin_unlock()) {
+                               start = start_pv_lock_ops_queued_spin_unlock;
+                               end   = end_pv_lock_ops_queued_spin_unlock;
+                               goto patch_site;
+                       }
+#endif
 
        default:
                ret = paravirt_patch_default(type, clobbers, ibuf, addr, len);
                break;
+
+patch_site:
+               ret = paravirt_patch_insns(ibuf, len, start, end);
+               break;
        }
 #undef PATCH_SITE
        return ret;
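
On both word sizes the patched-in template is a single byte store of zero to the lock word, through %eax or %rdi respectively, i.e. the first argument register of each kernel calling convention, replacing the indirect pv-ops call whenever the native unlock is in effect. In C terms the template amounts to the following sketch (the struct layout is an assumption for illustration only):

    #include <stdatomic.h>

    struct qspinlock_sketch {
            _Atomic unsigned char locked;   /* assumed: lock byte at offset 0 */
            unsigned char pad[3];
    };

    /* What "movb $0, (%rdi)" does: release the lock byte with a plain
     * store, which on x86 already has release ordering. */
    static inline void queued_spin_unlock_sketch(struct qspinlock_sketch *l)
    {
            atomic_store_explicit(&l->locked, 0, memory_order_release);
    }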
index a25e202bb319caf87ce147831f6a1a47ed03e254..353972c1946cd35f378054439a05bed8200f92c9 100644 (file)
@@ -140,6 +140,51 @@ void dma_generic_free_coherent(struct device *dev, size_t size, void *vaddr,
                free_pages((unsigned long)vaddr, get_order(size));
 }
 
+void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
+                     gfp_t gfp, struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+       void *memory;
+
+       gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+
+       if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
+               return memory;
+
+       if (!dev)
+               dev = &x86_dma_fallback_dev;
+
+       if (!is_device_dma_capable(dev))
+               return NULL;
+
+       if (!ops->alloc)
+               return NULL;
+
+       memory = ops->alloc(dev, size, dma_handle,
+                           dma_alloc_coherent_gfp_flags(dev, gfp), attrs);
+       debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
+
+       return memory;
+}
+EXPORT_SYMBOL(dma_alloc_attrs);
+
+void dma_free_attrs(struct device *dev, size_t size,
+                   void *vaddr, dma_addr_t bus,
+                   struct dma_attrs *attrs)
+{
+       struct dma_map_ops *ops = get_dma_ops(dev);
+
+       WARN_ON(irqs_disabled());       /* for portability */
+
+       if (dma_release_from_coherent(dev, get_order(size), vaddr))
+               return;
+
+       debug_dma_free_coherent(dev, size, vaddr, bus);
+       if (ops->free)
+               ops->free(dev, size, vaddr, bus, attrs);
+}
+EXPORT_SYMBOL(dma_free_attrs);
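
With the two entry points now out of line, a caller sees the usual coherent-DMA contract: a CPU pointer plus a bus handle, freed with the same size and handle. A hedged driver-side sketch using only the signatures shown in this hunk ("mydev" and BUF_SZ are illustrative):

    #include <linux/dma-mapping.h>
    #include <linux/errno.h>

    #define BUF_SZ 4096

    static int mydev_dma_example(struct device *mydev)
    {
            dma_addr_t bus;
            void *cpu = dma_alloc_attrs(mydev, BUF_SZ, &bus, GFP_KERNEL, NULL);

            if (!cpu)
                    return -ENOMEM;

            /* ... program "bus" into the device, touch "cpu" from the CPU ... */

            dma_free_attrs(mydev, BUF_SZ, cpu, bus, NULL);
            return 0;
    }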
+
 /*
  * See <Documentation/x86/x86_64/boot-options.txt> for the iommu kernel
  * parameter documentation.
index 77dd0ad58be4a6c9c8af113189c9805c1a535633..adf0392d549aa465e8f3e0ac336ea6f0e5967f1c 100644 (file)
@@ -20,6 +20,13 @@ void *x86_swiotlb_alloc_coherent(struct device *hwdev, size_t size,
 {
        void *vaddr;
 
+       /*
+        * Don't print a warning when the first allocation attempt fails.
+        * swiotlb_alloc_coherent() will print a warning if the DMA
+        * memory allocation ultimately fails.
+        */
+       flags |= __GFP_NOWARN;
+
        vaddr = dma_generic_alloc_coherent(hwdev, size, dma_handle, flags,
                                           attrs);
        if (vaddr)
index 6e338e3b1dc04cc69ab41c012fe5671cc25321cd..9cad694ed7c4d6a755b34af705e0a055cb0c04aa 100644 (file)
@@ -25,8 +25,7 @@
 #include <asm/idle.h>
 #include <asm/uaccess.h>
 #include <asm/mwait.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
 #include <asm/nmi.h>
 #include <asm/tlbflush.h>
@@ -76,9 +75,6 @@ void idle_notifier_unregister(struct notifier_block *n)
 EXPORT_SYMBOL_GPL(idle_notifier_unregister);
 #endif
 
-struct kmem_cache *task_xstate_cachep;
-EXPORT_SYMBOL_GPL(task_xstate_cachep);
-
 /*
  * this gets called so that we can store lazy state into memory and copy the
  * current task into the new thread.
@@ -87,36 +83,7 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
 {
        *dst = *src;
 
-       dst->thread.fpu_counter = 0;
-       dst->thread.fpu.has_fpu = 0;
-       dst->thread.fpu.state = NULL;
-       task_disable_lazy_fpu_restore(dst);
-       if (tsk_used_math(src)) {
-               int err = fpu_alloc(&dst->thread.fpu);
-               if (err)
-                       return err;
-               fpu_copy(dst, src);
-       }
-       return 0;
-}
-
-void free_thread_xstate(struct task_struct *tsk)
-{
-       fpu_free(&tsk->thread.fpu);
-}
-
-void arch_release_task_struct(struct task_struct *tsk)
-{
-       free_thread_xstate(tsk);
-}
-
-void arch_task_cache_init(void)
-{
-        task_xstate_cachep =
-               kmem_cache_create("task_xstate", xstate_size,
-                                 __alignof__(union thread_xstate),
-                                 SLAB_PANIC | SLAB_NOTRACK, NULL);
-       setup_xstate_comp();
+       return fpu__copy(&dst->thread.fpu, &src->thread.fpu);
 }
 
 /*
@@ -127,6 +94,7 @@ void exit_thread(void)
        struct task_struct *me = current;
        struct thread_struct *t = &me->thread;
        unsigned long *bp = t->io_bitmap_ptr;
+       struct fpu *fpu = &t->fpu;
 
        if (bp) {
                struct tss_struct *tss = &per_cpu(cpu_tss, get_cpu());
@@ -142,7 +110,7 @@ void exit_thread(void)
                kfree(bp);
        }
 
-       drop_fpu(me);
+       fpu__drop(fpu);
 }
 
 void flush_thread(void)
@@ -152,19 +120,7 @@ void flush_thread(void)
        flush_ptrace_hw_breakpoint(tsk);
        memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
 
-       if (!use_eager_fpu()) {
-               /* FPU state will be reallocated lazily at the first use. */
-               drop_fpu(tsk);
-               free_thread_xstate(tsk);
-       } else {
-               if (!tsk_used_math(tsk)) {
-                       /* kthread execs. TODO: cleanup this horror. */
-                       if (WARN_ON(init_fpu(tsk)))
-                               force_sig(SIGKILL, tsk);
-                       user_fpu_begin();
-               }
-               restore_init_xstate();
-       }
+       fpu__clear(&tsk->thread.fpu);
 }
 
 static void hard_disable_TSC(void)
@@ -445,11 +401,10 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
 }
 
 /*
- * MONITOR/MWAIT with no hints, used for default default C1 state.
- * This invokes MWAIT with interrutps enabled and no flags,
- * which is backwards compatible with the original MWAIT implementation.
+ * MONITOR/MWAIT with no hints, used for default C1 state. This invokes MWAIT
+ * with interrupts enabled and no flags, which is backwards compatible with the
+ * original MWAIT implementation.
  */
-
 static void mwait_idle(void)
 {
        if (!current_set_polling_and_test()) {
index 8ed2106b06da63e0a8e0dcf561aad7a1fc112e40..c09c99ccf3e33fc5afff500b6310e2e6f9150526 100644 (file)
@@ -39,8 +39,7 @@
 #include <asm/pgtable.h>
 #include <asm/ldt.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/desc.h>
 #ifdef CONFIG_MATH_EMULATION
 #include <asm/math_emu.h>
@@ -242,14 +241,16 @@ __visible __notrace_funcgraph struct task_struct *
 __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread,
-                                *next = &next_p->thread;
+                            *next = &next_p->thread;
+       struct fpu *prev_fpu = &prev->fpu;
+       struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
-       fpu_switch_t fpu;
+       fpu_switch_t fpu_switch;
 
        /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */
 
-       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+       fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
 
        /*
         * Save away %gs. No need to save %fs, as it was saved on the
@@ -296,19 +297,16 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Leave lazy mode, flushing any hypercalls made here.
         * This must be done before restoring TLS segments so
         * the GDT and LDT are properly updated, and must be
-        * done before math_state_restore, so the TS bit is up
+        * done before fpu__restore(), so the TS bit is up
         * to date.
         */
        arch_end_context_switch(next_p);
 
        /*
-        * Reload esp0, kernel_stack, and current_top_of_stack.  This changes
+        * Reload esp0 and cpu_current_top_of_stack.  This changes
         * current_thread_info().
         */
        load_sp0(tss, next);
-       this_cpu_write(kernel_stack,
-                      (unsigned long)task_stack_page(next_p) +
-                      THREAD_SIZE);
        this_cpu_write(cpu_current_top_of_stack,
                       (unsigned long)task_stack_page(next_p) +
                       THREAD_SIZE);
@@ -319,7 +317,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        if (prev->gs | next->gs)
                lazy_load_gs(next->gs);
 
-       switch_fpu_finish(next_p, fpu);
+       switch_fpu_finish(next_fpu, fpu_switch);
 
        this_cpu_write(current_task, next_p);
 
index ddfdbf74f1744c235fb80ee2ee0718152882b2ce..843f92e4c7110cd621fb94dfca8cb980044fa32d 100644 (file)
@@ -38,8 +38,7 @@
 
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mmu_context.h>
 #include <asm/prctl.h>
 #include <asm/desc.h>
@@ -274,12 +273,14 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
 {
        struct thread_struct *prev = &prev_p->thread;
        struct thread_struct *next = &next_p->thread;
+       struct fpu *prev_fpu = &prev->fpu;
+       struct fpu *next_fpu = &next->fpu;
        int cpu = smp_processor_id();
        struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
        unsigned fsindex, gsindex;
-       fpu_switch_t fpu;
+       fpu_switch_t fpu_switch;
 
-       fpu = switch_fpu_prepare(prev_p, next_p, cpu);
+       fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);
 
        /* We must save %fs and %gs before load_TLS() because
         * %fs and %gs may be cleared by load_TLS().
@@ -299,7 +300,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
         * Leave lazy mode, flushing any hypercalls made here.  This
         * must be done after loading TLS entries in the GDT but before
         * loading segments that might reference them, and it must
-        * be done before math_state_restore, so the TS bit is up to
+        * be done before fpu__restore(), so the TS bit is up to
         * date.
         */
        arch_end_context_switch(next_p);
@@ -391,7 +392,7 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
                wrmsrl(MSR_KERNEL_GS_BASE, next->gs);
        prev->gsindex = gsindex;
 
-       switch_fpu_finish(next_p, fpu);
+       switch_fpu_finish(next_fpu, fpu_switch);
 
        /*
         * Switch the PDA and FPU contexts.
@@ -409,9 +410,6 @@ __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
        /* Reload esp0 and ss1.  This changes current_thread_info(). */
        load_sp0(tss, next);
 
-       this_cpu_write(kernel_stack,
-               (unsigned long)task_stack_page(next_p) + THREAD_SIZE);
-
        /*
         * Now maybe reload the debug registers and handle I/O bitmaps
         */
index a7bc794807195af79b6c15054b1941867d373198..9be72bc3613f80a9b5a5ae1bbf01adefdcfc29ce 100644 (file)
@@ -11,7 +11,6 @@
 #include <linux/errno.h>
 #include <linux/slab.h>
 #include <linux/ptrace.h>
-#include <linux/regset.h>
 #include <linux/tracehook.h>
 #include <linux/user.h>
 #include <linux/elf.h>
@@ -28,8 +27,9 @@
 #include <asm/uaccess.h>
 #include <asm/pgtable.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
+#include <asm/fpu/regset.h>
 #include <asm/debugreg.h>
 #include <asm/ldt.h>
 #include <asm/desc.h>
@@ -1297,7 +1297,7 @@ static struct user_regset x86_64_regsets[] __read_mostly = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_i387_struct) / sizeof(long),
                .size = sizeof(long), .align = sizeof(long),
-               .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
+               .active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
        },
        [REGSET_XSTATE] = {
                .core_note_type = NT_X86_XSTATE,
@@ -1338,13 +1338,13 @@ static struct user_regset x86_32_regsets[] __read_mostly = {
                .core_note_type = NT_PRFPREG,
                .n = sizeof(struct user_i387_ia32_struct) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
-               .active = fpregs_active, .get = fpregs_get, .set = fpregs_set
+               .active = regset_fpregs_active, .get = fpregs_get, .set = fpregs_set
        },
        [REGSET_XFP] = {
                .core_note_type = NT_PRXFPREG,
                .n = sizeof(struct user32_fxsr_struct) / sizeof(u32),
                .size = sizeof(u32), .align = sizeof(u32),
-               .active = xfpregs_active, .get = xfpregs_get, .set = xfpregs_set
+               .active = regset_xregset_fpregs_active, .get = xfpregs_get, .set = xfpregs_set
        },
        [REGSET_XSTATE] = {
                .core_note_type = NT_X86_XSTATE,
index d74ac33290ae3eeef46b923c4556d644b72f0d5a..265a6fdea8b73c053b1080455994fe67c9449143 100644 (file)
@@ -531,12 +531,14 @@ static void __init reserve_crashkernel_low(void)
        if (ret != 0) {
                /*
                 * two parts from lib/swiotlb.c:
-                *      swiotlb size: user specified with swiotlb= or default.
-                *      swiotlb overflow buffer: now is hardcoded to 32k.
-                *              We round it to 8M for other buffers that
-                *              may need to stay low too.
+                * -swiotlb size: user-specified with swiotlb= or default.
+                *
+                * -swiotlb overflow buffer: now hardcoded to 32k. We round it
+                * to 8M for other buffers that may need to stay low too. Also
+                * make sure we allocate enough extra low memory so that we
+                * don't run out of DMA buffers for 32-bit devices.
                 */
-               low_size = swiotlb_size_or_default() + (8UL<<20);
+               low_size = max(swiotlb_size_or_default() + (8UL<<20), 256UL<<20);
                auto_set = true;
        } else {
                /* passed with crashkernel=0,low ? */
@@ -1222,8 +1224,7 @@ void __init setup_arch(char **cmdline_p)
        init_cpu_to_node();
 
        init_apic_mappings();
-       if (x86_io_apic_ops.init)
-               x86_io_apic_ops.init();
+       io_apic_init_mappings();
 
        kvm_guest_init();
 
index 1ea14fd53933bae96dc5f8e3fec99575a3c3c454..206996c1669db344aba7ff072f734552723e7938 100644 (file)
@@ -26,8 +26,8 @@
 
 #include <asm/processor.h>
 #include <asm/ucontext.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
+#include <asm/fpu/signal.h>
 #include <asm/vdso.h>
 #include <asm/mce.h>
 #include <asm/sighandling.h>
@@ -103,7 +103,7 @@ int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
                get_user_ex(buf, &sc->fpstate);
        } get_user_catch(err);
 
-       err |= restore_xstate_sig(buf, config_enabled(CONFIG_X86_32));
+       err |= fpu__restore_sig(buf, config_enabled(CONFIG_X86_32));
 
        force_iret();
 
@@ -199,6 +199,7 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
        unsigned long sp = regs->sp;
        unsigned long buf_fx = 0;
        int onsigstack = on_sig_stack(sp);
+       struct fpu *fpu = &current->thread.fpu;
 
        /* redzone */
        if (config_enabled(CONFIG_X86_64))
@@ -218,9 +219,9 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                }
        }
 
-       if (used_math()) {
-               sp = alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
-                                    &buf_fx, &math_size);
+       if (fpu->fpstate_active) {
+               sp = fpu__alloc_mathframe(sp, config_enabled(CONFIG_X86_32),
+                                         &buf_fx, &math_size);
                *fpstate = (void __user *)sp;
        }
 
@@ -234,8 +235,8 @@ get_sigframe(struct k_sigaction *ka, struct pt_regs *regs, size_t frame_size,
                return (void __user *)-1L;
 
        /* save i387 and extended state */
-       if (used_math() &&
-           save_xstate_sig(*fpstate, (void __user *)buf_fx, math_size) < 0)
+       if (fpu->fpstate_active &&
+           copy_fpstate_to_sigframe(*fpstate, (void __user *)buf_fx, math_size) < 0)
                return (void __user *)-1L;
 
        return (void __user *)sp;
@@ -593,6 +594,22 @@ badframe:
        return 0;
 }
 
+static inline int is_ia32_compat_frame(void)
+{
+       return config_enabled(CONFIG_IA32_EMULATION) &&
+              test_thread_flag(TIF_IA32);
+}
+
+static inline int is_ia32_frame(void)
+{
+       return config_enabled(CONFIG_X86_32) || is_ia32_compat_frame();
+}
+
+static inline int is_x32_frame(void)
+{
+       return config_enabled(CONFIG_X86_X32_ABI) && test_thread_flag(TIF_X32);
+}
+
 static int
 setup_rt_frame(struct ksignal *ksig, struct pt_regs *regs)
 {
@@ -617,6 +634,7 @@ static void
 handle_signal(struct ksignal *ksig, struct pt_regs *regs)
 {
        bool stepping, failed;
+       struct fpu *fpu = &current->thread.fpu;
 
        /* Are we from a system call? */
        if (syscall_get_nr(current, regs) >= 0) {
@@ -665,8 +683,8 @@ handle_signal(struct ksignal *ksig, struct pt_regs *regs)
                /*
                 * Ensure the signal handler starts with the new fpu state.
                 */
-               if (used_math())
-                       fpu_reset_state(current);
+               if (fpu->fpstate_active)
+                       fpu__clear(fpu);
        }
        signal_setup_done(failed, ksig, stepping);
 }
index be8e1bde07aa47ff373f0245e0f4b7d6d2edcfd5..15aaa69bbb5eff9596e49000b38b90abce636052 100644 (file)
@@ -170,8 +170,7 @@ static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
 
 asmlinkage __visible void smp_reboot_interrupt(void)
 {
-       ack_APIC_irq();
-       irq_enter();
+       ipi_entering_ack_irq();
        stop_this_cpu(NULL);
        irq_exit();
 }
@@ -265,12 +264,6 @@ __visible void smp_reschedule_interrupt(struct pt_regs *regs)
         */
 }
 
-static inline void smp_entering_irq(void)
-{
-       ack_APIC_irq();
-       irq_enter();
-}
-
 __visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
 {
        /*
@@ -279,7 +272,7 @@ __visible void smp_trace_reschedule_interrupt(struct pt_regs *regs)
         * scheduler_ipi(). This is OK, since those functions are allowed
         * to nest.
         */
-       smp_entering_irq();
+       ipi_entering_ack_irq();
        trace_reschedule_entry(RESCHEDULE_VECTOR);
        __smp_reschedule_interrupt();
        trace_reschedule_exit(RESCHEDULE_VECTOR);
@@ -297,14 +290,14 @@ static inline void __smp_call_function_interrupt(void)
 
 __visible void smp_call_function_interrupt(struct pt_regs *regs)
 {
-       smp_entering_irq();
+       ipi_entering_ack_irq();
        __smp_call_function_interrupt();
        exiting_irq();
 }
 
 __visible void smp_trace_call_function_interrupt(struct pt_regs *regs)
 {
-       smp_entering_irq();
+       ipi_entering_ack_irq();
        trace_call_function_entry(CALL_FUNCTION_VECTOR);
        __smp_call_function_interrupt();
        trace_call_function_exit(CALL_FUNCTION_VECTOR);
@@ -319,14 +312,14 @@ static inline void __smp_call_function_single_interrupt(void)
 
 __visible void smp_call_function_single_interrupt(struct pt_regs *regs)
 {
-       smp_entering_irq();
+       ipi_entering_ack_irq();
        __smp_call_function_single_interrupt();
        exiting_irq();
 }
 
 __visible void smp_trace_call_function_single_interrupt(struct pt_regs *regs)
 {
-       smp_entering_irq();
+       ipi_entering_ack_irq();
        trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
        __smp_call_function_single_interrupt();
        trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
index 50e547eac8cd4b64e9e6892410366ec3a64a7a13..8add66b22f333cbc5e8936b41e64525b7f49e1bc 100644 (file)
@@ -68,8 +68,7 @@
 #include <asm/mwait.h>
 #include <asm/apic.h>
 #include <asm/io_apic.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/setup.h>
 #include <asm/uv/uv.h>
 #include <linux/mc146818rtc.h>
@@ -314,10 +313,10 @@ topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
                cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
 }
 
-#define link_mask(_m, c1, c2)                                          \
+#define link_mask(mfunc, c1, c2)                                       \
 do {                                                                   \
-       cpumask_set_cpu((c1), cpu_##_m##_mask(c2));                     \
-       cpumask_set_cpu((c2), cpu_##_m##_mask(c1));                     \
+       cpumask_set_cpu((c1), mfunc(c2));                               \
+       cpumask_set_cpu((c2), mfunc(c1));                               \
 } while (0)
 
 static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
@@ -398,9 +397,9 @@ void set_cpu_sibling_map(int cpu)
        cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
 
        if (!has_mp) {
-               cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
+               cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu));
                cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
-               cpumask_set_cpu(cpu, cpu_core_mask(cpu));
+               cpumask_set_cpu(cpu, topology_core_cpumask(cpu));
                c->booted_cores = 1;
                return;
        }
@@ -409,32 +408,34 @@ void set_cpu_sibling_map(int cpu)
                o = &cpu_data(i);
 
                if ((i == cpu) || (has_smt && match_smt(c, o)))
-                       link_mask(sibling, cpu, i);
+                       link_mask(topology_sibling_cpumask, cpu, i);
 
                if ((i == cpu) || (has_mp && match_llc(c, o)))
-                       link_mask(llc_shared, cpu, i);
+                       link_mask(cpu_llc_shared_mask, cpu, i);
 
        }
 
        /*
         * This needs a separate iteration over the cpus because we rely on all
-        * cpu_sibling_mask links to be set-up.
+        * topology_sibling_cpumask links to be set-up.
         */
        for_each_cpu(i, cpu_sibling_setup_mask) {
                o = &cpu_data(i);
 
                if ((i == cpu) || (has_mp && match_die(c, o))) {
-                       link_mask(core, cpu, i);
+                       link_mask(topology_core_cpumask, cpu, i);
 
                        /*
                         *  Does this new cpu bring up a new core?
                         */
-                       if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
+                       if (cpumask_weight(
+                           topology_sibling_cpumask(cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
-                               if (cpumask_first(cpu_sibling_mask(i)) == i)
+                               if (cpumask_first(
+                                   topology_sibling_cpumask(i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
@@ -513,6 +514,40 @@ void __inquire_remote_apic(int apicid)
        }
 }
 
+/*
+ * The Multiprocessor Specification 1.4 (1997) example code suggests
+ * that there should be a 10ms delay between the BSP asserting INIT
+ * and de-asserting INIT, when starting a remote processor.
+ * But that slows boot and resume on modern processors, which include
+ * many cores and don't require that delay.
+ *
+ * Cmdline "init_cpu_udelay=" is available to over-ride this delay.
+ * Modern processor families are quirked to remove the delay entirely.
+ */
+#define UDELAY_10MS_DEFAULT 10000
+
+static unsigned int init_udelay = UDELAY_10MS_DEFAULT;
+
+static int __init cpu_init_udelay(char *str)
+{
+       get_option(&str, &init_udelay);
+
+       return 0;
+}
+early_param("cpu_init_udelay", cpu_init_udelay);
+
+static void __init smp_quirk_init_udelay(void)
+{
+       /* if cmdline changed it from default, leave it alone */
+       if (init_udelay != UDELAY_10MS_DEFAULT)
+               return;
+
+       /* if modern processor, use no delay */
+       if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
+           ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
+               init_udelay = 0;
+}
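
The delay can still be overridden from the kernel command line via the parameter registered below, e.g.:

    cpu_init_udelay=11000

Note one corner case visible in this hunk: explicitly passing the 10000 default is indistinguishable from not passing the parameter at all, so smp_quirk_init_udelay() would still zero the delay on the quirked modern families.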
+
 /*
  * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
  * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
@@ -555,7 +590,7 @@ wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
 static int
 wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
 {
-       unsigned long send_status, accept_status = 0;
+       unsigned long send_status = 0, accept_status = 0;
        int maxlvt, num_starts, j;
 
        maxlvt = lapic_get_maxlvt();
@@ -583,7 +618,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
        pr_debug("Waiting for send to finish...\n");
        send_status = safe_apic_wait_icr_idle();
 
-       mdelay(10);
+       udelay(init_udelay);
 
        pr_debug("Deasserting INIT\n");
 
@@ -651,6 +686,7 @@ wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
                 * Give the other CPU some time to accept the IPI.
                 */
                udelay(200);
+
                if (maxlvt > 3)         /* Due to the Pentium erratum 3AP.  */
                        apic_write(APIC_ESR, 0);
                accept_status = (apic_read(APIC_ESR) & 0xEF);
@@ -792,8 +828,6 @@ void common_cpu_up(unsigned int cpu, struct task_struct *idle)
        clear_tsk_thread_flag(idle, TIF_FORK);
        initial_gs = per_cpu_offset(cpu);
 #endif
-       per_cpu(kernel_stack, cpu) =
-               (unsigned long)task_stack_page(idle) + THREAD_SIZE;
 }
 
 /*
@@ -1009,8 +1043,8 @@ static __init void disable_smp(void)
                physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
        else
                physid_set_mask_of_physid(0, &phys_cpu_present_map);
-       cpumask_set_cpu(0, cpu_sibling_mask(0));
-       cpumask_set_cpu(0, cpu_core_mask(0));
+       cpumask_set_cpu(0, topology_sibling_cpumask(0));
+       cpumask_set_cpu(0, topology_core_cpumask(0));
 }
 
 enum {
@@ -1176,6 +1210,8 @@ void __init native_smp_prepare_cpus(unsigned int max_cpus)
                uv_system_init();
 
        set_mtrr_aps_delayed_init();
+
+       smp_quirk_init_udelay();
 }
 
 void arch_enable_nonboot_cpus_begin(void)
@@ -1293,22 +1329,22 @@ static void remove_siblinginfo(int cpu)
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);
 
-       for_each_cpu(sibling, cpu_core_mask(cpu)) {
-               cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
+       for_each_cpu(sibling, topology_core_cpumask(cpu)) {
+               cpumask_clear_cpu(cpu, topology_core_cpumask(sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
-               if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
+               if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }
 
-       for_each_cpu(sibling, cpu_sibling_mask(cpu))
-               cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
+       for_each_cpu(sibling, topology_sibling_cpumask(cpu))
+               cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling));
        for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
                cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
        cpumask_clear(cpu_llc_shared_mask(cpu));
-       cpumask_clear(cpu_sibling_mask(cpu));
-       cpumask_clear(cpu_core_mask(cpu));
+       cpumask_clear(topology_sibling_cpumask(cpu));
+       cpumask_clear(topology_core_cpumask(cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
diff --git a/arch/x86/kernel/syscall_32.c b/arch/x86/kernel/syscall_32.c
deleted file mode 100644 (file)
index 3777189..0000000
+++ /dev/null
@@ -1,33 +0,0 @@
-/* System call table for i386. */
-
-#include <linux/linkage.h>
-#include <linux/sys.h>
-#include <linux/cache.h>
-#include <asm/asm-offsets.h>
-
-#ifdef CONFIG_IA32_EMULATION
-#define SYM(sym, compat) compat
-#else
-#define SYM(sym, compat) sym
-#define ia32_sys_call_table sys_call_table
-#define __NR_ia32_syscall_max __NR_syscall_max
-#endif
-
-#define __SYSCALL_I386(nr, sym, compat) extern asmlinkage void SYM(sym, compat)(void) ;
-#include <asm/syscalls_32.h>
-#undef __SYSCALL_I386
-
-#define __SYSCALL_I386(nr, sym, compat) [nr] = SYM(sym, compat),
-
-typedef asmlinkage void (*sys_call_ptr_t)(void);
-
-extern asmlinkage void sys_ni_syscall(void);
-
-__visible const sys_call_ptr_t ia32_sys_call_table[__NR_ia32_syscall_max+1] = {
-       /*
-        * Smells like a compiler bug -- it doesn't work
-        * when the & below is removed.
-        */
-       [0 ... __NR_ia32_syscall_max] = &sys_ni_syscall,
-#include <asm/syscalls_32.h>
-};
diff --git a/arch/x86/kernel/syscall_64.c b/arch/x86/kernel/syscall_64.c
deleted file mode 100644 (file)
index 4ac730b..0000000
+++ /dev/null
@@ -1,32 +0,0 @@
-/* System call table for x86-64. */
-
-#include <linux/linkage.h>
-#include <linux/sys.h>
-#include <linux/cache.h>
-#include <asm/asm-offsets.h>
-#include <asm/syscall.h>
-
-#define __SYSCALL_COMMON(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
-
-#ifdef CONFIG_X86_X32_ABI
-# define __SYSCALL_X32(nr, sym, compat) __SYSCALL_64(nr, sym, compat)
-#else
-# define __SYSCALL_X32(nr, sym, compat) /* nothing */
-#endif
-
-#define __SYSCALL_64(nr, sym, compat) extern asmlinkage void sym(void) ;
-#include <asm/syscalls_64.h>
-#undef __SYSCALL_64
-
-#define __SYSCALL_64(nr, sym, compat) [nr] = sym,
-
-extern void sys_ni_syscall(void);
-
-asmlinkage const sys_call_ptr_t sys_call_table[__NR_syscall_max+1] = {
-       /*
-        * Smells like a compiler bug -- it doesn't work
-        * when the & below is removed.
-        */
-       [0 ... __NR_syscall_max] = &sys_ni_syscall,
-#include <asm/syscalls_64.h>
-};
index 324ab524768756b1987efbee11f06ce3ba24562b..f5791927aa644493354dd487a35a1111fdf676d6 100644 (file)
 #include <asm/ftrace.h>
 #include <asm/traps.h>
 #include <asm/desc.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
 #include <asm/mce.h>
 #include <asm/fixmap.h>
 #include <asm/mach_traps.h>
 #include <asm/alternative.h>
+#include <asm/fpu/xstate.h>
+#include <asm/trace/mpx.h>
 #include <asm/mpx.h>
 
 #ifdef CONFIG_X86_64
@@ -72,8 +73,7 @@ gate_desc debug_idt_table[NR_VECTORS] __page_aligned_bss;
 #else
 #include <asm/processor-flags.h>
 #include <asm/setup.h>
-
-asmlinkage int system_call(void);
+#include <asm/proto.h>
 #endif
 
 /* Must be page-aligned because the real IDT is used in a fixmap. */
@@ -371,10 +371,8 @@ dotraplinkage void do_double_fault(struct pt_regs *regs, long error_code)
 
 dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 {
-       struct task_struct *tsk = current;
-       struct xsave_struct *xsave_buf;
        enum ctx_state prev_state;
-       struct bndcsr *bndcsr;
+       const struct bndcsr *bndcsr;
        siginfo_t *info;
 
        prev_state = exception_enter();
@@ -393,15 +391,15 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
 
        /*
         * We need to look at BNDSTATUS to resolve this exception.
-        * It is not directly accessible, though, so we need to
-        * do an xsave and then pull it out of the xsave buffer.
+        * A NULL here might mean that it is in its 'init state',
+        * which is all zeros, indicating that MPX was not
+        * responsible for the exception.
         */
-       fpu_save_init(&tsk->thread.fpu);
-       xsave_buf = &(tsk->thread.fpu.state->xsave);
-       bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
+       bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
        if (!bndcsr)
                goto exit_trap;
 
+       trace_bounds_exception_mpx(bndcsr);
        /*
         * The error code field of the BNDSTATUS register communicates status
         * information of a bound range exception #BR or operation involving
@@ -409,11 +407,11 @@ dotraplinkage void do_bounds(struct pt_regs *regs, long error_code)
         */
        switch (bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE) {
        case 2: /* Bound directory has invalid entry. */
-               if (mpx_handle_bd_fault(xsave_buf))
+               if (mpx_handle_bd_fault())
                        goto exit_trap;
                break; /* Success, it was handled */
        case 1: /* Bound violation. */
-               info = mpx_generate_siginfo(regs, xsave_buf);
+               info = mpx_generate_siginfo(regs);
                if (IS_ERR(info)) {
                        /*
                         * We failed to decode the MPX instruction.  Act as if
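
The switch above dispatches on the low error-code bits of BNDSTATUS, i.e.
bndcsr->bndstatus & MPX_BNDSTA_ERROR_CODE.  A standalone sketch of that
dispatch; the mask value is assumed here to be the low two bits, as the two
handled cases suggest:

#include <stdio.h>
#include <stdint.h>

#define MPX_BNDSTA_ERROR_CODE 0x3ULL   /* assumed mask, for illustration */

static const char *bndstatus_reason(uint64_t bndstatus)
{
        switch (bndstatus & MPX_BNDSTA_ERROR_CODE) {
        case 2:  return "bound directory has invalid entry";
        case 1:  return "bound violation";
        case 0:  return "no MPX fault recorded";
        default: return "unrecognized error code";
        }
}

int main(void)
{
        printf("%s\n", bndstatus_reason(0x1000 | 1)); /* bound violation  */
        printf("%s\n", bndstatus_reason(2));          /* invalid bd entry */
        return 0;
}
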
@@ -709,8 +707,8 @@ NOKPROBE_SYMBOL(do_debug);
 static void math_error(struct pt_regs *regs, int error_code, int trapnr)
 {
        struct task_struct *task = current;
+       struct fpu *fpu = &task->thread.fpu;
        siginfo_t info;
-       unsigned short err;
        char *str = (trapnr == X86_TRAP_MF) ? "fpu exception" :
                                                "simd exception";
 
@@ -718,8 +716,7 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
                return;
        conditional_sti(regs);
 
-       if (!user_mode(regs))
-       {
+       if (!user_mode(regs)) {
                if (!fixup_exception(regs)) {
                        task->thread.error_code = error_code;
                        task->thread.trap_nr = trapnr;
@@ -731,62 +728,20 @@ static void math_error(struct pt_regs *regs, int error_code, int trapnr)
        /*
         * Save the info for the exception handler and clear the error.
         */
-       unlazy_fpu(task);
-       task->thread.trap_nr = trapnr;
+       fpu__save(fpu);
+
+       task->thread.trap_nr    = trapnr;
        task->thread.error_code = error_code;
-       info.si_signo = SIGFPE;
-       info.si_errno = 0;
-       info.si_addr = (void __user *)uprobe_get_trap_addr(regs);
-       if (trapnr == X86_TRAP_MF) {
-               unsigned short cwd, swd;
-               /*
-                * (~cwd & swd) will mask out exceptions that are not set to unmasked
-                * status.  0x3f is the exception bits in these regs, 0x200 is the
-                * C1 reg you need in case of a stack fault, 0x040 is the stack
-                * fault bit.  We should only be taking one exception at a time,
-                * so if this combination doesn't produce any single exception,
-                * then we have a bad program that isn't synchronizing its FPU usage
-                * and it will suffer the consequences since we won't be able to
-                * fully reproduce the context of the exception
-                */
-               cwd = get_fpu_cwd(task);
-               swd = get_fpu_swd(task);
+       info.si_signo           = SIGFPE;
+       info.si_errno           = 0;
+       info.si_addr            = (void __user *)uprobe_get_trap_addr(regs);
 
-               err = swd & ~cwd;
-       } else {
-               /*
-                * The SIMD FPU exceptions are handled a little differently, as there
-                * is only a single status/control register.  Thus, to determine which
-                * unmasked exception was caught we must mask the exception mask bits
-                * at 0x1f80, and then use these to mask the exception bits at 0x3f.
-                */
-               unsigned short mxcsr = get_fpu_mxcsr(task);
-               err = ~(mxcsr >> 7) & mxcsr;
-       }
+       info.si_code = fpu__exception_code(fpu, trapnr);
 
-       if (err & 0x001) {      /* Invalid op */
-               /*
-                * swd & 0x240 == 0x040: Stack Underflow
-                * swd & 0x240 == 0x240: Stack Overflow
-                * User must clear the SF bit (0x40) if set
-                */
-               info.si_code = FPE_FLTINV;
-       } else if (err & 0x004) { /* Divide by Zero */
-               info.si_code = FPE_FLTDIV;
-       } else if (err & 0x008) { /* Overflow */
-               info.si_code = FPE_FLTOVF;
-       } else if (err & 0x012) { /* Denormal, Underflow */
-               info.si_code = FPE_FLTUND;
-       } else if (err & 0x020) { /* Precision */
-               info.si_code = FPE_FLTRES;
-       } else {
-               /*
-                * If we're using IRQ 13, or supposedly even some trap
-                * X86_TRAP_MF implementations, it's possible
-                * we get a spurious trap, which is not an error.
-                */
+       /* Retry when we get spurious exceptions: */
+       if (!info.si_code)
                return;
-       }
+
        force_sig_info(SIGFPE, &info, task);
 }
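
For reference, the decoding that math_error() used to open-code (and now
delegates to fpu__exception_code()) can be reproduced standalone; the masks
and tests below are taken directly from the deleted lines above:

#include <stdio.h>

/* trap_mf = 1 for #MF (x87), 0 for #XF (SIMD). */
static const char *fp_exception_name(int trap_mf, unsigned short cwd,
                                     unsigned short swd, unsigned short mxcsr)
{
        unsigned short err;

        if (trap_mf) {
                /* (~cwd & swd) keeps only the unmasked x87 exceptions. */
                err = swd & ~cwd;
        } else {
                /* MXCSR: mask bits sit at 0x1f80, status bits at 0x3f. */
                err = ~(mxcsr >> 7) & mxcsr;
        }

        if (err & 0x001) return "FPE_FLTINV (invalid op)";
        if (err & 0x004) return "FPE_FLTDIV (divide by zero)";
        if (err & 0x008) return "FPE_FLTOVF (overflow)";
        if (err & 0x012) return "FPE_FLTUND (denormal/underflow)";
        if (err & 0x020) return "FPE_FLTRES (precision)";
        return "spurious (no signal sent)";
}

int main(void)
{
        /* x87: divide-by-zero unmasked (ZM clear in cwd, ZE set in swd) */
        printf("%s\n", fp_exception_name(1, 0x037b, 0x0004, 0));
        /* SIMD: precision flag set but masked -> treated as spurious */
        printf("%s\n", fp_exception_name(0, 0, 0, 0x1fa0));
        return 0;
}
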
 
@@ -813,62 +768,8 @@ dotraplinkage void
 do_spurious_interrupt_bug(struct pt_regs *regs, long error_code)
 {
        conditional_sti(regs);
-#if 0
-       /* No need to warn about this any longer. */
-       pr_info("Ignoring P6 Local APIC Spurious Interrupt Bug...\n");
-#endif
-}
-
-asmlinkage __visible void __attribute__((weak)) smp_thermal_interrupt(void)
-{
 }
 
-asmlinkage __visible void __attribute__((weak)) smp_threshold_interrupt(void)
-{
-}
-
-/*
- * 'math_state_restore()' saves the current math information in the
- * old math state array, and gets the new ones from the current task
- *
- * Careful.. There are problems with IBM-designed IRQ13 behaviour.
- * Don't touch unless you *really* know how it works.
- *
- * Must be called with kernel preemption disabled (eg with local
- * interrupts disabled, as in the case of do_device_not_available).
- */
-void math_state_restore(void)
-{
-       struct task_struct *tsk = current;
-
-       if (!tsk_used_math(tsk)) {
-               local_irq_enable();
-               /*
-                * does a slab alloc which can sleep
-                */
-               if (init_fpu(tsk)) {
-                       /*
-                        * ran out of memory!
-                        */
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-               local_irq_disable();
-       }
-
-       /* Avoid __kernel_fpu_begin() right after __thread_fpu_begin() */
-       kernel_fpu_disable();
-       __thread_fpu_begin(tsk);
-       if (unlikely(restore_fpu_checking(tsk))) {
-               fpu_reset_state(tsk);
-               force_sig_info(SIGSEGV, SEND_SIG_PRIV, tsk);
-       } else {
-               tsk->thread.fpu_counter++;
-       }
-       kernel_fpu_enable();
-}
-EXPORT_SYMBOL_GPL(math_state_restore);
-
 dotraplinkage void
 do_device_not_available(struct pt_regs *regs, long error_code)
 {
@@ -889,7 +790,7 @@ do_device_not_available(struct pt_regs *regs, long error_code)
                return;
        }
 #endif
-       math_state_restore(); /* interrupts still off */
+       fpu__restore(&current->thread.fpu); /* interrupts still off */
 #ifdef CONFIG_X86_32
        conditional_sti(regs);
 #endif
@@ -992,13 +893,13 @@ void __init trap_init(void)
                set_bit(i, used_vectors);
 
 #ifdef CONFIG_IA32_EMULATION
-       set_system_intr_gate(IA32_SYSCALL_VECTOR, ia32_syscall);
+       set_system_intr_gate(IA32_SYSCALL_VECTOR, entry_INT80_compat);
        set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 #endif
 
 #ifdef CONFIG_X86_32
-       set_system_trap_gate(SYSCALL_VECTOR, &system_call);
-       set_bit(SYSCALL_VECTOR, used_vectors);
+       set_system_trap_gate(IA32_SYSCALL_VECTOR, entry_INT80_32);
+       set_bit(IA32_SYSCALL_VECTOR, used_vectors);
 #endif
 
        /*
index 26488487bc61e8fb5fcc76cf4ec245cb8ff1ebd7..dd8d0791dfb5021930793689376f0d4f064e5b38 100644 (file)
@@ -113,7 +113,7 @@ static void check_tsc_warp(unsigned int timeout)
  */
 static inline unsigned int loop_timeout(int cpu)
 {
-       return (cpumask_weight(cpu_core_mask(cpu)) > 1) ? 2 : 20;
+       return (cpumask_weight(topology_core_cpumask(cpu)) > 1) ? 2 : 20;
 }
 
 /*
index 0b81ad67da07fa36e57577de5f7165ab320a55a1..66476244731ef8fba8fafbe1bb6cbd17f1a18b9c 100644 (file)
@@ -29,6 +29,7 @@
 #include <linux/kdebug.h>
 #include <asm/processor.h>
 #include <asm/insn.h>
+#include <asm/mmu_context.h>
 
 /* Post-execution fixups. */
 
@@ -312,11 +313,6 @@ static int uprobe_init_insn(struct arch_uprobe *auprobe, struct insn *insn, bool
 }
 
 #ifdef CONFIG_X86_64
-static inline bool is_64bit_mm(struct mm_struct *mm)
-{
-       return  !config_enabled(CONFIG_IA32_EMULATION) ||
-               !(mm->context.ia32_compat == TIF_IA32);
-}
 /*
  * If arch_uprobe->insn doesn't use rip-relative addressing, return
  * immediately.  Otherwise, rewrite the instruction so that it accesses
@@ -497,10 +493,6 @@ static void riprel_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
        }
 }
 #else /* 32-bit: */
-static inline bool is_64bit_mm(struct mm_struct *mm)
-{
-       return false;
-}
 /*
  * No RIP-relative addressing on 32-bit
  */
diff --git a/arch/x86/kernel/vsyscall_64.c b/arch/x86/kernel/vsyscall_64.c
deleted file mode 100644 (file)
index 2dcc6ff..0000000
+++ /dev/null
@@ -1,335 +0,0 @@
-/*
- * Copyright (c) 2012-2014 Andy Lutomirski <luto@amacapital.net>
- *
- * Based on the original implementation which is:
- *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
- *  Copyright 2003 Andi Kleen, SuSE Labs.
- *
- *  Parts of the original code have been moved to arch/x86/vdso/vma.c
- *
- * This file implements vsyscall emulation.  vsyscalls are a legacy ABI:
- * Userspace can request certain kernel services by calling fixed
- * addresses.  This concept is problematic:
- *
- * - It interferes with ASLR.
- * - It's awkward to write code that lives in kernel addresses but is
- *   callable by userspace at fixed addresses.
- * - The whole concept is impossible for 32-bit compat userspace.
- * - UML cannot easily virtualize a vsyscall.
- *
- * As of mid-2014, I believe that there is no new userspace code that
- * will use a vsyscall if the vDSO is present.  I hope that there will
- * soon be no new userspace code that will ever use a vsyscall.
- *
- * The code in this file emulates vsyscalls when notified of a page
- * fault to a vsyscall address.
- */
-
-#include <linux/kernel.h>
-#include <linux/timer.h>
-#include <linux/syscalls.h>
-#include <linux/ratelimit.h>
-
-#include <asm/vsyscall.h>
-#include <asm/unistd.h>
-#include <asm/fixmap.h>
-#include <asm/traps.h>
-
-#define CREATE_TRACE_POINTS
-#include "vsyscall_trace.h"
-
-static enum { EMULATE, NATIVE, NONE } vsyscall_mode = EMULATE;
-
-static int __init vsyscall_setup(char *str)
-{
-       if (str) {
-               if (!strcmp("emulate", str))
-                       vsyscall_mode = EMULATE;
-               else if (!strcmp("native", str))
-                       vsyscall_mode = NATIVE;
-               else if (!strcmp("none", str))
-                       vsyscall_mode = NONE;
-               else
-                       return -EINVAL;
-
-               return 0;
-       }
-
-       return -EINVAL;
-}
-early_param("vsyscall", vsyscall_setup);
-
-static void warn_bad_vsyscall(const char *level, struct pt_regs *regs,
-                             const char *message)
-{
-       if (!show_unhandled_signals)
-               return;
-
-       printk_ratelimited("%s%s[%d] %s ip:%lx cs:%lx sp:%lx ax:%lx si:%lx di:%lx\n",
-                          level, current->comm, task_pid_nr(current),
-                          message, regs->ip, regs->cs,
-                          regs->sp, regs->ax, regs->si, regs->di);
-}
-
-static int addr_to_vsyscall_nr(unsigned long addr)
-{
-       int nr;
-
-       if ((addr & ~0xC00UL) != VSYSCALL_ADDR)
-               return -EINVAL;
-
-       nr = (addr & 0xC00UL) >> 10;
-       if (nr >= 3)
-               return -EINVAL;
-
-       return nr;
-}
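
The arithmetic here relies on the three legacy entry points being spaced
1024 bytes apart inside the single vsyscall page (see the .balign 1024
padding in vsyscall_emu_64.S further down).  A standalone check of the
mapping, using the long-standing fixed address:

#include <stdio.h>

#define VSYSCALL_ADDR 0xffffffffff600000ULL  /* fixed legacy address */

static int vsyscall_nr(unsigned long long addr)
{
        int nr;

        if ((addr & ~0xC00ULL) != VSYSCALL_ADDR)
                return -1;      /* not one of the page's entry slots */

        nr = (int)((addr & 0xC00ULL) >> 10);
        if (nr >= 3)
                return -1;      /* 0xC00 would be a fourth, unused slot */

        return nr;
}

int main(void)
{
        printf("%d\n", vsyscall_nr(VSYSCALL_ADDR));         /* 0: gettimeofday */
        printf("%d\n", vsyscall_nr(VSYSCALL_ADDR + 0x400)); /* 1: time         */
        printf("%d\n", vsyscall_nr(VSYSCALL_ADDR + 0x800)); /* 2: getcpu       */
        printf("%d\n", vsyscall_nr(VSYSCALL_ADDR + 0xC00)); /* -1              */
        return 0;
}
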
-
-static bool write_ok_or_segv(unsigned long ptr, size_t size)
-{
-       /*
-        * XXX: if access_ok, get_user, and put_user handled
-        * sig_on_uaccess_error, this could go away.
-        */
-
-       if (!access_ok(VERIFY_WRITE, (void __user *)ptr, size)) {
-               siginfo_t info;
-               struct thread_struct *thread = &current->thread;
-
-               thread->error_code      = 6;  /* user fault, no page, write */
-               thread->cr2             = ptr;
-               thread->trap_nr         = X86_TRAP_PF;
-
-               memset(&info, 0, sizeof(info));
-               info.si_signo           = SIGSEGV;
-               info.si_errno           = 0;
-               info.si_code            = SEGV_MAPERR;
-               info.si_addr            = (void __user *)ptr;
-
-               force_sig_info(SIGSEGV, &info, current);
-               return false;
-       } else {
-               return true;
-       }
-}
-
-bool emulate_vsyscall(struct pt_regs *regs, unsigned long address)
-{
-       struct task_struct *tsk;
-       unsigned long caller;
-       int vsyscall_nr, syscall_nr, tmp;
-       int prev_sig_on_uaccess_error;
-       long ret;
-
-       /*
-        * No point in checking CS -- the only way to get here is a user mode
-        * trap to a high address, which means that we're in 64-bit user code.
-        */
-
-       WARN_ON_ONCE(address != regs->ip);
-
-       if (vsyscall_mode == NONE) {
-               warn_bad_vsyscall(KERN_INFO, regs,
-                                 "vsyscall attempted with vsyscall=none");
-               return false;
-       }
-
-       vsyscall_nr = addr_to_vsyscall_nr(address);
-
-       trace_emulate_vsyscall(vsyscall_nr);
-
-       if (vsyscall_nr < 0) {
-               warn_bad_vsyscall(KERN_WARNING, regs,
-                                 "misaligned vsyscall (exploit attempt or buggy program) -- look up the vsyscall kernel parameter if you need a workaround");
-               goto sigsegv;
-       }
-
-       if (get_user(caller, (unsigned long __user *)regs->sp) != 0) {
-               warn_bad_vsyscall(KERN_WARNING, regs,
-                                 "vsyscall with bad stack (exploit attempt?)");
-               goto sigsegv;
-       }
-
-       tsk = current;
-
-       /*
-        * Check for access_ok violations and find the syscall nr.
-        *
-        * NULL is a valid user pointer (in the access_ok sense) on 32-bit and
-        * 64-bit, so we don't need to special-case it here.  For all the
-        * vsyscalls, NULL means "don't write anything" not "write it at
-        * address 0".
-        */
-       switch (vsyscall_nr) {
-       case 0:
-               if (!write_ok_or_segv(regs->di, sizeof(struct timeval)) ||
-                   !write_ok_or_segv(regs->si, sizeof(struct timezone))) {
-                       ret = -EFAULT;
-                       goto check_fault;
-               }
-
-               syscall_nr = __NR_gettimeofday;
-               break;
-
-       case 1:
-               if (!write_ok_or_segv(regs->di, sizeof(time_t))) {
-                       ret = -EFAULT;
-                       goto check_fault;
-               }
-
-               syscall_nr = __NR_time;
-               break;
-
-       case 2:
-               if (!write_ok_or_segv(regs->di, sizeof(unsigned)) ||
-                   !write_ok_or_segv(regs->si, sizeof(unsigned))) {
-                       ret = -EFAULT;
-                       goto check_fault;
-               }
-
-               syscall_nr = __NR_getcpu;
-               break;
-       }
-
-       /*
-        * Handle seccomp.  regs->ip must be the original value.
-        * See seccomp_send_sigsys and Documentation/prctl/seccomp_filter.txt.
-        *
-        * We could optimize the seccomp disabled case, but performance
-        * here doesn't matter.
-        */
-       regs->orig_ax = syscall_nr;
-       regs->ax = -ENOSYS;
-       tmp = secure_computing();
-       if ((!tmp && regs->orig_ax != syscall_nr) || regs->ip != address) {
-               warn_bad_vsyscall(KERN_DEBUG, regs,
-                                 "seccomp tried to change syscall nr or ip");
-               do_exit(SIGSYS);
-       }
-       regs->orig_ax = -1;
-       if (tmp)
-               goto do_ret;  /* skip requested */
-
-       /*
-        * With a real vsyscall, page faults cause SIGSEGV.  We want to
-        * preserve that behavior to make writing exploits harder.
-        */
-       prev_sig_on_uaccess_error = current_thread_info()->sig_on_uaccess_error;
-       current_thread_info()->sig_on_uaccess_error = 1;
-
-       ret = -EFAULT;
-       switch (vsyscall_nr) {
-       case 0:
-               ret = sys_gettimeofday(
-                       (struct timeval __user *)regs->di,
-                       (struct timezone __user *)regs->si);
-               break;
-
-       case 1:
-               ret = sys_time((time_t __user *)regs->di);
-               break;
-
-       case 2:
-               ret = sys_getcpu((unsigned __user *)regs->di,
-                                (unsigned __user *)regs->si,
-                                NULL);
-               break;
-       }
-
-       current_thread_info()->sig_on_uaccess_error = prev_sig_on_uaccess_error;
-
-check_fault:
-       if (ret == -EFAULT) {
-               /* Bad news -- userspace fed a bad pointer to a vsyscall. */
-               warn_bad_vsyscall(KERN_INFO, regs,
-                                 "vsyscall fault (exploit attempt?)");
-
-               /*
-                * If we failed to generate a signal for any reason,
-                * generate one here.  (This should be impossible.)
-                */
-               if (WARN_ON_ONCE(!sigismember(&tsk->pending.signal, SIGBUS) &&
-                                !sigismember(&tsk->pending.signal, SIGSEGV)))
-                       goto sigsegv;
-
-               return true;  /* Don't emulate the ret. */
-       }
-
-       regs->ax = ret;
-
-do_ret:
-       /* Emulate a ret instruction. */
-       regs->ip = caller;
-       regs->sp += 8;
-       return true;
-
-sigsegv:
-       force_sig(SIGSEGV, current);
-       return true;
-}
-
-/*
- * A pseudo VMA to allow ptrace access for the vsyscall page.  This only
- * covers the 64bit vsyscall page now. 32bit has a real VMA now and does
- * not need special handling anymore:
- */
-static const char *gate_vma_name(struct vm_area_struct *vma)
-{
-       return "[vsyscall]";
-}
-static struct vm_operations_struct gate_vma_ops = {
-       .name = gate_vma_name,
-};
-static struct vm_area_struct gate_vma = {
-       .vm_start       = VSYSCALL_ADDR,
-       .vm_end         = VSYSCALL_ADDR + PAGE_SIZE,
-       .vm_page_prot   = PAGE_READONLY_EXEC,
-       .vm_flags       = VM_READ | VM_EXEC,
-       .vm_ops         = &gate_vma_ops,
-};
-
-struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
-{
-#ifdef CONFIG_IA32_EMULATION
-       if (!mm || mm->context.ia32_compat)
-               return NULL;
-#endif
-       if (vsyscall_mode == NONE)
-               return NULL;
-       return &gate_vma;
-}
-
-int in_gate_area(struct mm_struct *mm, unsigned long addr)
-{
-       struct vm_area_struct *vma = get_gate_vma(mm);
-
-       if (!vma)
-               return 0;
-
-       return (addr >= vma->vm_start) && (addr < vma->vm_end);
-}
-
-/*
- * Use this when you have no reliable mm, typically from interrupt
- * context. It is less reliable than using a task's mm and may give
- * false positives.
- */
-int in_gate_area_no_mm(unsigned long addr)
-{
-       return vsyscall_mode != NONE && (addr & PAGE_MASK) == VSYSCALL_ADDR;
-}
-
-void __init map_vsyscall(void)
-{
-       extern char __vsyscall_page;
-       unsigned long physaddr_vsyscall = __pa_symbol(&__vsyscall_page);
-
-       if (vsyscall_mode != NONE)
-               __set_fixmap(VSYSCALL_PAGE, physaddr_vsyscall,
-                            vsyscall_mode == NATIVE
-                            ? PAGE_KERNEL_VSYSCALL
-                            : PAGE_KERNEL_VVAR);
-
-       BUILD_BUG_ON((unsigned long)__fix_to_virt(VSYSCALL_PAGE) !=
-                    (unsigned long)VSYSCALL_ADDR);
-}
diff --git a/arch/x86/kernel/vsyscall_emu_64.S b/arch/x86/kernel/vsyscall_emu_64.S
deleted file mode 100644 (file)
index c9596a9..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * vsyscall_emu_64.S: Vsyscall emulation page
- *
- * Copyright (c) 2011 Andy Lutomirski
- *
- * Subject to the GNU General Public License, version 2
- */
-
-#include <linux/linkage.h>
-
-#include <asm/irq_vectors.h>
-#include <asm/page_types.h>
-#include <asm/unistd_64.h>
-
-__PAGE_ALIGNED_DATA
-       .globl __vsyscall_page
-       .balign PAGE_SIZE, 0xcc
-       .type __vsyscall_page, @object
-__vsyscall_page:
-
-       mov $__NR_gettimeofday, %rax
-       syscall
-       ret
-
-       .balign 1024, 0xcc
-       mov $__NR_time, %rax
-       syscall
-       ret
-
-       .balign 1024, 0xcc
-       mov $__NR_getcpu, %rax
-       syscall
-       ret
-
-       .balign 4096, 0xcc
-
-       .size __vsyscall_page, 4096
diff --git a/arch/x86/kernel/vsyscall_gtod.c b/arch/x86/kernel/vsyscall_gtod.c
deleted file mode 100644 (file)
index 51e3304..0000000
+++ /dev/null
@@ -1,70 +0,0 @@
-/*
- *  Copyright (C) 2001 Andrea Arcangeli <andrea@suse.de> SuSE
- *  Copyright 2003 Andi Kleen, SuSE Labs.
- *
- *  Modified for x86 32 bit architecture by
- *  Stefani Seibold <stefani@seibold.net>
- *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
- *
- *  Thanks to hpa@transmeta.com for some useful hints.
- *  Special thanks to Ingo Molnar for his early experience with
- *  a different vsyscall implementation for Linux/IA32 and for the name.
- *
- */
-
-#include <linux/timekeeper_internal.h>
-#include <asm/vgtod.h>
-#include <asm/vvar.h>
-
-DEFINE_VVAR(struct vsyscall_gtod_data, vsyscall_gtod_data);
-
-void update_vsyscall_tz(void)
-{
-       vsyscall_gtod_data.tz_minuteswest = sys_tz.tz_minuteswest;
-       vsyscall_gtod_data.tz_dsttime = sys_tz.tz_dsttime;
-}
-
-void update_vsyscall(struct timekeeper *tk)
-{
-       struct vsyscall_gtod_data *vdata = &vsyscall_gtod_data;
-
-       gtod_write_begin(vdata);
-
-       /* copy vsyscall data */
-       vdata->vclock_mode      = tk->tkr_mono.clock->archdata.vclock_mode;
-       vdata->cycle_last       = tk->tkr_mono.cycle_last;
-       vdata->mask             = tk->tkr_mono.mask;
-       vdata->mult             = tk->tkr_mono.mult;
-       vdata->shift            = tk->tkr_mono.shift;
-
-       vdata->wall_time_sec            = tk->xtime_sec;
-       vdata->wall_time_snsec          = tk->tkr_mono.xtime_nsec;
-
-       vdata->monotonic_time_sec       = tk->xtime_sec
-                                       + tk->wall_to_monotonic.tv_sec;
-       vdata->monotonic_time_snsec     = tk->tkr_mono.xtime_nsec
-                                       + ((u64)tk->wall_to_monotonic.tv_nsec
-                                               << tk->tkr_mono.shift);
-       while (vdata->monotonic_time_snsec >=
-                                       (((u64)NSEC_PER_SEC) << tk->tkr_mono.shift)) {
-               vdata->monotonic_time_snsec -=
-                                       ((u64)NSEC_PER_SEC) << tk->tkr_mono.shift;
-               vdata->monotonic_time_sec++;
-       }
-
-       vdata->wall_time_coarse_sec     = tk->xtime_sec;
-       vdata->wall_time_coarse_nsec    = (long)(tk->tkr_mono.xtime_nsec >>
-                                                tk->tkr_mono.shift);
-
-       vdata->monotonic_time_coarse_sec =
-               vdata->wall_time_coarse_sec + tk->wall_to_monotonic.tv_sec;
-       vdata->monotonic_time_coarse_nsec =
-               vdata->wall_time_coarse_nsec + tk->wall_to_monotonic.tv_nsec;
-
-       while (vdata->monotonic_time_coarse_nsec >= NSEC_PER_SEC) {
-               vdata->monotonic_time_coarse_nsec -= NSEC_PER_SEC;
-               vdata->monotonic_time_coarse_sec++;
-       }
-
-       gtod_write_end(vdata);
-}
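
The while loops above normalize "shifted nanoseconds": xtime_nsec is kept
left-shifted by the clocksource shift, so one whole second is
NSEC_PER_SEC << shift.  A minimal sketch of that carry:

#include <stdio.h>
#include <stdint.h>

#define NSEC_PER_SEC 1000000000ULL

/* Carry whole seconds out of a shifted-nanoseconds remainder. */
static void normalize(uint64_t *sec, uint64_t *snsec, unsigned int shift)
{
        while (*snsec >= (NSEC_PER_SEC << shift)) {
                *snsec -= NSEC_PER_SEC << shift;
                (*sec)++;
        }
}

int main(void)
{
        unsigned int shift = 8;
        /* 2.5 seconds expressed as shifted nanoseconds, no whole seconds */
        uint64_t sec = 0, snsec = (5 * NSEC_PER_SEC / 2) << shift;

        normalize(&sec, &snsec, shift);
        printf("sec=%llu nsec=%llu\n",
               (unsigned long long)sec,
               (unsigned long long)(snsec >> shift)); /* sec=2 nsec=500000000 */
        return 0;
}
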
diff --git a/arch/x86/kernel/vsyscall_trace.h b/arch/x86/kernel/vsyscall_trace.h
deleted file mode 100644 (file)
index a8b2ede..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#undef TRACE_SYSTEM
-#define TRACE_SYSTEM vsyscall
-
-#if !defined(__VSYSCALL_TRACE_H) || defined(TRACE_HEADER_MULTI_READ)
-#define __VSYSCALL_TRACE_H
-
-#include <linux/tracepoint.h>
-
-TRACE_EVENT(emulate_vsyscall,
-
-           TP_PROTO(int nr),
-
-           TP_ARGS(nr),
-
-           TP_STRUCT__entry(__field(int, nr)),
-
-           TP_fast_assign(
-                          __entry->nr = nr;
-                          ),
-
-           TP_printk("nr = %d", __entry->nr)
-);
-
-#endif
-
-#undef TRACE_INCLUDE_PATH
-#define TRACE_INCLUDE_PATH ../../arch/x86/kernel
-#define TRACE_INCLUDE_FILE vsyscall_trace
-#include <trace/define_trace.h>
index 37d8fa4438f056b7611077a578a288d4250972b6..a0695be19864eda009742b142ba2046e13ad05f9 100644 (file)
@@ -75,7 +75,5 @@ EXPORT_SYMBOL(native_load_gs_index);
 
 #ifdef CONFIG_PREEMPT
 EXPORT_SYMBOL(___preempt_schedule);
-#ifdef CONFIG_CONTEXT_TRACKING
-EXPORT_SYMBOL(___preempt_schedule_context);
-#endif
+EXPORT_SYMBOL(___preempt_schedule_notrace);
 #endif
index 234b0722de53522d6d20bd060478ea3e97f8cbb9..3cee10abf01d1515373871918da54f65f6e433c2 100644 (file)
@@ -111,11 +111,9 @@ EXPORT_SYMBOL_GPL(x86_platform);
 #if defined(CONFIG_PCI_MSI)
 struct x86_msi_ops x86_msi = {
        .setup_msi_irqs         = native_setup_msi_irqs,
-       .compose_msi_msg        = native_compose_msi_msg,
        .teardown_msi_irq       = native_teardown_msi_irq,
        .teardown_msi_irqs      = default_teardown_msi_irqs,
        .restore_msi_irqs       = default_restore_msi_irqs,
-       .setup_hpet_msi         = default_setup_hpet_msi,
 };
 
 /* MSI arch specific hooks */
@@ -141,13 +139,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev)
 #endif
 
 struct x86_io_apic_ops x86_io_apic_ops = {
-       .init                   = native_io_apic_init_mappings,
        .read                   = native_io_apic_read,
-       .write                  = native_io_apic_write,
-       .modify                 = native_io_apic_modify,
        .disable                = native_disable_io_apic,
-       .print_entries          = native_io_apic_print_entries,
-       .set_affinity           = native_ioapic_set_affinity,
-       .setup_entry            = native_setup_ioapic_entry,
-       .eoi_ioapic_pin         = native_eoi_ioapic_pin,
 };
diff --git a/arch/x86/kernel/xsave.c b/arch/x86/kernel/xsave.c
deleted file mode 100644 (file)
index 87a815b..0000000
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- * xsave/xrstor support.
- *
- * Author: Suresh Siddha <suresh.b.siddha@intel.com>
- */
-
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/bootmem.h>
-#include <linux/compat.h>
-#include <linux/cpu.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h>
-#include <asm/sigframe.h>
-#include <asm/tlbflush.h>
-#include <asm/xcr.h>
-
-/*
- * Feature mask supported by the CPU and the kernel.
- */
-u64 pcntxt_mask;
-
-/*
- * Represents init state for the supported extended state.
- */
-struct xsave_struct *init_xstate_buf;
-
-static struct _fpx_sw_bytes fx_sw_reserved, fx_sw_reserved_ia32;
-static unsigned int *xstate_offsets, *xstate_sizes;
-static unsigned int xstate_comp_offsets[sizeof(pcntxt_mask)*8];
-static unsigned int xstate_features;
-
-/*
- * If a processor implementation discerns that a processor state component is
- * in its initialized state, it may set the corresponding bit in the
- * xsave_hdr.xstate_bv to '0', without modifying the corresponding memory
- * layout in the case of xsaveopt. While presenting the xstate information to
- * the user, we always ensure that the memory layout of a feature will be in
- * the init state if the corresponding header bit is zero. This is to ensure
- * that the user doesn't see some stale state in the memory layout during
- * signal handling, debugging etc.
- */
-void __sanitize_i387_state(struct task_struct *tsk)
-{
-       struct i387_fxsave_struct *fx = &tsk->thread.fpu.state->fxsave;
-       int feature_bit = 0x2;
-       u64 xstate_bv;
-
-       if (!fx)
-               return;
-
-       xstate_bv = tsk->thread.fpu.state->xsave.xsave_hdr.xstate_bv;
-
-       /*
-        * None of the feature bits are in init state. So nothing else
-        * to do for us, as the memory layout is up to date.
-        */
-       if ((xstate_bv & pcntxt_mask) == pcntxt_mask)
-               return;
-
-       /*
-        * FP is in init state
-        */
-       if (!(xstate_bv & XSTATE_FP)) {
-               fx->cwd = 0x37f;
-               fx->swd = 0;
-               fx->twd = 0;
-               fx->fop = 0;
-               fx->rip = 0;
-               fx->rdp = 0;
-               memset(&fx->st_space[0], 0, 128);
-       }
-
-       /*
-        * SSE is in init state
-        */
-       if (!(xstate_bv & XSTATE_SSE))
-               memset(&fx->xmm_space[0], 0, 256);
-
-       xstate_bv = (pcntxt_mask & ~xstate_bv) >> 2;
-
-       /*
-        * Update all the other memory layouts for which the corresponding
-        * header bit is in the init state.
-        */
-       while (xstate_bv) {
-               if (xstate_bv & 0x1) {
-                       int offset = xstate_offsets[feature_bit];
-                       int size = xstate_sizes[feature_bit];
-
-                       memcpy(((void *) fx) + offset,
-                              ((void *) init_xstate_buf) + offset,
-                              size);
-               }
-
-               xstate_bv >>= 1;
-               feature_bit++;
-       }
-}
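
In short: any feature the kernel supports (pcntxt_mask) whose header bit in
xstate_bv is clear is logically in its init state, so its bytes in the
buffer are overwritten with the init copy before userspace sees them.  A
simplified standalone model (offsets and sizes are made up; the real ones
come from CPUID):

#include <stdio.h>
#include <string.h>
#include <stdint.h>

int main(void)
{
        uint64_t pcntxt_mask = 0x7;  /* FP, SSE, one extended feature     */
        uint64_t xstate_bv   = 0x3;  /* extended feature is in init state */
        unsigned int offsets[3] = { 0, 16, 32 }, sizes[3] = { 16, 16, 32 };
        unsigned char buf[64], init[64];
        int bit;

        memset(buf, 0xAA, sizeof(buf));   /* stale garbage                 */
        memset(init, 0x00, sizeof(init)); /* canonical init-state contents */

        for (bit = 0; bit < 3; bit++) {
                if ((pcntxt_mask & (1ULL << bit)) &&
                    !(xstate_bv & (1ULL << bit)))
                        memcpy(buf + offsets[bit], init + offsets[bit],
                               sizes[bit]);
        }
        printf("byte 32 after sanitize: 0x%02x\n", buf[32]); /* 0x00 */
        return 0;
}
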
-
-/*
- * Check for the presence of extended state information in the
- * user fpstate pointer in the sigcontext.
- */
-static inline int check_for_xstate(struct i387_fxsave_struct __user *buf,
-                                  void __user *fpstate,
-                                  struct _fpx_sw_bytes *fx_sw)
-{
-       int min_xstate_size = sizeof(struct i387_fxsave_struct) +
-                             sizeof(struct xsave_hdr_struct);
-       unsigned int magic2;
-
-       if (__copy_from_user(fx_sw, &buf->sw_reserved[0], sizeof(*fx_sw)))
-               return -1;
-
-       /* Check for the first magic field and other error scenarios. */
-       if (fx_sw->magic1 != FP_XSTATE_MAGIC1 ||
-           fx_sw->xstate_size < min_xstate_size ||
-           fx_sw->xstate_size > xstate_size ||
-           fx_sw->xstate_size > fx_sw->extended_size)
-               return -1;
-
-       /*
-        * Check for the presence of second magic word at the end of memory
-        * layout. This detects the case where the user just copied the legacy
-        * fpstate layout without copying the extended state information
-        * in the memory layout.
-        */
-       if (__get_user(magic2, (__u32 __user *)(fpstate + fx_sw->xstate_size))
-           || magic2 != FP_XSTATE_MAGIC2)
-               return -1;
-
-       return 0;
-}
-
-/*
- * Signal frame handlers.
- */
-static inline int save_fsave_header(struct task_struct *tsk, void __user *buf)
-{
-       if (use_fxsr()) {
-               struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
-               struct user_i387_ia32_struct env;
-               struct _fpstate_ia32 __user *fp = buf;
-
-               convert_from_fxsr(&env, tsk);
-
-               if (__copy_to_user(buf, &env, sizeof(env)) ||
-                   __put_user(xsave->i387.swd, &fp->status) ||
-                   __put_user(X86_FXSR_MAGIC, &fp->magic))
-                       return -1;
-       } else {
-               struct i387_fsave_struct __user *fp = buf;
-               u32 swd;
-               if (__get_user(swd, &fp->swd) || __put_user(swd, &fp->status))
-                       return -1;
-       }
-
-       return 0;
-}
-
-static inline int save_xstate_epilog(void __user *buf, int ia32_frame)
-{
-       struct xsave_struct __user *x = buf;
-       struct _fpx_sw_bytes *sw_bytes;
-       u32 xstate_bv;
-       int err;
-
-       /* Setup the bytes not touched by the [f]xsave and reserved for SW. */
-       sw_bytes = ia32_frame ? &fx_sw_reserved_ia32 : &fx_sw_reserved;
-       err = __copy_to_user(&x->i387.sw_reserved, sw_bytes, sizeof(*sw_bytes));
-
-       if (!use_xsave())
-               return err;
-
-       err |= __put_user(FP_XSTATE_MAGIC2, (__u32 *)(buf + xstate_size));
-
-       /*
-        * Read the xstate_bv which we copied (directly from the cpu or
-        * from the state in task struct) to the user buffers.
-        */
-       err |= __get_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
-
-       /*
-        * For legacy compatibility, we always set the FP/SSE bits in the
-        * bit vector while saving the state to the user context. This
-        * enables us to capture any changes (during sigreturn) to the
-        * FP/SSE bits by legacy applications which don't touch
-        * xstate_bv in the xsave header.
-        *
-        * xsave aware apps can change the xstate_bv in the xsave
-        * header as well as change any contents in the memory layout.
-        * xrestore as part of sigreturn will capture all the changes.
-        */
-       xstate_bv |= XSTATE_FPSSE;
-
-       err |= __put_user(xstate_bv, (__u32 *)&x->xsave_hdr.xstate_bv);
-
-       return err;
-}
-
-static inline int save_user_xstate(struct xsave_struct __user *buf)
-{
-       int err;
-
-       if (use_xsave())
-               err = xsave_user(buf);
-       else if (use_fxsr())
-               err = fxsave_user((struct i387_fxsave_struct __user *) buf);
-       else
-               err = fsave_user((struct i387_fsave_struct __user *) buf);
-
-       if (unlikely(err) && __clear_user(buf, xstate_size))
-               err = -EFAULT;
-       return err;
-}
-
-/*
- * Save the fpu, extended register state to the user signal frame.
- *
- * 'buf_fx' is the 64-byte aligned pointer at which the [f|fx|x]save
- *  state is copied.
- *  'buf' points to the 'buf_fx' or to the fsave header followed by 'buf_fx'.
- *
- *     buf == buf_fx for 64-bit frames and 32-bit fsave frame.
- *     buf != buf_fx for 32-bit frames with fxstate.
- *
- * If the fpu, extended register state is live, save the state directly
- * to the user frame pointed by the aligned pointer 'buf_fx'. Otherwise,
- * copy the thread's fpu state to the user frame starting at 'buf_fx'.
- *
- * If this is a 32-bit frame with fxstate, put a fsave header before
- * the aligned state at 'buf_fx'.
- *
- * For [f]xsave state, update the SW reserved fields in the [f]xsave frame
- * indicating the absence/presence of the extended state to the user.
- */
-int save_xstate_sig(void __user *buf, void __user *buf_fx, int size)
-{
-       struct xsave_struct *xsave = &current->thread.fpu.state->xsave;
-       struct task_struct *tsk = current;
-       int ia32_fxstate = (buf != buf_fx);
-
-       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
-                        config_enabled(CONFIG_IA32_EMULATION));
-
-       if (!access_ok(VERIFY_WRITE, buf, size))
-               return -EACCES;
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_get(current, NULL, 0,
-                       sizeof(struct user_i387_ia32_struct), NULL,
-                       (struct _fpstate_ia32 __user *) buf) ? -1 : 1;
-
-       if (user_has_fpu()) {
-               /* Save the live register state to the user directly. */
-               if (save_user_xstate(buf_fx))
-                       return -1;
-               /* Update the thread's fxstate to save the fsave header. */
-               if (ia32_fxstate)
-                       fpu_fxsave(&tsk->thread.fpu);
-       } else {
-               sanitize_i387_state(tsk);
-               if (__copy_to_user(buf_fx, xsave, xstate_size))
-                       return -1;
-       }
-
-       /* Save the fsave header for the 32-bit frames. */
-       if ((ia32_fxstate || !use_fxsr()) && save_fsave_header(tsk, buf))
-               return -1;
-
-       if (use_fxsr() && save_xstate_epilog(buf_fx, ia32_fxstate))
-               return -1;
-
-       return 0;
-}
-
-static inline void
-sanitize_restored_xstate(struct task_struct *tsk,
-                        struct user_i387_ia32_struct *ia32_env,
-                        u64 xstate_bv, int fx_only)
-{
-       struct xsave_struct *xsave = &tsk->thread.fpu.state->xsave;
-       struct xsave_hdr_struct *xsave_hdr = &xsave->xsave_hdr;
-
-       if (use_xsave()) {
-               /* These bits must be zero. */
-               memset(xsave_hdr->reserved, 0, 48);
-
-               /*
-                * Init the state that is not present in the memory
-                * layout and not enabled by the OS.
-                */
-               if (fx_only)
-                       xsave_hdr->xstate_bv = XSTATE_FPSSE;
-               else
-                       xsave_hdr->xstate_bv &= (pcntxt_mask & xstate_bv);
-       }
-
-       if (use_fxsr()) {
-               /*
-                * mxcsr reserved bits must be masked to zero for security
-                * reasons.
-                */
-               xsave->i387.mxcsr &= mxcsr_feature_mask;
-
-               convert_to_fxsr(tsk, ia32_env);
-       }
-}
-
-/*
- * Restore the extended state if present. Otherwise, restore the FP/SSE state.
- */
-static inline int restore_user_xstate(void __user *buf, u64 xbv, int fx_only)
-{
-       if (use_xsave()) {
-               if ((unsigned long)buf % 64 || fx_only) {
-                       u64 init_bv = pcntxt_mask & ~XSTATE_FPSSE;
-                       xrstor_state(init_xstate_buf, init_bv);
-                       return fxrstor_user(buf);
-               } else {
-                       u64 init_bv = pcntxt_mask & ~xbv;
-                       if (unlikely(init_bv))
-                               xrstor_state(init_xstate_buf, init_bv);
-                       return xrestore_user(buf, xbv);
-               }
-       } else if (use_fxsr()) {
-               return fxrstor_user(buf);
-       } else
-               return frstor_user(buf);
-}
-
-int __restore_xstate_sig(void __user *buf, void __user *buf_fx, int size)
-{
-       int ia32_fxstate = (buf != buf_fx);
-       struct task_struct *tsk = current;
-       int state_size = xstate_size;
-       u64 xstate_bv = 0;
-       int fx_only = 0;
-
-       ia32_fxstate &= (config_enabled(CONFIG_X86_32) ||
-                        config_enabled(CONFIG_IA32_EMULATION));
-
-       if (!buf) {
-               fpu_reset_state(tsk);
-               return 0;
-       }
-
-       if (!access_ok(VERIFY_READ, buf, size))
-               return -EACCES;
-
-       if (!used_math() && init_fpu(tsk))
-               return -1;
-
-       if (!static_cpu_has(X86_FEATURE_FPU))
-               return fpregs_soft_set(current, NULL,
-                                      0, sizeof(struct user_i387_ia32_struct),
-                                      NULL, buf) != 0;
-
-       if (use_xsave()) {
-               struct _fpx_sw_bytes fx_sw_user;
-               if (unlikely(check_for_xstate(buf_fx, buf_fx, &fx_sw_user))) {
-                       /*
-                        * Couldn't find the extended state information in the
-                        * memory layout. Restore just the FP/SSE and init all
-                        * the other extended state.
-                        */
-                       state_size = sizeof(struct i387_fxsave_struct);
-                       fx_only = 1;
-               } else {
-                       state_size = fx_sw_user.xstate_size;
-                       xstate_bv = fx_sw_user.xstate_bv;
-               }
-       }
-
-       if (ia32_fxstate) {
-               /*
-                * For 32-bit frames with fxstate, copy the user state to the
-                * thread's fpu state, reconstruct fxstate from the fsave
-                * header. Sanitize the copied state etc.
-                */
-               struct fpu *fpu = &tsk->thread.fpu;
-               struct user_i387_ia32_struct env;
-               int err = 0;
-
-               /*
-                * Drop the current fpu, which clears used_math(). This
-                * ensures that a context switch during the copy of the new
-                * state cannot save or restore the half-copied intermediate
-                * state, which would corrupt the newly restored state. We
-                * will be ready to restore/save the state only after
-                * set_used_math() is set again.
-                */
-               drop_fpu(tsk);
-
-               if (__copy_from_user(&fpu->state->xsave, buf_fx, state_size) ||
-                   __copy_from_user(&env, buf, sizeof(env))) {
-                       fpu_finit(fpu);
-                       err = -1;
-               } else {
-                       sanitize_restored_xstate(tsk, &env, xstate_bv, fx_only);
-               }
-
-               set_used_math();
-               if (use_eager_fpu()) {
-                       preempt_disable();
-                       math_state_restore();
-                       preempt_enable();
-               }
-
-               return err;
-       } else {
-               /*
-                * For 64-bit frames and 32-bit fsave frames, restore the user
-                * state to the registers directly (with exceptions handled).
-                */
-               user_fpu_begin();
-               if (restore_user_xstate(buf_fx, xstate_bv, fx_only)) {
-                       fpu_reset_state(tsk);
-                       return -1;
-               }
-       }
-
-       return 0;
-}
-
-/*
- * Prepare the SW reserved portion of the fxsave memory layout, indicating
- * the presence of the extended state information in the memory layout
- * pointed by the fpstate pointer in the sigcontext.
- * This will be saved whenever the FP and extended state context is
- * saved on the user stack during the signal handler delivery to the user.
- */
-static void prepare_fx_sw_frame(void)
-{
-       int fsave_header_size = sizeof(struct i387_fsave_struct);
-       int size = xstate_size + FP_XSTATE_MAGIC2_SIZE;
-
-       if (config_enabled(CONFIG_X86_32))
-               size += fsave_header_size;
-
-       fx_sw_reserved.magic1 = FP_XSTATE_MAGIC1;
-       fx_sw_reserved.extended_size = size;
-       fx_sw_reserved.xstate_bv = pcntxt_mask;
-       fx_sw_reserved.xstate_size = xstate_size;
-
-       if (config_enabled(CONFIG_IA32_EMULATION)) {
-               fx_sw_reserved_ia32 = fx_sw_reserved;
-               fx_sw_reserved_ia32.extended_size += fsave_header_size;
-       }
-}
-
-/*
- * Enable the extended processor state save/restore feature
- */
-static inline void xstate_enable(void)
-{
-       cr4_set_bits(X86_CR4_OSXSAVE);
-       xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
-}
-
-/*
- * Record the offsets and sizes of different state managed by the xsave
- * memory layout.
- */
-static void __init setup_xstate_features(void)
-{
-       int eax, ebx, ecx, edx, leaf = 0x2;
-
-       xstate_features = fls64(pcntxt_mask);
-       xstate_offsets = alloc_bootmem(xstate_features * sizeof(int));
-       xstate_sizes = alloc_bootmem(xstate_features * sizeof(int));
-
-       do {
-               cpuid_count(XSTATE_CPUID, leaf, &eax, &ebx, &ecx, &edx);
-
-               if (eax == 0)
-                       break;
-
-               xstate_offsets[leaf] = ebx;
-               xstate_sizes[leaf] = eax;
-
-               leaf++;
-       } while (1);
-}
-
-/*
- * This function sets up offsets and sizes of all extended states in
- * xsave area. This supports both standard format and compacted format
- * of the xsave area.
- *
- * Input: void
- * Output: void
- */
-void setup_xstate_comp(void)
-{
-       unsigned int xstate_comp_sizes[sizeof(pcntxt_mask)*8];
-       int i;
-
-       /*
-        * The FP xstates and SSE xstates are legacy states. They are always
-        * in the fixed offsets in the xsave area in either compacted form
-        * or standard form.
-        */
-       xstate_comp_offsets[0] = 0;
-       xstate_comp_offsets[1] = offsetof(struct i387_fxsave_struct, xmm_space);
-
-       if (!cpu_has_xsaves) {
-               for (i = 2; i < xstate_features; i++) {
-                       if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
-                               xstate_comp_offsets[i] = xstate_offsets[i];
-                               xstate_comp_sizes[i] = xstate_sizes[i];
-                       }
-               }
-               return;
-       }
-
-       xstate_comp_offsets[2] = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-
-       for (i = 2; i < xstate_features; i++) {
-               if (test_bit(i, (unsigned long *)&pcntxt_mask))
-                       xstate_comp_sizes[i] = xstate_sizes[i];
-               else
-                       xstate_comp_sizes[i] = 0;
-
-               if (i > 2)
-                       xstate_comp_offsets[i] = xstate_comp_offsets[i-1]
-                                       + xstate_comp_sizes[i-1];
-
-       }
-}
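
Concretely: in the compacted format the first extended feature starts right
after the legacy area plus the xsave header (FXSAVE_SIZE + XSAVE_HDR_SIZE),
and each further enabled feature is packed immediately after the previous
one.  A simplified sketch with made-up sizes (the real sizes come from
CPUID leaf 0xD; disabled features contribute zero bytes):

#include <stdio.h>
#include <stdint.h>

#define FXSAVE_SIZE    512   /* legacy FP/SSE region */
#define XSAVE_HDR_SIZE  64   /* xsave header         */

int main(void)
{
        uint64_t mask = (1ULL << 0) | (1ULL << 1) | (1ULL << 3) | (1ULL << 4);
        unsigned int sizes[8] = { 0, 0, 256, 64, 64, 0, 0, 0 }; /* made up */
        unsigned int next = FXSAVE_SIZE + XSAVE_HDR_SIZE;
        int i;

        for (i = 2; i < 8; i++) {
                if (!(mask & (1ULL << i)))
                        continue;       /* disabled: occupies no space */
                printf("feature %d: offset %u size %u\n", i, next, sizes[i]);
                next += sizes[i];
        }
        /* prints: feature 3 at offset 576, feature 4 at offset 640 */
        return 0;
}
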
-
-/*
- * setup the xstate image representing the init state
- */
-static void __init setup_init_fpu_buf(void)
-{
-       /*
-        * Set up init_xstate_buf to represent the init state of
-        * all the features managed by xsave
-        */
-       init_xstate_buf = alloc_bootmem_align(xstate_size,
-                                             __alignof__(struct xsave_struct));
-       fx_finit(&init_xstate_buf->i387);
-
-       if (!cpu_has_xsave)
-               return;
-
-       setup_xstate_features();
-
-       if (cpu_has_xsaves) {
-               init_xstate_buf->xsave_hdr.xcomp_bv =
-                                               (u64)1 << 63 | pcntxt_mask;
-               init_xstate_buf->xsave_hdr.xstate_bv = pcntxt_mask;
-       }
-
-       /*
-        * Init all the feature states with header_bv being 0x0
-        */
-       xrstor_state_booting(init_xstate_buf, -1);
-       /*
-        * Dump the init state again. This is to identify the init state
-        * of any feature which is not represented by all zero's.
-        */
-       xsave_state_booting(init_xstate_buf, -1);
-}
-
-static enum { AUTO, ENABLE, DISABLE } eagerfpu = AUTO;
-static int __init eager_fpu_setup(char *s)
-{
-       if (!strcmp(s, "on"))
-               eagerfpu = ENABLE;
-       else if (!strcmp(s, "off"))
-               eagerfpu = DISABLE;
-       else if (!strcmp(s, "auto"))
-               eagerfpu = AUTO;
-       return 1;
-}
-__setup("eagerfpu=", eager_fpu_setup);
-
-
-/*
- * Calculate total size of enabled xstates in XCR0/pcntxt_mask.
- */
-static void __init init_xstate_size(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-       int i;
-
-       if (!cpu_has_xsaves) {
-               cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
-               xstate_size = ebx;
-               return;
-       }
-
-       xstate_size = FXSAVE_SIZE + XSAVE_HDR_SIZE;
-       for (i = 2; i < 64; i++) {
-               if (test_bit(i, (unsigned long *)&pcntxt_mask)) {
-                       cpuid_count(XSTATE_CPUID, i, &eax, &ebx, &ecx, &edx);
-                       xstate_size += eax;
-               }
-       }
-}
-
-/*
- * Enable and initialize the xsave feature.
- */
-static void __init xstate_enable_boot_cpu(void)
-{
-       unsigned int eax, ebx, ecx, edx;
-
-       if (boot_cpu_data.cpuid_level < XSTATE_CPUID) {
-               WARN(1, KERN_ERR "XSTATE_CPUID missing\n");
-               return;
-       }
-
-       cpuid_count(XSTATE_CPUID, 0, &eax, &ebx, &ecx, &edx);
-       pcntxt_mask = eax + ((u64)edx << 32);
-
-       if ((pcntxt_mask & XSTATE_FPSSE) != XSTATE_FPSSE) {
-               pr_err("FP/SSE not shown under xsave features 0x%llx\n",
-                      pcntxt_mask);
-               BUG();
-       }
-
-       /*
-        * Support only the state known to OS.
-        */
-       pcntxt_mask = pcntxt_mask & XCNTXT_MASK;
-
-       xstate_enable();
-
-       /*
-        * Recompute the context size for enabled features
-        */
-       init_xstate_size();
-
-       update_regset_xstate_info(xstate_size, pcntxt_mask);
-       prepare_fx_sw_frame();
-       setup_init_fpu_buf();
-
-       /* Auto enable eagerfpu for xsaveopt */
-       if (cpu_has_xsaveopt && eagerfpu != DISABLE)
-               eagerfpu = ENABLE;
-
-       if (pcntxt_mask & XSTATE_EAGER) {
-               if (eagerfpu == DISABLE) {
-                       pr_err("eagerfpu not present, disabling some xstate features: 0x%llx\n",
-                                       pcntxt_mask & XSTATE_EAGER);
-                       pcntxt_mask &= ~XSTATE_EAGER;
-               } else {
-                       eagerfpu = ENABLE;
-               }
-       }
-
-       pr_info("enabled xstate_bv 0x%llx, cntxt size 0x%x using %s\n",
-               pcntxt_mask, xstate_size,
-               cpu_has_xsaves ? "compacted form" : "standard form");
-}
-
-/*
- * For the very first instance, this calls xstate_enable_boot_cpu();
- * for all subsequent instances, this calls xstate_enable().
- *
- * This is somewhat obfuscated due to the lack of powerful enough
- * overrides for the section checks.
- */
-void xsave_init(void)
-{
-       static __refdata void (*next_func)(void) = xstate_enable_boot_cpu;
-       void (*this_func)(void);
-
-       if (!cpu_has_xsave)
-               return;
-
-       this_func = next_func;
-       next_func = xstate_enable;
-       this_func();
-}
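
The function-pointer swap is a run-once dispatch: the first caller (the
boot CPU) gets the heavyweight setup, every later caller gets the cheap
path, and no flag needs testing afterwards.  The same idiom standalone
(relying, as the original does, on callers being serialized):

#include <stdio.h>

static void boot_cpu_setup(void)  { puts("boot CPU: full setup"); }
static void secondary_setup(void) { puts("secondary CPU: enable only"); }

static void cpu_init_dispatch(void)
{
        static void (*next_func)(void) = boot_cpu_setup;
        void (*this_func)(void);

        this_func = next_func;
        next_func = secondary_setup;
        this_func();
}

int main(void)
{
        cpu_init_dispatch();  /* boot CPU: full setup       */
        cpu_init_dispatch();  /* secondary CPU: enable only */
        cpu_init_dispatch();  /* secondary CPU: enable only */
        return 0;
}
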
-
-/*
- * setup_init_fpu_buf() is __init and it is OK to call it here because
- * init_xstate_buf will be unset only once during boot.
- */
-void __init_refok eager_fpu_init(void)
-{
-       WARN_ON(used_math());
-       current_thread_info()->status = 0;
-
-       if (eagerfpu == ENABLE)
-               setup_force_cpu_cap(X86_FEATURE_EAGER_FPU);
-
-       if (!cpu_has_eager_fpu) {
-               stts();
-               return;
-       }
-
-       if (!init_xstate_buf)
-               setup_init_fpu_buf();
-}
-
-/*
- * Given the xsave area and a state inside, this function returns the
- * address of the state.
- *
- * This is the API that is called to get xstate address in either
- * standard format or compacted format of xsave area.
- *
- * Inputs:
- *     xsave: base address of the xsave area;
- *     xstate: state which is defined in xsave.h (e.g. XSTATE_FP, XSTATE_SSE,
- *     etc.)
- * Output:
- *     address of the state in the xsave area.
- */
-void *get_xsave_addr(struct xsave_struct *xsave, int xstate)
-{
-       int feature = fls64(xstate) - 1;
-       if (!test_bit(feature, (unsigned long *)&pcntxt_mask))
-               return NULL;
-
-       return (void *)xsave + xstate_comp_offsets[feature];
-}
-EXPORT_SYMBOL_GPL(get_xsave_addr);
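
The fls64(xstate) - 1 step maps a single-bit feature mask to its bit index.
A standalone equivalent using the GCC/Clang builtin (the XSTATE_* bit
positions shown are illustrative assumptions):

#include <stdio.h>
#include <stdint.h>

/* Index of the highest set bit; undefined for 0, so callers must pass a
 * non-zero single-bit mask, mirroring get_xsave_addr()'s contract. */
static int feature_index(uint64_t single_bit_mask)
{
        return 63 - __builtin_clzll(single_bit_mask);
}

int main(void)
{
        uint64_t xstate_fp     = 1ULL << 0;   /* assumed bit positions */
        uint64_t xstate_sse    = 1ULL << 1;
        uint64_t xstate_bndcsr = 1ULL << 4;

        printf("%d %d %d\n",
               feature_index(xstate_fp),
               feature_index(xstate_sse),
               feature_index(xstate_bndcsr));  /* 0 1 4 */
        return 0;
}
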
index 59b69f6a2844cdce101a69c3bb34eb7ccd30556f..9f705e618af574d9a406f2d55f723bfa11129385 100644 (file)
@@ -17,7 +17,7 @@
 #include <linux/vmalloc.h>
 #include <linux/uaccess.h>
 #include <asm/user.h>
-#include <asm/xsave.h>
+#include <asm/fpu/xstate.h>
 #include "cpuid.h"
 #include "lapic.h"
 #include "mmu.h"
@@ -95,6 +95,8 @@ int kvm_update_cpuid(struct kvm_vcpu *vcpu)
        if (best && (best->eax & (F(XSAVES) | F(XSAVEC))))
                best->ebx = xstate_required_size(vcpu->arch.xcr0, true);
 
+       vcpu->arch.eager_fpu = guest_cpuid_has_mpx(vcpu);
+
        /*
         * The existing code assumes virtual address is 48-bit in the canonical
         * address checks; exit if it is ever changed.
index c3b1ad9fca818befb9e5920f7eb7c0d2b703d245..496b3695d3d3c96fd2687b2b6bc013d9ee8d96e5 100644 (file)
@@ -117,4 +117,12 @@ static inline bool guest_cpuid_has_rtm(struct kvm_vcpu *vcpu)
        best = kvm_find_cpuid_entry(vcpu, 7, 0);
        return best && (best->ebx & bit(X86_FEATURE_RTM));
 }
+
+static inline bool guest_cpuid_has_mpx(struct kvm_vcpu *vcpu)
+{
+       struct kvm_cpuid_entry2 *best;
+
+       best = kvm_find_cpuid_entry(vcpu, 7, 0);
+       return best && (best->ebx & bit(X86_FEATURE_MPX));
+}
 #endif
index 629af0f1c5c4d0953010adc88233132bcdff4cb7..4c7deb4f78a147b1a4a8b451120d3b80fa6401dc 100644 (file)
@@ -1090,6 +1090,17 @@ static void update_divide_count(struct kvm_lapic *apic)
                                   apic->divide_count);
 }
 
+static void apic_update_lvtt(struct kvm_lapic *apic)
+{
+       u32 timer_mode = kvm_apic_get_reg(apic, APIC_LVTT) &
+                       apic->lapic_timer.timer_mode_mask;
+
+       if (apic->lapic_timer.timer_mode != timer_mode) {
+               apic->lapic_timer.timer_mode = timer_mode;
+               hrtimer_cancel(&apic->lapic_timer.timer);
+       }
+}
+
 static void apic_timer_expired(struct kvm_lapic *apic)
 {
        struct kvm_vcpu *vcpu = apic->vcpu;
@@ -1298,6 +1309,7 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
                                apic_set_reg(apic, APIC_LVTT + 0x10 * i,
                                             lvt_val | APIC_LVT_MASKED);
                        }
+                       apic_update_lvtt(apic);
                        atomic_set(&apic->lapic_timer.pending, 0);
 
                }
@@ -1330,20 +1342,13 @@ static int apic_reg_write(struct kvm_lapic *apic, u32 reg, u32 val)
 
                break;
 
-       case APIC_LVTT: {
-               u32 timer_mode = val & apic->lapic_timer.timer_mode_mask;
-
-               if (apic->lapic_timer.timer_mode != timer_mode) {
-                       apic->lapic_timer.timer_mode = timer_mode;
-                       hrtimer_cancel(&apic->lapic_timer.timer);
-               }
-
+       case APIC_LVTT:
                if (!kvm_apic_sw_enabled(apic))
                        val |= APIC_LVT_MASKED;
                val &= (apic_lvt_mask[0] | apic->lapic_timer.timer_mode_mask);
                apic_set_reg(apic, APIC_LVTT, val);
+               apic_update_lvtt(apic);
                break;
-       }
 
        case APIC_TMICT:
                if (apic_lvtt_tscdeadline(apic))
@@ -1576,7 +1581,7 @@ void kvm_lapic_reset(struct kvm_vcpu *vcpu)
 
        for (i = 0; i < APIC_LVT_NUM; i++)
                apic_set_reg(apic, APIC_LVTT + 0x10 * i, APIC_LVT_MASKED);
-       apic->lapic_timer.timer_mode = 0;
+       apic_update_lvtt(apic);
        apic_set_reg(apic, APIC_LVT0,
                     SET_APIC_DELIVERY_MODE(0, APIC_MODE_EXTINT));
 
@@ -1802,6 +1807,7 @@ void kvm_apic_post_state_restore(struct kvm_vcpu *vcpu,
 
        apic_update_ppr(apic);
        hrtimer_cancel(&apic->lapic_timer.timer);
+       apic_update_lvtt(apic);
        update_divide_count(apic);
        start_apic_timer(apic);
        apic->irr_pending = true;
index d43867c33bc4efee0c970e9602cd63616b4ec739..b73337634214c209e250051cd21e00bd2436fcd6 100644 (file)
@@ -3736,8 +3736,8 @@ static void reset_rsvds_bits_mask_ept(struct kvm_vcpu *vcpu,
        }
 }
 
-void update_permission_bitmask(struct kvm_vcpu *vcpu,
-               struct kvm_mmu *mmu, bool ept)
+static void update_permission_bitmask(struct kvm_vcpu *vcpu,
+                                     struct kvm_mmu *mmu, bool ept)
 {
        unsigned bit, byte, pfec;
        u8 map;
@@ -3918,6 +3918,7 @@ static void init_kvm_tdp_mmu(struct kvm_vcpu *vcpu)
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
 {
        bool smep = kvm_read_cr4_bits(vcpu, X86_CR4_SMEP);
+       bool smap = kvm_read_cr4_bits(vcpu, X86_CR4_SMAP);
        struct kvm_mmu *context = &vcpu->arch.mmu;
 
        MMU_WARN_ON(VALID_PAGE(context->root_hpa));
@@ -3936,6 +3937,8 @@ void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu)
        context->base_role.cr0_wp  = is_write_protection(vcpu);
        context->base_role.smep_andnot_wp
                = smep && !is_write_protection(vcpu);
+       context->base_role.smap_andnot_wp
+               = smap && !is_write_protection(vcpu);
 }
 EXPORT_SYMBOL_GPL(kvm_init_shadow_mmu);
 
@@ -4207,12 +4210,18 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
                       const u8 *new, int bytes)
 {
        gfn_t gfn = gpa >> PAGE_SHIFT;
-       union kvm_mmu_page_role mask = { .word = 0 };
        struct kvm_mmu_page *sp;
        LIST_HEAD(invalid_list);
        u64 entry, gentry, *spte;
        int npte;
        bool remote_flush, local_flush, zap_page;
+       union kvm_mmu_page_role mask = { };
+
+       mask.cr0_wp = 1;
+       mask.cr4_pae = 1;
+       mask.nxe = 1;
+       mask.smep_andnot_wp = 1;
+       mask.smap_andnot_wp = 1;
 
        /*
         * If we don't have indirect shadow pages, it means no page is
@@ -4238,7 +4247,6 @@ void kvm_mmu_pte_write(struct kvm_vcpu *vcpu, gpa_t gpa,
        ++vcpu->kvm->stat.mmu_pte_write;
        kvm_mmu_audit(vcpu, AUDIT_PRE_PTE_WRITE);
 
-       mask.cr0_wp = mask.cr4_pae = mask.nxe = 1;
        for_each_gfn_indirect_valid_sp(vcpu->kvm, sp, gfn) {
                if (detect_write_misaligned(sp, gpa, bytes) ||
                      detect_write_flooding(sp)) {
index c7d65637c8518e55e3650b01d3b106cea9597962..0ada65ecddcf27ca619269d92435012c3f19790a 100644 (file)
@@ -71,8 +71,6 @@ enum {
 int handle_mmio_page_fault_common(struct kvm_vcpu *vcpu, u64 addr, bool direct);
 void kvm_init_shadow_mmu(struct kvm_vcpu *vcpu);
 void kvm_init_shadow_ept_mmu(struct kvm_vcpu *vcpu, bool execonly);
-void update_permission_bitmask(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
-               bool ept);
 
 static inline unsigned int kvm_mmu_available_pages(struct kvm *kvm)
 {
@@ -166,6 +164,8 @@ static inline bool permission_fault(struct kvm_vcpu *vcpu, struct kvm_mmu *mmu,
        int index = (pfec >> 1) +
                    (smap >> (X86_EFLAGS_AC_BIT - PFERR_RSVD_BIT + 1));
 
+       WARN_ON(pfec & PFERR_RSVD_MASK);
+
        return (mmu->permissions[index] >> pte_access) & 1;
 }
 
index fd49c867b25a11927fc2f6ef4522e1ef9ee80c11..6e6d115fe9b542607c946cf57d1cc37ca948c923 100644 (file)
@@ -718,6 +718,13 @@ static int FNAME(page_fault)(struct kvm_vcpu *vcpu, gva_t addr, u32 error_code,
                                              mmu_is_nested(vcpu));
                if (likely(r != RET_MMIO_PF_INVALID))
                        return r;
+
+               /*
+                * A page fault with PFEC.RSVD = 1 is caused by a shadow
+                * page fault and should not be used to walk the guest
+                * page table.
+                */
+               error_code &= ~PFERR_RSVD_MASK;
        };
 
        r = mmu_topup_memory_caches(vcpu);
index ce741b8650f6ece694fb47e1750d153291a1803f..9afa233b5482f6a68addba2494916e9624e52566 100644 (file)
@@ -4381,6 +4381,7 @@ static struct kvm_x86_ops svm_x86_ops = {
        .cache_reg = svm_cache_reg,
        .get_rflags = svm_get_rflags,
        .set_rflags = svm_set_rflags,
+       .fpu_activate = svm_fpu_activate,
        .fpu_deactivate = svm_fpu_deactivate,
 
        .tlb_flush = svm_flush_tlb,
index f7b61687bd79facc3f6ed0339b68ecf3fcf43aed..e11dd59398f1576d593ca16fb68a16de7ec241f7 100644 (file)
@@ -40,8 +40,7 @@
 #include <asm/vmx.h>
 #include <asm/virtext.h>
 #include <asm/mce.h>
-#include <asm/i387.h>
-#include <asm/xcr.h>
+#include <asm/fpu/internal.h>
 #include <asm/perf_event.h>
 #include <asm/debugreg.h>
 #include <asm/kexec.h>
@@ -1883,7 +1882,7 @@ static void __vmx_load_host_state(struct vcpu_vmx *vmx)
         * If the FPU is not active (through the host task or
         * the guest vcpu), then restore the cr0.TS bit.
         */
-       if (!user_has_fpu() && !vmx->vcpu.guest_fpu_loaded)
+       if (!fpregs_active() && !vmx->vcpu.guest_fpu_loaded)
                stts();
        load_gdt(this_cpu_ptr(&host_gdt));
 }
@@ -10185,6 +10184,7 @@ static struct kvm_x86_ops vmx_x86_ops = {
        .cache_reg = vmx_cache_reg,
        .get_rflags = vmx_get_rflags,
        .set_rflags = vmx_set_rflags,
+       .fpu_activate = vmx_fpu_activate,
        .fpu_deactivate = vmx_fpu_deactivate,
 
        .tlb_flush = vmx_flush_tlb,
index c73efcd03e294a2e5bc2ef276dc7fa4a6e9d837f..26eaeb522cab214bed15cba35f5be945722d70ae 100644 (file)
@@ -59,9 +59,8 @@
 #include <asm/desc.h>
 #include <asm/mtrr.h>
 #include <asm/mce.h>
-#include <asm/i387.h>
-#include <asm/fpu-internal.h> /* Ugh! */
-#include <asm/xcr.h>
+#include <linux/kernel_stat.h>
+#include <asm/fpu/internal.h> /* Ugh! */
 #include <asm/pvclock.h>
 #include <asm/div64.h>
 
@@ -702,8 +701,9 @@ EXPORT_SYMBOL_GPL(kvm_set_xcr);
 int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
 {
        unsigned long old_cr4 = kvm_read_cr4(vcpu);
-       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE |
-                                  X86_CR4_PAE | X86_CR4_SMEP;
+       unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE |
+                                  X86_CR4_SMEP | X86_CR4_SMAP;
+
        if (cr4 & CR4_RESERVED_BITS)
                return 1;
 
@@ -744,9 +744,6 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
            (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE)))
                kvm_mmu_reset_context(vcpu);
 
-       if ((cr4 ^ old_cr4) & X86_CR4_SMAP)
-               update_permission_bitmask(vcpu, vcpu->arch.walk_mmu, false);
-
        if ((cr4 ^ old_cr4) & X86_CR4_OSXSAVE)
                kvm_update_cpuid(vcpu);
 
@@ -3196,8 +3193,8 @@ static int kvm_vcpu_ioctl_x86_set_debugregs(struct kvm_vcpu *vcpu,
 
 static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 {
-       struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
-       u64 xstate_bv = xsave->xsave_hdr.xstate_bv;
+       struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
+       u64 xstate_bv = xsave->header.xfeatures;
        u64 valid;
 
        /*
@@ -3232,7 +3229,7 @@ static void fill_xsave(u8 *dest, struct kvm_vcpu *vcpu)
 
 static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
 {
-       struct xsave_struct *xsave = &vcpu->arch.guest_fpu.state->xsave;
+       struct xregs_state *xsave = &vcpu->arch.guest_fpu.state.xsave;
        u64 xstate_bv = *(u64 *)(src + XSAVE_HDR_OFFSET);
        u64 valid;
 
@@ -3243,9 +3240,9 @@ static void load_xsave(struct kvm_vcpu *vcpu, u8 *src)
        memcpy(xsave, src, XSAVE_HDR_OFFSET);
 
        /* Set XSTATE_BV and possibly XCOMP_BV.  */
-       xsave->xsave_hdr.xstate_bv = xstate_bv;
+       xsave->header.xfeatures = xstate_bv;
        if (cpu_has_xsaves)
-               xsave->xsave_hdr.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
+               xsave->header.xcomp_bv = host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
        /*
         * Copy each region from the non-compacted offset to the
@@ -3277,8 +3274,8 @@ static void kvm_vcpu_ioctl_x86_get_xsave(struct kvm_vcpu *vcpu,
                fill_xsave((u8 *) guest_xsave->region, vcpu);
        } else {
                memcpy(guest_xsave->region,
-                       &vcpu->arch.guest_fpu.state->fxsave,
-                       sizeof(struct i387_fxsave_struct));
+                       &vcpu->arch.guest_fpu.state.fxsave,
+                       sizeof(struct fxregs_state));
                *(u64 *)&guest_xsave->region[XSAVE_HDR_OFFSET / sizeof(u32)] =
                        XSTATE_FPSSE;
        }
@@ -3302,8 +3299,8 @@ static int kvm_vcpu_ioctl_x86_set_xsave(struct kvm_vcpu *vcpu,
        } else {
                if (xstate_bv & ~XSTATE_FPSSE)
                        return -EINVAL;
-               memcpy(&vcpu->arch.guest_fpu.state->fxsave,
-                       guest_xsave->region, sizeof(struct i387_fxsave_struct));
+               memcpy(&vcpu->arch.guest_fpu.state.fxsave,
+                       guest_xsave->region, sizeof(struct fxregs_state));
        }
        return 0;
 }
@@ -6197,6 +6194,8 @@ void kvm_vcpu_reload_apic_access_page(struct kvm_vcpu *vcpu)
                return;
 
        page = gfn_to_page(vcpu->kvm, APIC_DEFAULT_PHYS_BASE >> PAGE_SHIFT);
+       if (is_error_page(page))
+               return;
        kvm_x86_ops->set_apic_access_page_addr(vcpu, page_to_phys(page));
 
        /*
@@ -6597,11 +6596,11 @@ static int complete_emulated_mmio(struct kvm_vcpu *vcpu)
 
 int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
+       struct fpu *fpu = &current->thread.fpu;
        int r;
        sigset_t sigsaved;
 
-       if (!tsk_used_math(current) && init_fpu(current))
-               return -ENOMEM;
+       fpu__activate_curr(fpu);
 
        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
@@ -6971,8 +6970,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       struct i387_fxsave_struct *fxsave =
-                       &vcpu->arch.guest_fpu.state->fxsave;
+       struct fxregs_state *fxsave =
+                       &vcpu->arch.guest_fpu.state.fxsave;
 
        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
@@ -6988,8 +6987,8 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       struct i387_fxsave_struct *fxsave =
-                       &vcpu->arch.guest_fpu.state->fxsave;
+       struct fxregs_state *fxsave =
+                       &vcpu->arch.guest_fpu.state.fxsave;
 
        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
@@ -7003,17 +7002,11 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        return 0;
 }
 
-int fx_init(struct kvm_vcpu *vcpu)
+static void fx_init(struct kvm_vcpu *vcpu)
 {
-       int err;
-
-       err = fpu_alloc(&vcpu->arch.guest_fpu);
-       if (err)
-               return err;
-
-       fpu_finit(&vcpu->arch.guest_fpu);
+       fpstate_init(&vcpu->arch.guest_fpu.state);
        if (cpu_has_xsaves)
-               vcpu->arch.guest_fpu.state->xsave.xsave_hdr.xcomp_bv =
+               vcpu->arch.guest_fpu.state.xsave.header.xcomp_bv =
                        host_xcr0 | XSTATE_COMPACTION_ENABLED;
 
        /*
@@ -7022,14 +7015,6 @@ int fx_init(struct kvm_vcpu *vcpu)
        vcpu->arch.xcr0 = XSTATE_FP;
 
        vcpu->arch.cr0 |= X86_CR0_ET;
-
-       return 0;
-}
-EXPORT_SYMBOL_GPL(fx_init);
-
-static void fx_free(struct kvm_vcpu *vcpu)
-{
-       fpu_free(&vcpu->arch.guest_fpu);
 }
 
 void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
@@ -7045,7 +7030,7 @@ void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
        kvm_put_guest_xcr0(vcpu);
        vcpu->guest_fpu_loaded = 1;
        __kernel_fpu_begin();
-       fpu_restore_checking(&vcpu->arch.guest_fpu);
+       __copy_kernel_to_fpregs(&vcpu->arch.guest_fpu.state);
        trace_kvm_fpu(1);
 }
 
@@ -7057,10 +7042,12 @@ void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
                return;
 
        vcpu->guest_fpu_loaded = 0;
-       fpu_save_init(&vcpu->arch.guest_fpu);
+       copy_fpregs_to_fpstate(&vcpu->arch.guest_fpu);
        __kernel_fpu_end();
        ++vcpu->stat.fpu_reload;
-       kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+       if (!vcpu->arch.eager_fpu)
+               kvm_make_request(KVM_REQ_DEACTIVATE_FPU, vcpu);
+
        trace_kvm_fpu(0);
 }
 
@@ -7069,18 +7056,27 @@ void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
        kvmclock_reset(vcpu);
 
        free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
-       fx_free(vcpu);
        kvm_x86_ops->vcpu_free(vcpu);
 }
 
 struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
                                                unsigned int id)
 {
+       struct kvm_vcpu *vcpu;
+
        if (check_tsc_unstable() && atomic_read(&kvm->online_vcpus) != 0)
                printk_once(KERN_WARNING
                "kvm: SMP vm created on host with unstable TSC; "
                "guest TSC will not be reliable\n");
-       return kvm_x86_ops->vcpu_create(kvm, id);
+
+       vcpu = kvm_x86_ops->vcpu_create(kvm, id);
+
+       /*
+        * Activate fpu unconditionally in case the guest needs eager FPU.  It will be
+        * deactivated soon if it doesn't.
+        */
+       kvm_x86_ops->fpu_activate(vcpu);
+       return vcpu;
 }
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
@@ -7125,7 +7121,6 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
 
-       fx_free(vcpu);
        kvm_x86_ops->vcpu_free(vcpu);
 }
 
@@ -7351,9 +7346,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_mce_banks;
        }
 
-       r = fx_init(vcpu);
-       if (r)
-               goto fail_free_wbinvd_dirty_mask;
+       fx_init(vcpu);
 
        vcpu->arch.ia32_tsc_adjust_msr = 0x0;
        vcpu->arch.pv_time_enabled = false;
@@ -7367,8 +7360,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm_pmu_init(vcpu);
 
        return 0;
-fail_free_wbinvd_dirty_mask:
-       free_cpumask_var(vcpu->arch.wbinvd_dirty_mask);
+
 fail_free_mce_banks:
        kfree(vcpu->arch.mce_banks);
 fail_free_lapic:
index 8f9a133cc09934c2de2fa77a1eecb14ce5f94c06..f2dc08c003eb0b4c6c9f691a1c4b9e8f556eaaac 100644 (file)
@@ -70,7 +70,7 @@
 #include <asm/e820.h>
 #include <asm/mce.h>
 #include <asm/io.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/stackprotector.h>
 #include <asm/reboot.h>                /* for struct machine_ops */
 #include <asm/kvm_para.h>
@@ -90,7 +90,7 @@ struct lguest_data lguest_data = {
        .noirq_iret = (u32)lguest_noirq_iret,
        .kernel_address = PAGE_OFFSET,
        .blocked_interrupts = { 1 }, /* Block timer interrupts */
-       .syscall_vec = SYSCALL_VECTOR,
+       .syscall_vec = IA32_SYSCALL_VECTOR,
 };
 
 /*G:037
@@ -866,7 +866,7 @@ static void __init lguest_init_IRQ(void)
        for (i = FIRST_EXTERNAL_VECTOR; i < FIRST_SYSTEM_VECTOR; i++) {
                /* Some systems map "vectors" to interrupts weirdly.  Not us! */
                __this_cpu_write(vector_irq[i], i - FIRST_EXTERNAL_VECTOR);
-               if (i != SYSCALL_VECTOR)
+               if (i != IA32_SYSCALL_VECTOR)
                        set_intr_gate(i, irq_entries_start +
                                        8 * (i - FIRST_EXTERNAL_VECTOR));
        }
index 1530afb07c85443aac9c6e1949efd4d58e8fb51a..f2587888d987f7ce4f370fd70b4b18461581b19e 100644 (file)
@@ -17,7 +17,6 @@ clean-files := inat-tables.c
 obj-$(CONFIG_SMP) += msr-smp.o cache-smp.o
 
 lib-y := delay.o misc.o cmdline.o
-lib-y += thunk_$(BITS).o
 lib-y += usercopy_$(BITS).o usercopy.o getuser.o putuser.o
 lib-y += memcpy_$(BITS).o
 lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
@@ -40,6 +39,6 @@ else
         lib-y += csum-partial_64.o csum-copy_64.o csum-wrappers_64.o
         lib-y += clear_page_64.o copy_page_64.o
         lib-y += memmove_64.o memset_64.o
-        lib-y += copy_user_64.o copy_user_nocache_64.o
+        lib-y += copy_user_64.o
        lib-y += cmpxchg16b_emu.o
 endif
index 00933d5e992f7a8df394ee347717365925c0f58c..9b0ca8fe80fce949d0a6b6825aec99d96946d8de 100644 (file)
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 /* if you want SMP support, implement these with real spinlocks */
 .macro LOCK reg
-       pushfl_cfi
+       pushfl
        cli
 .endm
 
 .macro UNLOCK reg
-       popfl_cfi
+       popfl
 .endm
 
 #define BEGIN(op) \
 .macro endp; \
-       CFI_ENDPROC; \
 ENDPROC(atomic64_##op##_386); \
 .purgem endp; \
 .endm; \
 ENTRY(atomic64_##op##_386); \
-       CFI_STARTPROC; \
        LOCK v;
 
 #define ENDP endp
index 082a85167a5b68feff28d749b0e04425a0f4adf2..db3ae85440ff7925df7b59fe07972068ae6d8b7a 100644 (file)
@@ -11,7 +11,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 .macro read64 reg
        movl %ebx, %eax
 .endm
 
 ENTRY(atomic64_read_cx8)
-       CFI_STARTPROC
-
        read64 %ecx
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_read_cx8)
 
 ENTRY(atomic64_set_cx8)
-       CFI_STARTPROC
-
 1:
 /* we don't need LOCK_PREFIX since aligned 64-bit writes
  * are atomic on 586 and newer */
@@ -39,28 +33,23 @@ ENTRY(atomic64_set_cx8)
        jne 1b
 
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_set_cx8)
 
 ENTRY(atomic64_xchg_cx8)
-       CFI_STARTPROC
-
 1:
        LOCK_PREFIX
        cmpxchg8b (%esi)
        jne 1b
 
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_xchg_cx8)
 
 .macro addsub_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-       CFI_STARTPROC
-       pushl_cfi_reg ebp
-       pushl_cfi_reg ebx
-       pushl_cfi_reg esi
-       pushl_cfi_reg edi
+       pushl %ebp
+       pushl %ebx
+       pushl %esi
+       pushl %edi
 
        movl %eax, %esi
        movl %edx, %edi
@@ -79,12 +68,11 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
        movl %ebx, %eax
        movl %ecx, %edx
-       popl_cfi_reg edi
-       popl_cfi_reg esi
-       popl_cfi_reg ebx
-       popl_cfi_reg ebp
+       popl %edi
+       popl %esi
+       popl %ebx
+       popl %ebp
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm
 
@@ -93,8 +81,7 @@ addsub_return sub sub sbb
 
 .macro incdec_return func ins insc
 ENTRY(atomic64_\func\()_return_cx8)
-       CFI_STARTPROC
-       pushl_cfi_reg ebx
+       pushl %ebx
 
        read64 %esi
 1:
@@ -109,9 +96,8 @@ ENTRY(atomic64_\func\()_return_cx8)
 10:
        movl %ebx, %eax
        movl %ecx, %edx
-       popl_cfi_reg ebx
+       popl %ebx
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_\func\()_return_cx8)
 .endm
 
@@ -119,8 +105,7 @@ incdec_return inc add adc
 incdec_return dec sub sbb
 
 ENTRY(atomic64_dec_if_positive_cx8)
-       CFI_STARTPROC
-       pushl_cfi_reg ebx
+       pushl %ebx
 
        read64 %esi
 1:
@@ -136,18 +121,16 @@ ENTRY(atomic64_dec_if_positive_cx8)
 2:
        movl %ebx, %eax
        movl %ecx, %edx
-       popl_cfi_reg ebx
+       popl %ebx
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_dec_if_positive_cx8)
 
 ENTRY(atomic64_add_unless_cx8)
-       CFI_STARTPROC
-       pushl_cfi_reg ebp
-       pushl_cfi_reg ebx
+       pushl %ebp
+       pushl %ebx
 /* these just push these two parameters on the stack */
-       pushl_cfi_reg edi
-       pushl_cfi_reg ecx
+       pushl %edi
+       pushl %ecx
 
        movl %eax, %ebp
        movl %edx, %edi
@@ -168,21 +151,18 @@ ENTRY(atomic64_add_unless_cx8)
        movl $1, %eax
 3:
        addl $8, %esp
-       CFI_ADJUST_CFA_OFFSET -8
-       popl_cfi_reg ebx
-       popl_cfi_reg ebp
+       popl %ebx
+       popl %ebp
        ret
 4:
        cmpl %edx, 4(%esp)
        jne 2b
        xorl %eax, %eax
        jmp 3b
-       CFI_ENDPROC
 ENDPROC(atomic64_add_unless_cx8)
 
 ENTRY(atomic64_inc_not_zero_cx8)
-       CFI_STARTPROC
-       pushl_cfi_reg ebx
+       pushl %ebx
 
        read64 %esi
 1:
@@ -199,7 +179,6 @@ ENTRY(atomic64_inc_not_zero_cx8)
 
        movl $1, %eax
 3:
-       popl_cfi_reg ebx
+       popl %ebx
        ret
-       CFI_ENDPROC
 ENDPROC(atomic64_inc_not_zero_cx8)
index 9bc944a9127481ead40689a73054d80e50f0bc10..c1e6232098531f0d8ce14d492215041994443b81 100644 (file)
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
                                
@@ -50,9 +49,8 @@ unsigned int csum_partial(const unsigned char * buff, int len, unsigned int sum)
           * alignment for the unrolled loop.
           */           
 ENTRY(csum_partial)
-       CFI_STARTPROC
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       pushl %esi
+       pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: unsigned char *buff
@@ -129,10 +127,9 @@ ENTRY(csum_partial)
        jz 8f
        roll $8, %eax
 8:
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
+       popl %ebx
+       popl %esi
        ret
-       CFI_ENDPROC
 ENDPROC(csum_partial)
 
 #else
@@ -140,9 +137,8 @@ ENDPROC(csum_partial)
 /* Version for PentiumII/PPro */
 
 ENTRY(csum_partial)
-       CFI_STARTPROC
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       pushl %esi
+       pushl %ebx
        movl 20(%esp),%eax      # Function arg: unsigned int sum
        movl 16(%esp),%ecx      # Function arg: int len
        movl 12(%esp),%esi      # Function arg: const unsigned char *buf
@@ -249,10 +245,9 @@ ENTRY(csum_partial)
        jz 90f
        roll $8, %eax
 90: 
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
+       popl %ebx
+       popl %esi
        ret
-       CFI_ENDPROC
 ENDPROC(csum_partial)
                                
 #endif
@@ -287,12 +282,10 @@ unsigned int csum_partial_copy_generic (const char *src, char *dst,
 #define FP             12
                
 ENTRY(csum_partial_copy_generic)
-       CFI_STARTPROC
        subl  $4,%esp   
-       CFI_ADJUST_CFA_OFFSET 4
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
-       pushl_cfi_reg ebx
+       pushl %edi
+       pushl %esi
+       pushl %ebx
        movl ARGBASE+16(%esp),%eax      # sum
        movl ARGBASE+12(%esp),%ecx      # len
        movl ARGBASE+4(%esp),%esi       # src
@@ -401,12 +394,11 @@ DST(      movb %cl, (%edi)        )
 
 .previous
 
-       popl_cfi_reg ebx
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi %ecx                   # equivalent to addl $4,%esp
+       popl %ebx
+       popl %esi
+       popl %edi
+       popl %ecx                       # equivalent to addl $4,%esp
        ret     
-       CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
 
 #else
@@ -426,10 +418,9 @@ ENDPROC(csum_partial_copy_generic)
 #define ARGBASE 12
                
 ENTRY(csum_partial_copy_generic)
-       CFI_STARTPROC
-       pushl_cfi_reg ebx
-       pushl_cfi_reg edi
-       pushl_cfi_reg esi
+       pushl %ebx
+       pushl %edi
+       pushl %esi
        movl ARGBASE+4(%esp),%esi       #src
        movl ARGBASE+8(%esp),%edi       #dst    
        movl ARGBASE+12(%esp),%ecx      #len
@@ -489,11 +480,10 @@ DST(      movb %dl, (%edi)         )
        jmp  7b                 
 .previous                              
 
-       popl_cfi_reg esi
-       popl_cfi_reg edi
-       popl_cfi_reg ebx
+       popl %esi
+       popl %edi
+       popl %ebx
        ret
-       CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
                                
 #undef ROUND
index e67e579c93bdf7f3d0737565ea106edeeefb3b6d..a2fe51b00ccefc660850e1a6810dd4adee611d2e 100644 (file)
@@ -1,5 +1,4 @@
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -15,7 +14,6 @@
  * %rdi        - page
  */
 ENTRY(clear_page)
-       CFI_STARTPROC
 
        ALTERNATIVE_2 "jmp clear_page_orig", "", X86_FEATURE_REP_GOOD, \
                      "jmp clear_page_c_e", X86_FEATURE_ERMS
@@ -24,11 +22,9 @@ ENTRY(clear_page)
        xorl %eax,%eax
        rep stosq
        ret
-       CFI_ENDPROC
 ENDPROC(clear_page)
 
 ENTRY(clear_page_orig)
-       CFI_STARTPROC
 
        xorl   %eax,%eax
        movl   $4096/64,%ecx
@@ -48,14 +44,11 @@ ENTRY(clear_page_orig)
        jnz     .Lloop
        nop
        ret
-       CFI_ENDPROC
 ENDPROC(clear_page_orig)
 
 ENTRY(clear_page_c_e)
-       CFI_STARTPROC
        movl $4096,%ecx
        xorl %eax,%eax
        rep stosb
        ret
-       CFI_ENDPROC
 ENDPROC(clear_page_c_e)
index 40a172541ee2cb2c2342a0a9934948769e87337f..9b330242e7408125d6865e8f753c61ab9ae64802 100644 (file)
@@ -6,7 +6,6 @@
  *
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/percpu.h>
 
 .text
@@ -21,7 +20,6 @@
  * %al  : Operation successful
  */
 ENTRY(this_cpu_cmpxchg16b_emu)
-CFI_STARTPROC
 
 #
 # Emulate 'cmpxchg16b %gs:(%rsi)' except we return the result in %al not
@@ -32,7 +30,7 @@ CFI_STARTPROC
 # *atomic* on a single cpu (as provided by the this_cpu_xx class of
 # macros).
 #
-       pushfq_cfi
+       pushfq
        cli
 
        cmpq PER_CPU_VAR((%rsi)), %rax
@@ -43,17 +41,13 @@ CFI_STARTPROC
        movq %rbx, PER_CPU_VAR((%rsi))
        movq %rcx, PER_CPU_VAR(8(%rsi))
 
-       CFI_REMEMBER_STATE
-       popfq_cfi
+       popfq
        mov $1, %al
        ret
 
-       CFI_RESTORE_STATE
 .Lnot_same:
-       popfq_cfi
+       popfq
        xor %al,%al
        ret
 
-CFI_ENDPROC
-
 ENDPROC(this_cpu_cmpxchg16b_emu)
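
Both emulations above are atomic only against interrupts on the local CPU, not across CPUs. A hedged C model of the contract cmpxchg8b_emu provides (irq_save()/irq_restore() are stand-ins for the pushf/cli ... popf bracketing, not real APIs):

	#include <stdint.h>
	#include <stdbool.h>

	static void irq_save(void)    { /* pushf; cli */ }
	static void irq_restore(void) { /* popf */ }

	static bool cmpxchg8b_like(uint64_t *p, uint64_t *expected,
				   uint64_t new_val)
	{
		bool ok;

		irq_save();
		ok = (*p == *expected);
		if (ok)
			*p = new_val;      /* swap succeeded */
		else
			*expected = *p;    /* hand the observed value back */
		irq_restore();
		return ok;
	}
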
index b4807fce517760ac4a72dceae939ae322b0bda6b..ad53497784904b2c1f420fd41576c9ffeea8ce0b 100644 (file)
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 .text
 
  * %ecx : high 32 bits of new value
  */
 ENTRY(cmpxchg8b_emu)
-CFI_STARTPROC
 
 #
 # Emulate 'cmpxchg8b (%esi)' on UP except we don't
 # set the whole ZF thing (caller will just compare
 # eax:edx with the expected value)
 #
-       pushfl_cfi
+       pushfl
        cli
 
        cmpl  (%esi), %eax
@@ -38,18 +36,15 @@ CFI_STARTPROC
        movl %ebx,  (%esi)
        movl %ecx, 4(%esi)
 
-       CFI_REMEMBER_STATE
-       popfl_cfi
+       popfl
        ret
 
-       CFI_RESTORE_STATE
 .Lnot_same:
        movl  (%esi), %eax
 .Lhalf_same:
        movl 4(%esi), %edx
 
-       popfl_cfi
+       popfl
        ret
 
-CFI_ENDPROC
 ENDPROC(cmpxchg8b_emu)
index 8239dbcbf98455a99a125953392114f07a09aa74..009f98216b7eb316c12847e42f50ac77d4f4b8a3 100644 (file)
@@ -1,7 +1,6 @@
 /* Written 2003 by Andi Kleen, based on a kernel by Evandro Menezes */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
  */
        ALIGN
 ENTRY(copy_page)
-       CFI_STARTPROC
        ALTERNATIVE "jmp copy_page_regs", "", X86_FEATURE_REP_GOOD
        movl    $4096/8, %ecx
        rep     movsq
        ret
-       CFI_ENDPROC
 ENDPROC(copy_page)
 
 ENTRY(copy_page_regs)
-       CFI_STARTPROC
        subq    $2*8,   %rsp
-       CFI_ADJUST_CFA_OFFSET 2*8
        movq    %rbx,   (%rsp)
-       CFI_REL_OFFSET rbx, 0
        movq    %r12,   1*8(%rsp)
-       CFI_REL_OFFSET r12, 1*8
 
        movl    $(4096/64)-5,   %ecx
        .p2align 4
@@ -87,11 +80,7 @@ ENTRY(copy_page_regs)
        jnz     .Loop2
 
        movq    (%rsp), %rbx
-       CFI_RESTORE rbx
        movq    1*8(%rsp), %r12
-       CFI_RESTORE r12
        addq    $2*8, %rsp
-       CFI_ADJUST_CFA_OFFSET -2*8
        ret
-       CFI_ENDPROC
 ENDPROC(copy_page_regs)
index fa997dfaef242fa9abdb28c20658a939caf72697..982ce34f4a9bf66011fc2652b45466d9c2b276f9 100644 (file)
@@ -7,7 +7,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/current.h>
 #include <asm/asm-offsets.h>
 #include <asm/thread_info.h>
 #include <asm/asm.h>
 #include <asm/smap.h>
 
-       .macro ALIGN_DESTINATION
-       /* check for bad alignment of destination */
-       movl %edi,%ecx
-       andl $7,%ecx
-       jz 102f                         /* already aligned */
-       subl $8,%ecx
-       negl %ecx
-       subl %ecx,%edx
-100:   movb (%rsi),%al
-101:   movb %al,(%rdi)
-       incq %rsi
-       incq %rdi
-       decl %ecx
-       jnz 100b
-102:
-       .section .fixup,"ax"
-103:   addl %ecx,%edx                  /* ecx is zerorest also */
-       jmp copy_user_handle_tail
-       .previous
-
-       _ASM_EXTABLE(100b,103b)
-       _ASM_EXTABLE(101b,103b)
-       .endm
-
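
ALIGN_DESTINATION, deleted from this file (the consolidated __copy_user_nocache further down still invokes it from its shared location), byte-copies until the destination reaches an 8-byte boundary so the main loop can use 8-byte stores. A rough C sketch of just the alignment arithmetic, with the exception fixup stripped out (names are illustrative):

	#include <stdint.h>
	#include <stddef.h>

	static size_t copy_align_dest(unsigned char **dst,
				      const unsigned char **src, size_t len)
	{
		/* bytes until the next 8-byte boundary; 0 if aligned */
		size_t fixup = -(uintptr_t)*dst & 7;

		if (fixup > len)
			fixup = len;
		len -= fixup;
		while (fixup--)
			*(*dst)++ = *(*src)++;
		return len;               /* bytes still left to copy */
	}
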
 /* Standard copy_to_user with segment limit checking */
 ENTRY(_copy_to_user)
-       CFI_STARTPROC
        GET_THREAD_INFO(%rax)
        movq %rdi,%rcx
        addq %rdx,%rcx
@@ -54,12 +28,10 @@ ENTRY(_copy_to_user)
                      X86_FEATURE_REP_GOOD,                     \
                      "jmp copy_user_enhanced_fast_string",     \
                      X86_FEATURE_ERMS
-       CFI_ENDPROC
 ENDPROC(_copy_to_user)
 
 /* Standard copy_from_user with segment limit checking */
 ENTRY(_copy_from_user)
-       CFI_STARTPROC
        GET_THREAD_INFO(%rax)
        movq %rsi,%rcx
        addq %rdx,%rcx
@@ -71,14 +43,12 @@ ENTRY(_copy_from_user)
                      X86_FEATURE_REP_GOOD,                     \
                      "jmp copy_user_enhanced_fast_string",     \
                      X86_FEATURE_ERMS
-       CFI_ENDPROC
 ENDPROC(_copy_from_user)
 
        .section .fixup,"ax"
        /* must zero dest */
 ENTRY(bad_from_user)
 bad_from_user:
-       CFI_STARTPROC
        movl %edx,%ecx
        xorl %eax,%eax
        rep
@@ -86,7 +56,6 @@ bad_from_user:
 bad_to_user:
        movl %edx,%eax
        ret
-       CFI_ENDPROC
 ENDPROC(bad_from_user)
        .previous
 
@@ -104,7 +73,6 @@ ENDPROC(bad_from_user)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_generic_unrolled)
-       CFI_STARTPROC
        ASM_STAC
        cmpl $8,%edx
        jb 20f          /* less than 8 bytes, go to byte copy loop */
@@ -186,7 +154,6 @@ ENTRY(copy_user_generic_unrolled)
        _ASM_EXTABLE(19b,40b)
        _ASM_EXTABLE(21b,50b)
        _ASM_EXTABLE(22b,50b)
-       CFI_ENDPROC
 ENDPROC(copy_user_generic_unrolled)
 
 /* Some CPUs run faster using the string copy instructions.
@@ -208,7 +175,6 @@ ENDPROC(copy_user_generic_unrolled)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_generic_string)
-       CFI_STARTPROC
        ASM_STAC
        cmpl $8,%edx
        jb 2f           /* less than 8 bytes, go to byte copy loop */
@@ -233,7 +199,6 @@ ENTRY(copy_user_generic_string)
 
        _ASM_EXTABLE(1b,11b)
        _ASM_EXTABLE(3b,12b)
-       CFI_ENDPROC
 ENDPROC(copy_user_generic_string)
 
 /*
@@ -249,7 +214,6 @@ ENDPROC(copy_user_generic_string)
  * eax uncopied bytes or 0 if successful.
  */
 ENTRY(copy_user_enhanced_fast_string)
-       CFI_STARTPROC
        ASM_STAC
        movl %edx,%ecx
 1:     rep
@@ -264,5 +228,94 @@ ENTRY(copy_user_enhanced_fast_string)
        .previous
 
        _ASM_EXTABLE(1b,12b)
-       CFI_ENDPROC
 ENDPROC(copy_user_enhanced_fast_string)
+
+/*
+ * copy_user_nocache - Uncached memory copy with exception handling
+ * This will force destination/source out of cache for better performance.
+ */
+ENTRY(__copy_user_nocache)
+       ASM_STAC
+       cmpl $8,%edx
+       jb 20f          /* less than 8 bytes, go to byte copy loop */
+       ALIGN_DESTINATION
+       movl %edx,%ecx
+       andl $63,%edx
+       shrl $6,%ecx
+       jz 17f
+1:     movq (%rsi),%r8
+2:     movq 1*8(%rsi),%r9
+3:     movq 2*8(%rsi),%r10
+4:     movq 3*8(%rsi),%r11
+5:     movnti %r8,(%rdi)
+6:     movnti %r9,1*8(%rdi)
+7:     movnti %r10,2*8(%rdi)
+8:     movnti %r11,3*8(%rdi)
+9:     movq 4*8(%rsi),%r8
+10:    movq 5*8(%rsi),%r9
+11:    movq 6*8(%rsi),%r10
+12:    movq 7*8(%rsi),%r11
+13:    movnti %r8,4*8(%rdi)
+14:    movnti %r9,5*8(%rdi)
+15:    movnti %r10,6*8(%rdi)
+16:    movnti %r11,7*8(%rdi)
+       leaq 64(%rsi),%rsi
+       leaq 64(%rdi),%rdi
+       decl %ecx
+       jnz 1b
+17:    movl %edx,%ecx
+       andl $7,%edx
+       shrl $3,%ecx
+       jz 20f
+18:    movq (%rsi),%r8
+19:    movnti %r8,(%rdi)
+       leaq 8(%rsi),%rsi
+       leaq 8(%rdi),%rdi
+       decl %ecx
+       jnz 18b
+20:    andl %edx,%edx
+       jz 23f
+       movl %edx,%ecx
+21:    movb (%rsi),%al
+22:    movb %al,(%rdi)
+       incq %rsi
+       incq %rdi
+       decl %ecx
+       jnz 21b
+23:    xorl %eax,%eax
+       ASM_CLAC
+       sfence
+       ret
+
+       .section .fixup,"ax"
+30:    shll $6,%ecx
+       addl %ecx,%edx
+       jmp 60f
+40:    lea (%rdx,%rcx,8),%rdx
+       jmp 60f
+50:    movl %ecx,%edx
+60:    sfence
+       jmp copy_user_handle_tail
+       .previous
+
+       _ASM_EXTABLE(1b,30b)
+       _ASM_EXTABLE(2b,30b)
+       _ASM_EXTABLE(3b,30b)
+       _ASM_EXTABLE(4b,30b)
+       _ASM_EXTABLE(5b,30b)
+       _ASM_EXTABLE(6b,30b)
+       _ASM_EXTABLE(7b,30b)
+       _ASM_EXTABLE(8b,30b)
+       _ASM_EXTABLE(9b,30b)
+       _ASM_EXTABLE(10b,30b)
+       _ASM_EXTABLE(11b,30b)
+       _ASM_EXTABLE(12b,30b)
+       _ASM_EXTABLE(13b,30b)
+       _ASM_EXTABLE(14b,30b)
+       _ASM_EXTABLE(15b,30b)
+       _ASM_EXTABLE(16b,30b)
+       _ASM_EXTABLE(18b,40b)
+       _ASM_EXTABLE(19b,40b)
+       _ASM_EXTABLE(21b,50b)
+       _ASM_EXTABLE(22b,50b)
+ENDPROC(__copy_user_nocache)
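
The consolidated __copy_user_nocache above streams the destination with movnti (non-temporal stores) and fences before returning, so the copied data does not pollute the cache. A hedged user-space C sketch of the same idea using SSE2 intrinsics, x86-64 only; it ignores the unrolling, alignment and fault handling the real routine needs:

	#include <emmintrin.h>
	#include <stddef.h>

	static void copy_nocache_like(void *dst, const void *src, size_t len)
	{
		long long *d = dst;
		const long long *s = src;

		while (len >= 8) {
			_mm_stream_si64(d++, *s++);   /* movnti */
			len -= 8;
		}
		{	/* byte tail, ordinary cached stores */
			char *dc = (char *)d;
			const char *sc = (const char *)s;
			while (len--)
				*dc++ = *sc++;
		}
		_mm_sfence();   /* order the NT stores, like the asm's sfence */
	}
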
diff --git a/arch/x86/lib/copy_user_nocache_64.S b/arch/x86/lib/copy_user_nocache_64.S
deleted file mode 100644 (file)
index 6a4f43c..0000000
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright 2008 Vitaly Mayatskikh <vmayatsk@redhat.com>
- * Copyright 2002 Andi Kleen, SuSE Labs.
- * Subject to the GNU Public License v2.
- *
- * Functions to copy from and to user space.
- */
-
-#include <linux/linkage.h>
-#include <asm/dwarf2.h>
-
-#define FIX_ALIGNMENT 1
-
-#include <asm/current.h>
-#include <asm/asm-offsets.h>
-#include <asm/thread_info.h>
-#include <asm/asm.h>
-#include <asm/smap.h>
-
-       .macro ALIGN_DESTINATION
-#ifdef FIX_ALIGNMENT
-       /* check for bad alignment of destination */
-       movl %edi,%ecx
-       andl $7,%ecx
-       jz 102f                         /* already aligned */
-       subl $8,%ecx
-       negl %ecx
-       subl %ecx,%edx
-100:   movb (%rsi),%al
-101:   movb %al,(%rdi)
-       incq %rsi
-       incq %rdi
-       decl %ecx
-       jnz 100b
-102:
-       .section .fixup,"ax"
-103:   addl %ecx,%edx                  /* ecx is zerorest also */
-       jmp copy_user_handle_tail
-       .previous
-
-       _ASM_EXTABLE(100b,103b)
-       _ASM_EXTABLE(101b,103b)
-#endif
-       .endm
-
-/*
- * copy_user_nocache - Uncached memory copy with exception handling
- * This will force destination/source out of cache for more performance.
- */
-ENTRY(__copy_user_nocache)
-       CFI_STARTPROC
-       ASM_STAC
-       cmpl $8,%edx
-       jb 20f          /* less then 8 bytes, go to byte copy loop */
-       ALIGN_DESTINATION
-       movl %edx,%ecx
-       andl $63,%edx
-       shrl $6,%ecx
-       jz 17f
-1:     movq (%rsi),%r8
-2:     movq 1*8(%rsi),%r9
-3:     movq 2*8(%rsi),%r10
-4:     movq 3*8(%rsi),%r11
-5:     movnti %r8,(%rdi)
-6:     movnti %r9,1*8(%rdi)
-7:     movnti %r10,2*8(%rdi)
-8:     movnti %r11,3*8(%rdi)
-9:     movq 4*8(%rsi),%r8
-10:    movq 5*8(%rsi),%r9
-11:    movq 6*8(%rsi),%r10
-12:    movq 7*8(%rsi),%r11
-13:    movnti %r8,4*8(%rdi)
-14:    movnti %r9,5*8(%rdi)
-15:    movnti %r10,6*8(%rdi)
-16:    movnti %r11,7*8(%rdi)
-       leaq 64(%rsi),%rsi
-       leaq 64(%rdi),%rdi
-       decl %ecx
-       jnz 1b
-17:    movl %edx,%ecx
-       andl $7,%edx
-       shrl $3,%ecx
-       jz 20f
-18:    movq (%rsi),%r8
-19:    movnti %r8,(%rdi)
-       leaq 8(%rsi),%rsi
-       leaq 8(%rdi),%rdi
-       decl %ecx
-       jnz 18b
-20:    andl %edx,%edx
-       jz 23f
-       movl %edx,%ecx
-21:    movb (%rsi),%al
-22:    movb %al,(%rdi)
-       incq %rsi
-       incq %rdi
-       decl %ecx
-       jnz 21b
-23:    xorl %eax,%eax
-       ASM_CLAC
-       sfence
-       ret
-
-       .section .fixup,"ax"
-30:    shll $6,%ecx
-       addl %ecx,%edx
-       jmp 60f
-40:    lea (%rdx,%rcx,8),%rdx
-       jmp 60f
-50:    movl %ecx,%edx
-60:    sfence
-       jmp copy_user_handle_tail
-       .previous
-
-       _ASM_EXTABLE(1b,30b)
-       _ASM_EXTABLE(2b,30b)
-       _ASM_EXTABLE(3b,30b)
-       _ASM_EXTABLE(4b,30b)
-       _ASM_EXTABLE(5b,30b)
-       _ASM_EXTABLE(6b,30b)
-       _ASM_EXTABLE(7b,30b)
-       _ASM_EXTABLE(8b,30b)
-       _ASM_EXTABLE(9b,30b)
-       _ASM_EXTABLE(10b,30b)
-       _ASM_EXTABLE(11b,30b)
-       _ASM_EXTABLE(12b,30b)
-       _ASM_EXTABLE(13b,30b)
-       _ASM_EXTABLE(14b,30b)
-       _ASM_EXTABLE(15b,30b)
-       _ASM_EXTABLE(16b,30b)
-       _ASM_EXTABLE(18b,40b)
-       _ASM_EXTABLE(19b,40b)
-       _ASM_EXTABLE(21b,50b)
-       _ASM_EXTABLE(22b,50b)
-       CFI_ENDPROC
-ENDPROC(__copy_user_nocache)
index 9734182966f3be925a38c0762aa71e3148156148..7e48807b2fa198c4e2b8e7022df2218308a41b7c 100644 (file)
@@ -6,7 +6,6 @@
  * for more details. No warranty for anything given at all.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
 
 
 
 ENTRY(csum_partial_copy_generic)
-       CFI_STARTPROC
        cmpl    $3*64, %edx
        jle     .Lignore
 
 .Lignore:
        subq  $7*8, %rsp
-       CFI_ADJUST_CFA_OFFSET 7*8
        movq  %rbx, 2*8(%rsp)
-       CFI_REL_OFFSET rbx, 2*8
        movq  %r12, 3*8(%rsp)
-       CFI_REL_OFFSET r12, 3*8
        movq  %r14, 4*8(%rsp)
-       CFI_REL_OFFSET r14, 4*8
        movq  %r13, 5*8(%rsp)
-       CFI_REL_OFFSET r13, 5*8
        movq  %rbp, 6*8(%rsp)
-       CFI_REL_OFFSET rbp, 6*8
 
        movq  %r8, (%rsp)
        movq  %r9, 1*8(%rsp)
@@ -206,22 +198,14 @@ ENTRY(csum_partial_copy_generic)
        addl %ebx, %eax
        adcl %r9d, %eax         /* carry */
 
-       CFI_REMEMBER_STATE
 .Lende:
        movq 2*8(%rsp), %rbx
-       CFI_RESTORE rbx
        movq 3*8(%rsp), %r12
-       CFI_RESTORE r12
        movq 4*8(%rsp), %r14
-       CFI_RESTORE r14
        movq 5*8(%rsp), %r13
-       CFI_RESTORE r13
        movq 6*8(%rsp), %rbp
-       CFI_RESTORE rbp
        addq $7*8, %rsp
-       CFI_ADJUST_CFA_OFFSET -7*8
        ret
-       CFI_RESTORE_STATE
 
        /* Exception handlers. Very simple, zeroing is done in the wrappers */
 .Lbad_source:
@@ -237,5 +221,4 @@ ENTRY(csum_partial_copy_generic)
        jz   .Lende
        movl $-EFAULT, (%rax)
        jmp .Lende
-       CFI_ENDPROC
 ENDPROC(csum_partial_copy_generic)
index a4512359656aea8fcda1d2c19d45f3bb4936bee3..46668cda4ffdfd5af38abe5ca973ca15aa19d8d8 100644 (file)
@@ -26,7 +26,6 @@
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/page_types.h>
 #include <asm/errno.h>
 #include <asm/asm-offsets.h>
@@ -36,7 +35,6 @@
 
        .text
 ENTRY(__get_user_1)
-       CFI_STARTPROC
        GET_THREAD_INFO(%_ASM_DX)
        cmp TI_addr_limit(%_ASM_DX),%_ASM_AX
        jae bad_get_user
@@ -45,11 +43,9 @@ ENTRY(__get_user_1)
        xor %eax,%eax
        ASM_CLAC
        ret
-       CFI_ENDPROC
 ENDPROC(__get_user_1)
 
 ENTRY(__get_user_2)
-       CFI_STARTPROC
        add $1,%_ASM_AX
        jc bad_get_user
        GET_THREAD_INFO(%_ASM_DX)
@@ -60,11 +56,9 @@ ENTRY(__get_user_2)
        xor %eax,%eax
        ASM_CLAC
        ret
-       CFI_ENDPROC
 ENDPROC(__get_user_2)
 
 ENTRY(__get_user_4)
-       CFI_STARTPROC
        add $3,%_ASM_AX
        jc bad_get_user
        GET_THREAD_INFO(%_ASM_DX)
@@ -75,11 +69,9 @@ ENTRY(__get_user_4)
        xor %eax,%eax
        ASM_CLAC
        ret
-       CFI_ENDPROC
 ENDPROC(__get_user_4)
 
 ENTRY(__get_user_8)
-       CFI_STARTPROC
 #ifdef CONFIG_X86_64
        add $7,%_ASM_AX
        jc bad_get_user
@@ -104,28 +96,23 @@ ENTRY(__get_user_8)
        ASM_CLAC
        ret
 #endif
-       CFI_ENDPROC
 ENDPROC(__get_user_8)
 
 
 bad_get_user:
-       CFI_STARTPROC
        xor %edx,%edx
        mov $(-EFAULT),%_ASM_AX
        ASM_CLAC
        ret
-       CFI_ENDPROC
 END(bad_get_user)
 
 #ifdef CONFIG_X86_32
 bad_get_user_8:
-       CFI_STARTPROC
        xor %edx,%edx
        xor %ecx,%ecx
        mov $(-EFAULT),%_ASM_AX
        ASM_CLAC
        ret
-       CFI_ENDPROC
 END(bad_get_user_8)
 #endif
 
index 05a95e713da885e8686bc2bba1fdf544c03d2d74..33147fef3452ce5bad3a6540ad5234ada774d96a 100644 (file)
  */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * override generic version in lib/iomap_copy.c
  */
 ENTRY(__iowrite32_copy)
-       CFI_STARTPROC
        movl %edx,%ecx
        rep movsd
        ret
-       CFI_ENDPROC
 ENDPROC(__iowrite32_copy)
index b046664f5a1ccf37f3c94b2ed7d8d5b3298e89bf..16698bba87deb9ff593d934e8a8fb1447be9b214 100644 (file)
@@ -2,7 +2,6 @@
 
 #include <linux/linkage.h>
 #include <asm/cpufeature.h>
-#include <asm/dwarf2.h>
 #include <asm/alternative-asm.h>
 
 /*
@@ -53,7 +52,6 @@ ENTRY(memcpy_erms)
 ENDPROC(memcpy_erms)
 
 ENTRY(memcpy_orig)
-       CFI_STARTPROC
        movq %rdi, %rax
 
        cmpq $0x20, %rdx
@@ -178,5 +176,4 @@ ENTRY(memcpy_orig)
 
 .Lend:
        retq
-       CFI_ENDPROC
 ENDPROC(memcpy_orig)
index 0f8a0d0331b91715238f01e8f54ce74525fb0cc4..ca2afdd6d98ed2be90da6f9ea1624beb102f0fc3 100644 (file)
@@ -6,7 +6,6 @@
  *     - Copyright 2011 Fenghua Yu <fenghua.yu@intel.com>
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -27,7 +26,6 @@
 
 ENTRY(memmove)
 ENTRY(__memmove)
-       CFI_STARTPROC
 
        /* Handle more than 32 bytes in the loop */
        mov %rdi, %rax
@@ -207,6 +205,5 @@ ENTRY(__memmove)
        movb %r11b, (%rdi)
 13:
        retq
-       CFI_ENDPROC
 ENDPROC(__memmove)
 ENDPROC(memmove)
index 93118fb239762ba78efd754d20a756aed0221411..2661fad0582716f780af9904dc5b7c62199a5c58 100644 (file)
@@ -1,7 +1,6 @@
 /* Copyright 2002 Andi Kleen, SuSE Labs */
 
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/cpufeature.h>
 #include <asm/alternative-asm.h>
 
@@ -66,7 +65,6 @@ ENTRY(memset_erms)
 ENDPROC(memset_erms)
 
 ENTRY(memset_orig)
-       CFI_STARTPROC
        movq %rdi,%r10
 
        /* expand byte value  */
@@ -78,7 +76,6 @@ ENTRY(memset_orig)
        movl  %edi,%r9d
        andl  $7,%r9d
        jnz  .Lbad_alignment
-       CFI_REMEMBER_STATE
 .Lafter_bad_alignment:
 
        movq  %rdx,%rcx
@@ -128,7 +125,6 @@ ENTRY(memset_orig)
        movq    %r10,%rax
        ret
 
-       CFI_RESTORE_STATE
 .Lbad_alignment:
        cmpq $7,%rdx
        jbe     .Lhandle_7
@@ -139,5 +135,4 @@ ENTRY(memset_orig)
        subq %r8,%rdx
        jmp .Lafter_bad_alignment
 .Lfinal:
-       CFI_ENDPROC
 ENDPROC(memset_orig)
index c9f2d9ba8dd8c2da54b0bbd07c37be1d38aa49c4..e5e3ed8dc0798bd007e8573ddbf57dc4e2312049 100644 (file)
@@ -22,7 +22,7 @@
 #include <linux/sched.h>
 #include <linux/types.h>
 
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 #include <asm/asm.h>
 
 void *_mmx_memcpy(void *to, const void *from, size_t len)
index 3ca5218fbece0c568ed6079ee2370ff1cedf71f8..c81556409bbb87cfbfb5b2b8b2cdd26e8e916730 100644 (file)
@@ -1,6 +1,5 @@
 #include <linux/linkage.h>
 #include <linux/errno.h>
-#include <asm/dwarf2.h>
 #include <asm/asm.h>
 #include <asm/msr.h>
 
@@ -13,9 +12,8 @@
  */
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-       CFI_STARTPROC
-       pushq_cfi_reg rbx
-       pushq_cfi_reg rbp
+       pushq %rbx
+       pushq %rbp
        movq    %rdi, %r10      /* Save pointer */
        xorl    %r11d, %r11d    /* Return value */
        movl    (%rdi), %eax
@@ -25,7 +23,6 @@ ENTRY(\op\()_safe_regs)
        movl    20(%rdi), %ebp
        movl    24(%rdi), %esi
        movl    28(%rdi), %edi
-       CFI_REMEMBER_STATE
 1:     \op
 2:     movl    %eax, (%r10)
        movl    %r11d, %eax     /* Return value */
@@ -35,16 +32,14 @@ ENTRY(\op\()_safe_regs)
        movl    %ebp, 20(%r10)
        movl    %esi, 24(%r10)
        movl    %edi, 28(%r10)
-       popq_cfi_reg rbp
-       popq_cfi_reg rbx
+       popq %rbp
+       popq %rbx
        ret
 3:
-       CFI_RESTORE_STATE
        movl    $-EIO, %r11d
        jmp     2b
 
        _ASM_EXTABLE(1b, 3b)
-       CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
@@ -52,13 +47,12 @@ ENDPROC(\op\()_safe_regs)
 
 .macro op_safe_regs op
 ENTRY(\op\()_safe_regs)
-       CFI_STARTPROC
-       pushl_cfi_reg ebx
-       pushl_cfi_reg ebp
-       pushl_cfi_reg esi
-       pushl_cfi_reg edi
-       pushl_cfi $0              /* Return value */
-       pushl_cfi %eax
+       pushl %ebx
+       pushl %ebp
+       pushl %esi
+       pushl %edi
+       pushl $0              /* Return value */
+       pushl %eax
        movl    4(%eax), %ecx
        movl    8(%eax), %edx
        movl    12(%eax), %ebx
@@ -66,32 +60,28 @@ ENTRY(\op\()_safe_regs)
        movl    24(%eax), %esi
        movl    28(%eax), %edi
        movl    (%eax), %eax
-       CFI_REMEMBER_STATE
 1:     \op
-2:     pushl_cfi %eax
+2:     pushl %eax
        movl    4(%esp), %eax
-       popl_cfi (%eax)
+       popl (%eax)
        addl    $4, %esp
-       CFI_ADJUST_CFA_OFFSET -4
        movl    %ecx, 4(%eax)
        movl    %edx, 8(%eax)
        movl    %ebx, 12(%eax)
        movl    %ebp, 20(%eax)
        movl    %esi, 24(%eax)
        movl    %edi, 28(%eax)
-       popl_cfi %eax
-       popl_cfi_reg edi
-       popl_cfi_reg esi
-       popl_cfi_reg ebp
-       popl_cfi_reg ebx
+       popl %eax
+       popl %edi
+       popl %esi
+       popl %ebp
+       popl %ebx
        ret
 3:
-       CFI_RESTORE_STATE
        movl    $-EIO, 4(%esp)
        jmp     2b
 
        _ASM_EXTABLE(1b, 3b)
-       CFI_ENDPROC
 ENDPROC(\op\()_safe_regs)
 .endm
 
index fc6ba17a7eec2a957fc2022d8a370bb1b9e5b15f..e0817a12d32362b4e69687c7b09e424580616e5d 100644 (file)
@@ -11,7 +11,6 @@
  * return value.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 #include <asm/thread_info.h>
 #include <asm/errno.h>
 #include <asm/asm.h>
  * as they get called from within inline assembly.
  */
 
-#define ENTER  CFI_STARTPROC ; \
-               GET_THREAD_INFO(%_ASM_BX)
+#define ENTER  GET_THREAD_INFO(%_ASM_BX)
 #define EXIT   ASM_CLAC ;      \
-               ret ;           \
-               CFI_ENDPROC
+               ret
 
 .text
 ENTRY(__put_user_1)
@@ -87,7 +84,6 @@ ENTRY(__put_user_8)
 ENDPROC(__put_user_8)
 
 bad_put_user:
-       CFI_STARTPROC
        movl $-EFAULT,%eax
        EXIT
 END(bad_put_user)
index 2322abe4da3b014aef389c03e177c37eec31b802..40027db991405ba123a508a7d9523afe04fd744b 100644 (file)
@@ -15,7 +15,6 @@
 
 #include <linux/linkage.h>
 #include <asm/alternative-asm.h>
-#include <asm/dwarf2.h>
 
 #define __ASM_HALF_REG(reg)    __ASM_SEL(reg, e##reg)
 #define __ASM_HALF_SIZE(inst)  __ASM_SEL(inst##w, inst##l)
  */
 
 #define save_common_regs \
-       pushl_cfi_reg ecx
+       pushl %ecx
 
 #define restore_common_regs \
-       popl_cfi_reg ecx
+       popl %ecx
 
        /* Avoid uglifying the argument copying x86-64 needs to do. */
        .macro movq src, dst
  */
 
 #define save_common_regs \
-       pushq_cfi_reg rdi; \
-       pushq_cfi_reg rsi; \
-       pushq_cfi_reg rcx; \
-       pushq_cfi_reg r8;  \
-       pushq_cfi_reg r9;  \
-       pushq_cfi_reg r10; \
-       pushq_cfi_reg r11
+       pushq %rdi; \
+       pushq %rsi; \
+       pushq %rcx; \
+       pushq %r8;  \
+       pushq %r9;  \
+       pushq %r10; \
+       pushq %r11
 
 #define restore_common_regs \
-       popq_cfi_reg r11; \
-       popq_cfi_reg r10; \
-       popq_cfi_reg r9; \
-       popq_cfi_reg r8; \
-       popq_cfi_reg rcx; \
-       popq_cfi_reg rsi; \
-       popq_cfi_reg rdi
+       popq %r11; \
+       popq %r10; \
+       popq %r9; \
+       popq %r8; \
+       popq %rcx; \
+       popq %rsi; \
+       popq %rdi
 
 #endif
 
 /* Fix up special calling conventions */
 ENTRY(call_rwsem_down_read_failed)
-       CFI_STARTPROC
        save_common_regs
-       __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+       __ASM_SIZE(push,) %__ASM_REG(dx)
        movq %rax,%rdi
        call rwsem_down_read_failed
-       __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+       __ASM_SIZE(pop,) %__ASM_REG(dx)
        restore_common_regs
        ret
-       CFI_ENDPROC
 ENDPROC(call_rwsem_down_read_failed)
 
 ENTRY(call_rwsem_down_write_failed)
-       CFI_STARTPROC
        save_common_regs
        movq %rax,%rdi
        call rwsem_down_write_failed
        restore_common_regs
        ret
-       CFI_ENDPROC
 ENDPROC(call_rwsem_down_write_failed)
 
 ENTRY(call_rwsem_wake)
-       CFI_STARTPROC
        /* do nothing if still outstanding active readers */
        __ASM_HALF_SIZE(dec) %__ASM_HALF_REG(dx)
        jnz 1f
@@ -116,17 +110,14 @@ ENTRY(call_rwsem_wake)
        call rwsem_wake
        restore_common_regs
 1:     ret
-       CFI_ENDPROC
 ENDPROC(call_rwsem_wake)
 
 ENTRY(call_rwsem_downgrade_wake)
-       CFI_STARTPROC
        save_common_regs
-       __ASM_SIZE(push,_cfi_reg) __ASM_REG(dx)
+       __ASM_SIZE(push,) %__ASM_REG(dx)
        movq %rax,%rdi
        call rwsem_downgrade_wake
-       __ASM_SIZE(pop,_cfi_reg) __ASM_REG(dx)
+       __ASM_SIZE(pop,) %__ASM_REG(dx)
        restore_common_regs
        ret
-       CFI_ENDPROC
 ENDPROC(call_rwsem_downgrade_wake)
diff --git a/arch/x86/lib/thunk_32.S b/arch/x86/lib/thunk_32.S
deleted file mode 100644 (file)
index 5eb7150..0000000
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Trampoline to trace irqs off. (otherwise CALLER_ADDR1 might crash)
- * Copyright 2008 by Steven Rostedt, Red Hat, Inc
- *  (inspired by Andi Kleen's thunk_64.S)
- * Subject to the GNU public license, v.2. No warranty of any kind.
- */
-       #include <linux/linkage.h>
-       #include <asm/asm.h>
-       #include <asm/dwarf2.h>
-
-       /* put return address in eax (arg1) */
-       .macro THUNK name, func, put_ret_addr_in_eax=0
-       .globl \name
-\name:
-       CFI_STARTPROC
-       pushl_cfi_reg eax
-       pushl_cfi_reg ecx
-       pushl_cfi_reg edx
-
-       .if \put_ret_addr_in_eax
-       /* Place EIP in the arg1 */
-       movl 3*4(%esp), %eax
-       .endif
-
-       call \func
-       popl_cfi_reg edx
-       popl_cfi_reg ecx
-       popl_cfi_reg eax
-       ret
-       CFI_ENDPROC
-       _ASM_NOKPROBE(\name)
-       .endm
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
-       THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
-#ifdef CONFIG_PREEMPT
-       THUNK ___preempt_schedule, preempt_schedule
-#ifdef CONFIG_CONTEXT_TRACKING
-       THUNK ___preempt_schedule_context, preempt_schedule_context
-#endif
-#endif
-
diff --git a/arch/x86/lib/thunk_64.S b/arch/x86/lib/thunk_64.S
deleted file mode 100644 (file)
index f89ba4e..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Save registers before calling assembly functions. This avoids
- * disturbance of register allocation in some inline assembly constructs.
- * Copyright 2001,2002 by Andi Kleen, SuSE Labs.
- * Added trace_hardirqs callers - Copyright 2007 Steven Rostedt, Red Hat, Inc.
- * Subject to the GNU public license, v.2. No warranty of any kind.
- */
-#include <linux/linkage.h>
-#include <asm/dwarf2.h>
-#include <asm/calling.h>
-#include <asm/asm.h>
-
-       /* rdi: arg1 ... normal C conventions. rax is saved/restored. */
-       .macro THUNK name, func, put_ret_addr_in_rdi=0
-       .globl \name
-\name:
-       CFI_STARTPROC
-
-       /* this one pushes 9 elems, the next one would be %rIP */
-       pushq_cfi_reg rdi
-       pushq_cfi_reg rsi
-       pushq_cfi_reg rdx
-       pushq_cfi_reg rcx
-       pushq_cfi_reg rax
-       pushq_cfi_reg r8
-       pushq_cfi_reg r9
-       pushq_cfi_reg r10
-       pushq_cfi_reg r11
-
-       .if \put_ret_addr_in_rdi
-       /* 9*8(%rsp) is return addr on stack */
-       movq_cfi_restore 9*8, rdi
-       .endif
-
-       call \func
-       jmp  restore
-       CFI_ENDPROC
-       _ASM_NOKPROBE(\name)
-       .endm
-
-#ifdef CONFIG_TRACE_IRQFLAGS
-       THUNK trace_hardirqs_on_thunk,trace_hardirqs_on_caller,1
-       THUNK trace_hardirqs_off_thunk,trace_hardirqs_off_caller,1
-#endif
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-       THUNK lockdep_sys_exit_thunk,lockdep_sys_exit
-#endif
-
-#ifdef CONFIG_PREEMPT
-       THUNK ___preempt_schedule, preempt_schedule
-#ifdef CONFIG_CONTEXT_TRACKING
-       THUNK ___preempt_schedule_context, preempt_schedule_context
-#endif
-#endif
-
-#if defined(CONFIG_TRACE_IRQFLAGS) \
- || defined(CONFIG_DEBUG_LOCK_ALLOC) \
- || defined(CONFIG_PREEMPT)
-       CFI_STARTPROC
-       CFI_ADJUST_CFA_OFFSET 9*8
-restore:
-       popq_cfi_reg r11
-       popq_cfi_reg r10
-       popq_cfi_reg r9
-       popq_cfi_reg r8
-       popq_cfi_reg rax
-       popq_cfi_reg rcx
-       popq_cfi_reg rdx
-       popq_cfi_reg rsi
-       popq_cfi_reg rdi
-       ret
-       CFI_ENDPROC
-       _ASM_NOKPROBE(restore)
-#endif
index e2f5e21c03b3044a14ed12cb460ea2e3c0a0e13f..91d93b95bd8685228b395c10e77d30e3a4303355 100644 (file)
@@ -647,7 +647,8 @@ EXPORT_SYMBOL(__copy_from_user_ll_nocache_nozero);
  * @from: Source address, in kernel space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from kernel space to user space.
  *
@@ -668,7 +669,8 @@ EXPORT_SYMBOL(_copy_to_user);
  * @from: Source address, in user space.
  * @n:    Number of bytes to copy.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Copy data from user space to kernel space.
  *
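
The kernel-doc change above tightens the sleeping rule: these copies may fault, and hence sleep, only when pagefaults are enabled. As a purely illustrative sketch (demo_read() is made up, not part of this patch), a typical sleeping caller in process context looks like:

	#include <linux/fs.h>
	#include <linux/uaccess.h>

	static ssize_t demo_read(struct file *file, char __user *buf,
				 size_t len, loff_t *ppos)
	{
		static const char msg[] = "hello\n";
		size_t n = len < sizeof(msg) ? len : sizeof(msg);

		/* copy_to_user() returns the number of bytes NOT copied;
		 * it may sleep here, so only call from user context. */
		if (copy_to_user(buf, msg, n))
			return -EFAULT;
		*ppos += n;               /* offset handling is simplified */
		return n;
	}
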
index dc8adad10a2f3881cdbca82b9a624dc17b88c4f8..dd76a05729b0106c95ae2bd925ca21a647214fd6 100644 (file)
@@ -30,7 +30,7 @@ static void fclex(void)
 }
 
 /* Needs to be externally visible */
-void finit_soft_fpu(struct i387_soft_struct *soft)
+void fpstate_init_soft(struct swregs_state *soft)
 {
        struct address *oaddr, *iaddr;
        memset(soft, 0, sizeof(*soft));
@@ -52,7 +52,7 @@ void finit_soft_fpu(struct i387_soft_struct *soft)
 
 void finit(void)
 {
-       finit_soft_fpu(&current->thread.fpu.state->soft);
+       fpstate_init_soft(&current->thread.fpu.state.soft);
 }
 
 /*
index 9b868124128d79699d6325dc579908bc575c3f4a..f37e84ab49f38e335bde57880a6cbe8640fb2c4b 100644 (file)
@@ -31,7 +31,7 @@
 #include <asm/traps.h>
 #include <asm/desc.h>
 #include <asm/user.h>
-#include <asm/i387.h>
+#include <asm/fpu/internal.h>
 
 #include "fpu_system.h"
 #include "fpu_emu.h"
@@ -147,13 +147,9 @@ void math_emulate(struct math_emu_info *info)
        unsigned long code_base = 0;
        unsigned long code_limit = 0;   /* Initialized to stop compiler warnings */
        struct desc_struct code_descriptor;
+       struct fpu *fpu = &current->thread.fpu;
 
-       if (!used_math()) {
-               if (init_fpu(current)) {
-                       do_group_exit(SIGKILL);
-                       return;
-               }
-       }
+       fpu__activate_curr(fpu);
 
 #ifdef RE_ENTRANT_CHECKING
        if (emulating) {
@@ -673,7 +669,7 @@ void math_abort(struct math_emu_info *info, unsigned int signal)
 #endif /* PARANOID */
 }
 
-#define S387 ((struct i387_soft_struct *)s387)
+#define S387 ((struct swregs_state *)s387)
 #define sstatus_word() \
   ((S387->swd & ~SW_Top & 0xffff) | ((S387->ftop << SW_Top_Shift) & SW_Top))
 
@@ -682,14 +678,14 @@ int fpregs_soft_set(struct task_struct *target,
                    unsigned int pos, unsigned int count,
                    const void *kbuf, const void __user *ubuf)
 {
-       struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
+       struct swregs_state *s387 = &target->thread.fpu.state.soft;
        void *space = s387->st_space;
        int ret;
        int offset, other, i, tags, regnr, tag, newtop;
 
        RE_ENTRANT_CHECK_OFF;
        ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, s387, 0,
-                                offsetof(struct i387_soft_struct, st_space));
+                                offsetof(struct swregs_state, st_space));
        RE_ENTRANT_CHECK_ON;
 
        if (ret)
@@ -734,7 +730,7 @@ int fpregs_soft_get(struct task_struct *target,
                    unsigned int pos, unsigned int count,
                    void *kbuf, void __user *ubuf)
 {
-       struct i387_soft_struct *s387 = &target->thread.fpu.state->soft;
+       struct swregs_state *s387 = &target->thread.fpu.state.soft;
        const void *space = s387->st_space;
        int ret;
        int offset = (S387->ftop & 7) * 10, other = 80 - offset;
@@ -752,7 +748,7 @@ int fpregs_soft_get(struct task_struct *target,
 #endif /* PECULIAR_486 */
 
        ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, s387, 0,
-                                 offsetof(struct i387_soft_struct, st_space));
+                                 offsetof(struct swregs_state, st_space));
 
        /* Copy all registers in stack order. */
        if (!ret)
index 2c614410a5f3978d646f87d2814edaf2ec383396..9ccecb61a4fa129a82028b27edc18b91a2f99042 100644 (file)
@@ -31,7 +31,7 @@
 #define SEG_EXPAND_DOWN(s)     (((s).b & ((1 << 11) | (1 << 10))) \
                                 == (1 << 10))
 
-#define I387                   (current->thread.fpu.state)
+#define I387                   (&current->thread.fpu.state)
 #define FPU_info               (I387->soft.info)
 
 #define FPU_CS                 (*(unsigned short *) &(FPU_info->regs->cs))
index 181c53bac3a7ee8881b8844bae66b951d9beecde..9dc909841739bf24b01d7d2e574c4870cc409cfd 100644 (file)
@@ -13,6 +13,7 @@
 #include <linux/hugetlb.h>             /* hstate_index_to_shift        */
 #include <linux/prefetch.h>            /* prefetchw                    */
 #include <linux/context_tracking.h>    /* exception_enter(), ...       */
+#include <linux/uaccess.h>             /* faulthandler_disabled()      */
 
 #include <asm/traps.h>                 /* dotraplinkage, ...           */
 #include <asm/pgalloc.h>               /* pgd_*(), ...                 */
@@ -1126,9 +1127,9 @@ __do_page_fault(struct pt_regs *regs, unsigned long error_code,
 
        /*
         * If we're in an interrupt, have no user context or are running
-        * in an atomic region then we must not take the fault:
+        * in a region with pagefaults disabled, then we must not take the fault:
         */
-       if (unlikely(in_atomic() || !mm)) {
+       if (unlikely(faulthandler_disabled() || !mm)) {
                bad_area_nosemaphore(regs, error_code, address);
                return;
        }
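
For context, faulthandler_disabled() is the new <linux/uaccess.h> helper this series introduces in place of the bare in_atomic() test; roughly (a sketch, consult the tree for the exact definition) it folds the old atomic-context check together with the new per-task pagefault-disable count:

	/* Sketch of the check the fault handler now performs: */
	#define faulthandler_disabled_sketch() \
		(pagefault_disabled() || in_atomic())
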
index 4500142bc4aa46429cb2be41a7ee3407426f6155..eecb207a2037080f9f5d74c36c300b217a4f7a82 100644 (file)
@@ -35,7 +35,7 @@ void *kmap_atomic_prot(struct page *page, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
-       /* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
+       preempt_disable();
        pagefault_disable();
 
        if (!PageHighMem(page))
@@ -100,6 +100,7 @@ void __kunmap_atomic(void *kvaddr)
 #endif
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
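
The explicit preempt_disable()/preempt_enable() pair matters because pagefault_disable() no longer implies disabling preemption in this series, while the atomic kmap slots are per-CPU. A hedged usage sketch (the function below is invented):

	#include <linux/highmem.h>
	#include <linux/string.h>

	/* Sketch: the per-CPU fixmap slot picked by kmap_atomic() is
	 * only valid while the task cannot migrate, which the added
	 * preempt_disable() now guarantees on its own. */
	static void example_zero_highpage(struct page *page)
	{
		void *vaddr = kmap_atomic(page); /* disables preemption and pagefaults */

		memset(vaddr, 0, PAGE_SIZE);
		kunmap_atomic(vaddr);            /* re-enables both */
	}
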
 
index 1d553186c4345c02be5c0152764d988cfe365ae1..8533b46e6bee565e242f8ea339d892b65206c97d 100644 (file)
@@ -40,7 +40,7 @@
  */
 uint16_t __cachemode2pte_tbl[_PAGE_CACHE_MODE_NUM] = {
        [_PAGE_CACHE_MODE_WB      ]     = 0         | 0        ,
-       [_PAGE_CACHE_MODE_WC      ]     = _PAGE_PWT | 0        ,
+       [_PAGE_CACHE_MODE_WC      ]     = 0         | _PAGE_PCD,
        [_PAGE_CACHE_MODE_UC_MINUS]     = 0         | _PAGE_PCD,
        [_PAGE_CACHE_MODE_UC      ]     = _PAGE_PWT | _PAGE_PCD,
        [_PAGE_CACHE_MODE_WT      ]     = 0         | _PAGE_PCD,
@@ -50,11 +50,11 @@ EXPORT_SYMBOL(__cachemode2pte_tbl);
 
 uint8_t __pte2cachemode_tbl[8] = {
        [__pte2cm_idx( 0        | 0         | 0        )] = _PAGE_CACHE_MODE_WB,
-       [__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_WC,
+       [__pte2cm_idx(_PAGE_PWT | 0         | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx( 0        | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | 0        )] = _PAGE_CACHE_MODE_UC,
        [__pte2cm_idx( 0        | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WB,
-       [__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_WC,
+       [__pte2cm_idx(_PAGE_PWT | 0         | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(0         | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC_MINUS,
        [__pte2cm_idx(_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)] = _PAGE_CACHE_MODE_UC,
 };
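
The effect of the table change, as a hedged worked example (the helper below is illustrative; the kernel's real accessor is cachemode2protval()): a WC request on a non-PAT system now resolves to the PCD bit, i.e. UC-, instead of the PWT bit that only a PAT-aware CPU would interpret as WC.

	/* Illustrative lookup of the tables above: */
	static inline unsigned long example_cachemode2protval(enum page_cache_mode pcm)
	{
		/* e.g. _PAGE_CACHE_MODE_WC now yields _PAGE_PCD (UC-) without PAT */
		return __cachemode2pte_tbl[pcm];
	}
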
index 9ca35fc60cfeaa1a8c461f76956cd22d587226a7..9c0ff045fdd4dec98832a5c6de9353174c9695f0 100644 (file)
@@ -59,6 +59,7 @@ void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
        unsigned long vaddr;
        int idx, type;
 
+       preempt_disable();
        pagefault_disable();
 
        type = kmap_atomic_idx_push();
@@ -77,13 +78,13 @@ void __iomem *
 iomap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
 {
        /*
-        * For non-PAT systems, promote PAGE_KERNEL_WC to PAGE_KERNEL_UC_MINUS.
-        * PAGE_KERNEL_WC maps to PWT, which translates to uncached if the
-        * MTRR is UC or WC.  UC_MINUS gets the real intention, of the
-        * user, which is "WC if the MTRR is WC, UC if you can't do that."
+        * For non-PAT systems, translate non-WB request to UC- just in
+        * case the caller set the PWT bit to prot directly without using
+        * pgprot_writecombine(). UC- translates to uncached if the MTRR
+        * is UC or WC. UC- gets the real intention of the user, which is
+        * "WC if the MTRR is WC, UC if you can't do that."
         */
-       if (!pat_enabled && pgprot_val(prot) ==
-           (__PAGE_KERNEL | cachemode2protval(_PAGE_CACHE_MODE_WC)))
+       if (!pat_enabled() && pgprot2cachemode(prot) != _PAGE_CACHE_MODE_WB)
                prot = __pgprot(__PAGE_KERNEL |
                                cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
 
@@ -117,5 +118,6 @@ iounmap_atomic(void __iomem *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL_GPL(iounmap_atomic);
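
A hedged usage sketch of the path above (pfn and value are invented): a WC mapping request is degraded to UC- on non-PAT systems exactly as the new comment describes, so callers can pass a write-combining pgprot unconditionally.

	#include <linux/io.h>
	#include <asm/iomap.h>

	/* Sketch: temporarily map one framebuffer page and poke it. */
	static void example_poke(unsigned long pfn, u32 val)
	{
		void __iomem *p = iomap_atomic_prot_pfn(pfn,
					pgprot_writecombine(PAGE_KERNEL));

		writel(val, p);
		iounmap_atomic(p);
	}
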
index 70e7444c68351641828b834fd9a9a4fc9cea3b65..cc5ccc415cc01ef8ea9e58b3f81a281c9ab412bf 100644 (file)
@@ -42,6 +42,9 @@ int ioremap_change_attr(unsigned long vaddr, unsigned long size,
        case _PAGE_CACHE_MODE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
+       case _PAGE_CACHE_MODE_WT:
+               err = _set_memory_wt(vaddr, nrpages);
+               break;
        case _PAGE_CACHE_MODE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
@@ -172,6 +175,10 @@ static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                prot = __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
                break;
+       case _PAGE_CACHE_MODE_WT:
+               prot = __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_WT));
+               break;
        case _PAGE_CACHE_MODE_WB:
                break;
        }
@@ -234,10 +241,11 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 {
        /*
         * Ideally, this should be:
-        *      pat_enabled ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
+        *      pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
-        * UC MINUS.
+        * UC MINUS. Drivers that are certain they need strong UC, or
+        * that can already be converted over to it, can use ioremap_uc().
         */
        enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;
 
@@ -246,6 +254,39 @@ void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
 }
 EXPORT_SYMBOL(ioremap_nocache);
 
+/**
+ * ioremap_uc     -   map bus memory into CPU space as strongly uncachable
+ * @phys_addr:    bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * ioremap_uc performs a platform specific sequence of operations to
+ * make bus memory CPU accessible via the readb/readw/readl/writeb/
+ * writew/writel functions and the other mmio helpers. The returned
+ * address is not guaranteed to be usable directly as a virtual
+ * address.
+ *
+ * This version of ioremap ensures that the memory is marked with a strong
+ * preference as completely uncachable on the CPU when possible. For non-PAT
+ * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
+ * systems this will set the PAT entry for the pages as strong UC.  This call
+ * will honor existing caching rules from things like the PCI bus. Note that
+ * there are other caches and buffers on many busses. In particular driver
+ * authors should read up on PCI writes.
+ *
+ * It's useful if some control registers are in such an area and
+ * write combining or read caching is not desirable.
+ *
+ * Must be freed with iounmap.
+ */
+void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
+{
+       enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;
+
+       return __ioremap_caller(phys_addr, size, pcm,
+                               __builtin_return_address(0));
+}
+EXPORT_SYMBOL_GPL(ioremap_uc);
+
 /**
  * ioremap_wc  -       map memory into CPU space write combined
  * @phys_addr: bus address of the memory
@@ -258,14 +299,28 @@ EXPORT_SYMBOL(ioremap_nocache);
  */
 void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
 {
-       if (pat_enabled)
-               return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
                                        __builtin_return_address(0));
-       else
-               return ioremap_nocache(phys_addr, size);
 }
 EXPORT_SYMBOL(ioremap_wc);
 
+/**
+ * ioremap_wt  -       map memory into CPU space write through
+ * @phys_addr: bus address of the memory
+ * @size:      size of the resource to map
+ *
+ * This version of ioremap ensures that the memory is marked write through.
+ * Write through stores data into memory while keeping the cache up-to-date.
+ *
+ * Must be freed with iounmap.
+ */
+void __iomem *ioremap_wt(resource_size_t phys_addr, unsigned long size)
+{
+       return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WT,
+                                       __builtin_return_address(0));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
 void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
 {
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
@@ -331,7 +386,7 @@ void iounmap(volatile void __iomem *addr)
 }
 EXPORT_SYMBOL(iounmap);
 
-int arch_ioremap_pud_supported(void)
+int __init arch_ioremap_pud_supported(void)
 {
 #ifdef CONFIG_X86_64
        return cpu_has_gbpages;
@@ -340,7 +395,7 @@ int arch_ioremap_pud_supported(void)
 #endif
 }
 
-int arch_ioremap_pmd_supported(void)
+int __init arch_ioremap_pmd_supported(void)
 {
        return cpu_has_pse;
 }
@@ -353,18 +408,18 @@ void *xlate_dev_mem_ptr(phys_addr_t phys)
 {
        unsigned long start  = phys &  PAGE_MASK;
        unsigned long offset = phys & ~PAGE_MASK;
-       unsigned long vaddr;
+       void *vaddr;
 
        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);
 
-       vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
+       vaddr = ioremap_cache(start, PAGE_SIZE);
        /* Only add the offset on success and return NULL if the ioremap() failed: */
        if (vaddr)
                vaddr += offset;
 
-       return (void *)vaddr;
+       return vaddr;
 }
 
 void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
@@ -373,7 +428,6 @@ void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
                return;
 
        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
-       return;
 }
 
 static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;
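
A hedged usage sketch for the two new mapping helpers added above (base address and register layout are invented): ioremap_uc() for control registers that must never be write-combined even under a WC MTRR, ioremap_wt() for buffers where reads may hit the cache but writes must reach memory.

	#include <linux/io.h>

	static int example_map_regs(resource_size_t base)
	{
		void __iomem *regs = ioremap_uc(base, PAGE_SIZE); /* strong UC */

		if (!regs)
			return -ENOMEM;
		writel(0x1, regs);	/* neither cached nor combined */
		iounmap(regs);
		return 0;
	}
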
index c439ec47821601c5b594bc1eec5abc529c5fd012..7a657f58bbea152057262a61e325c169f78bc516 100644 (file)
 #include <linux/syscalls.h>
 #include <linux/sched/sysctl.h>
 
-#include <asm/i387.h>
 #include <asm/insn.h>
 #include <asm/mman.h>
 #include <asm/mmu_context.h>
 #include <asm/mpx.h>
 #include <asm/processor.h>
-#include <asm/fpu-internal.h>
+#include <asm/fpu/internal.h>
+
+#define CREATE_TRACE_POINTS
+#include <asm/trace/mpx.h>
 
 static const char *mpx_mapping_name(struct vm_area_struct *vma)
 {
@@ -32,6 +34,22 @@ static int is_mpx_vma(struct vm_area_struct *vma)
        return (vma->vm_ops == &mpx_vma_ops);
 }
 
+static inline unsigned long mpx_bd_size_bytes(struct mm_struct *mm)
+{
+       if (is_64bit_mm(mm))
+               return MPX_BD_SIZE_BYTES_64;
+       else
+               return MPX_BD_SIZE_BYTES_32;
+}
+
+static inline unsigned long mpx_bt_size_bytes(struct mm_struct *mm)
+{
+       if (is_64bit_mm(mm))
+               return MPX_BT_SIZE_BYTES_64;
+       else
+               return MPX_BT_SIZE_BYTES_32;
+}
+
 /*
  * This is really a simplified "vm_mmap". It only handles MPX
  * bounds tables (the bounds directory is user-allocated).
@@ -47,8 +65,8 @@ static unsigned long mpx_mmap(unsigned long len)
        vm_flags_t vm_flags;
        struct vm_area_struct *vma;
 
-       /* Only bounds table and bounds directory can be allocated here */
-       if (len != MPX_BD_SIZE_BYTES && len != MPX_BT_SIZE_BYTES)
+       /* Only bounds table can be allocated here */
+       if (len != mpx_bt_size_bytes(mm))
                return -EINVAL;
 
        down_write(&mm->mmap_sem);
@@ -272,10 +290,9 @@ bad_opcode:
  *
  * The caller is expected to kfree() the returned siginfo_t.
  */
-siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
-                               struct xsave_struct *xsave_buf)
+siginfo_t *mpx_generate_siginfo(struct pt_regs *regs)
 {
-       struct bndreg *bndregs, *bndreg;
+       const struct bndreg *bndregs, *bndreg;
        siginfo_t *info = NULL;
        struct insn insn;
        uint8_t bndregno;
@@ -295,8 +312,8 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
                err = -EINVAL;
                goto err_out;
        }
-       /* get the bndregs _area_ of the xsave structure */
-       bndregs = get_xsave_addr(xsave_buf, XSTATE_BNDREGS);
+       /* get bndregs field from current task's xsave area */
+       bndregs = get_xsave_field_ptr(XSTATE_BNDREGS);
        if (!bndregs) {
                err = -EINVAL;
                goto err_out;
@@ -334,6 +351,7 @@ siginfo_t *mpx_generate_siginfo(struct pt_regs *regs,
                err = -EINVAL;
                goto err_out;
        }
+       trace_mpx_bounds_register_exception(info->si_addr, bndreg);
        return info;
 err_out:
        /* info might be NULL, but kfree() handles that */
@@ -341,25 +359,18 @@ err_out:
        return ERR_PTR(err);
 }
 
-static __user void *task_get_bounds_dir(struct task_struct *tsk)
+static __user void *mpx_get_bounds_dir(void)
 {
-       struct bndcsr *bndcsr;
+       const struct bndcsr *bndcsr;
 
        if (!cpu_feature_enabled(X86_FEATURE_MPX))
                return MPX_INVALID_BOUNDS_DIR;
 
-       /*
-        * 32-bit binaries on 64-bit kernels are currently
-        * unsupported.
-        */
-       if (IS_ENABLED(CONFIG_X86_64) && test_thread_flag(TIF_IA32))
-               return MPX_INVALID_BOUNDS_DIR;
        /*
         * The bounds directory pointer is stored in a register
         * only accessible if we first do an xsave.
         */
-       fpu_save_init(&tsk->thread.fpu);
-       bndcsr = get_xsave_addr(&tsk->thread.fpu.state->xsave, XSTATE_BNDCSR);
+       bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
        if (!bndcsr)
                return MPX_INVALID_BOUNDS_DIR;
 
@@ -378,10 +389,10 @@ static __user void *task_get_bounds_dir(struct task_struct *tsk)
                (bndcsr->bndcfgu & MPX_BNDCFG_ADDR_MASK);
 }
 
-int mpx_enable_management(struct task_struct *tsk)
+int mpx_enable_management(void)
 {
        void __user *bd_base = MPX_INVALID_BOUNDS_DIR;
-       struct mm_struct *mm = tsk->mm;
+       struct mm_struct *mm = current->mm;
        int ret = 0;
 
        /*
@@ -390,11 +401,12 @@ int mpx_enable_management(struct task_struct *tsk)
         * directory into XSAVE/XRSTOR Save Area and enable MPX through
         * XRSTOR instruction.
         *
-        * fpu_xsave() is expected to be very expensive. Storing the bounds
-        * directory here means that we do not have to do xsave in the unmap
-        * path; we can just use mm->bd_addr instead.
+        * The copy_xregs_to_kernel() beneath get_xsave_field_ptr() is
+        * expected to be relatively expensive. Storing the bounds
+        * directory here means that we do not have to do xsave in the
+        * unmap path; we can just use mm->bd_addr instead.
         */
-       bd_base = task_get_bounds_dir(tsk);
+       bd_base = mpx_get_bounds_dir();
        down_write(&mm->mmap_sem);
        mm->bd_addr = bd_base;
        if (mm->bd_addr == MPX_INVALID_BOUNDS_DIR)
@@ -404,7 +416,7 @@ int mpx_enable_management(struct task_struct *tsk)
        return ret;
 }
 
-int mpx_disable_management(struct task_struct *tsk)
+int mpx_disable_management(void)
 {
        struct mm_struct *mm = current->mm;
 
@@ -417,29 +429,59 @@ int mpx_disable_management(struct task_struct *tsk)
        return 0;
 }
 
+static int mpx_cmpxchg_bd_entry(struct mm_struct *mm,
+               unsigned long *curval,
+               unsigned long __user *addr,
+               unsigned long old_val, unsigned long new_val)
+{
+       int ret;
+       /*
+        * user_atomic_cmpxchg_inatomic() actually uses the sizeof()
+        * of the pointer that we pass to it to figure out how much
+        * data to cmpxchg.  We have to be careful here not to
+        * pass a pointer to a 64-bit data type when we only want
+        * a 32-bit copy.
+        */
+       if (is_64bit_mm(mm)) {
+               ret = user_atomic_cmpxchg_inatomic(curval,
+                               addr, old_val, new_val);
+       } else {
+               u32 uninitialized_var(curval_32);
+               u32 old_val_32 = old_val;
+               u32 new_val_32 = new_val;
+               u32 __user *addr_32 = (u32 __user *)addr;
+
+               ret = user_atomic_cmpxchg_inatomic(&curval_32,
+                               addr_32, old_val_32, new_val_32);
+               *curval = curval_32;
+       }
+       return ret;
+}
+
 /*
- * With 32-bit mode, MPX_BT_SIZE_BYTES is 4MB, and the size of each
- * bounds table is 16KB. With 64-bit mode, MPX_BT_SIZE_BYTES is 2GB,
+ * With 32-bit mode, a bounds directory is 4MB, and the size of each
+ * bounds table is 16KB. With 64-bit mode, a bounds directory is 2GB,
  * and the size of each bounds table is 4MB.
  */
-static int allocate_bt(long __user *bd_entry)
+static int allocate_bt(struct mm_struct *mm, long __user *bd_entry)
 {
        unsigned long expected_old_val = 0;
        unsigned long actual_old_val = 0;
        unsigned long bt_addr;
+       unsigned long bd_new_entry;
        int ret = 0;
 
        /*
         * Carve the virtual space out of userspace for the new
         * bounds table:
         */
-       bt_addr = mpx_mmap(MPX_BT_SIZE_BYTES);
+       bt_addr = mpx_mmap(mpx_bt_size_bytes(mm));
        if (IS_ERR((void *)bt_addr))
                return PTR_ERR((void *)bt_addr);
        /*
         * Set the valid flag (kinda like _PAGE_PRESENT in a pte)
         */
-       bt_addr = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
+       bd_new_entry = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
 
        /*
         * Go poke the address of the new bounds table in to the
@@ -452,8 +494,8 @@ static int allocate_bt(long __user *bd_entry)
         * mmap_sem at this point, unlike some of the other part
         * of the MPX code that have to pagefault_disable().
         */
-       ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
-                                          expected_old_val, bt_addr);
+       ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val, bd_entry,
+                                  expected_old_val, bd_new_entry);
        if (ret)
                goto out_unmap;
 
@@ -481,9 +523,10 @@ static int allocate_bt(long __user *bd_entry)
                ret = -EINVAL;
                goto out_unmap;
        }
+       trace_mpx_new_bounds_table(bt_addr);
        return 0;
 out_unmap:
-       vm_munmap(bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
+       vm_munmap(bt_addr, mpx_bt_size_bytes(mm));
        return ret;
 }
 
@@ -498,12 +541,13 @@ out_unmap:
  * bounds table is 16KB. With 64-bit mode, the size of BD is 2GB,
  * and the size of each bounds table is 4MB.
  */
-static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
+static int do_mpx_bt_fault(void)
 {
        unsigned long bd_entry, bd_base;
-       struct bndcsr *bndcsr;
+       const struct bndcsr *bndcsr;
+       struct mm_struct *mm = current->mm;
 
-       bndcsr = get_xsave_addr(xsave_buf, XSTATE_BNDCSR);
+       bndcsr = get_xsave_field_ptr(XSTATE_BNDCSR);
        if (!bndcsr)
                return -EINVAL;
        /*
@@ -520,13 +564,13 @@ static int do_mpx_bt_fault(struct xsave_struct *xsave_buf)
         * the directory is.
         */
        if ((bd_entry < bd_base) ||
-           (bd_entry >= bd_base + MPX_BD_SIZE_BYTES))
+           (bd_entry >= bd_base + mpx_bd_size_bytes(mm)))
                return -EINVAL;
 
-       return allocate_bt((long __user *)bd_entry);
+       return allocate_bt(mm, (long __user *)bd_entry);
 }
 
-int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
+int mpx_handle_bd_fault(void)
 {
        /*
         * Userspace never asked us to manage the bounds tables,
@@ -535,7 +579,7 @@ int mpx_handle_bd_fault(struct xsave_struct *xsave_buf)
        if (!kernel_managing_mpx_tables(current->mm))
                return -EINVAL;
 
-       if (do_mpx_bt_fault(xsave_buf)) {
+       if (do_mpx_bt_fault()) {
                force_sig(SIGSEGV, current);
                /*
                 * The force_sig() is essentially "handling" this
@@ -572,29 +616,55 @@ static int mpx_resolve_fault(long __user *addr, int write)
        return 0;
 }
 
+static unsigned long mpx_bd_entry_to_bt_addr(struct mm_struct *mm,
+                                            unsigned long bd_entry)
+{
+       unsigned long bt_addr = bd_entry;
+       int align_to_bytes;
+       /*
+        * Bit 0 in a bt_entry is always the valid bit.
+        */
+       bt_addr &= ~MPX_BD_ENTRY_VALID_FLAG;
+       /*
+        * Tables are naturally aligned at 8-byte boundaries
+        * on 64-bit and 4-byte boundaries on 32-bit.  The
+        * documentation makes it appear that the low bits
+        * are ignored by the hardware, so we do the same.
+        */
+       if (is_64bit_mm(mm))
+               align_to_bytes = 8;
+       else
+               align_to_bytes = 4;
+       bt_addr &= ~(align_to_bytes-1);
+       return bt_addr;
+}
+
 /*
  * Get the base of bounds tables pointed by specific bounds
  * directory entry.
  */
 static int get_bt_addr(struct mm_struct *mm,
-                       long __user *bd_entry, unsigned long *bt_addr)
+                       long __user *bd_entry_ptr,
+                       unsigned long *bt_addr_result)
 {
        int ret;
        int valid_bit;
+       unsigned long bd_entry;
+       unsigned long bt_addr;
 
-       if (!access_ok(VERIFY_READ, (bd_entry), sizeof(*bd_entry)))
+       if (!access_ok(VERIFY_READ, (bd_entry_ptr), sizeof(*bd_entry_ptr)))
                return -EFAULT;
 
        while (1) {
                int need_write = 0;
 
                pagefault_disable();
-               ret = get_user(*bt_addr, bd_entry);
+               ret = get_user(bd_entry, bd_entry_ptr);
                pagefault_enable();
                if (!ret)
                        break;
                if (ret == -EFAULT)
-                       ret = mpx_resolve_fault(bd_entry, need_write);
+                       ret = mpx_resolve_fault(bd_entry_ptr, need_write);
                /*
                 * If we could not resolve the fault, consider it
                 * userspace's fault and error out.
@@ -603,8 +673,8 @@ static int get_bt_addr(struct mm_struct *mm,
                        return ret;
        }
 
-       valid_bit = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
-       *bt_addr &= MPX_BT_ADDR_MASK;
+       valid_bit = bd_entry & MPX_BD_ENTRY_VALID_FLAG;
+       bt_addr = mpx_bd_entry_to_bt_addr(mm, bd_entry);
 
        /*
         * When the kernel is managing bounds tables, a bounds directory
@@ -613,7 +683,7 @@ static int get_bt_addr(struct mm_struct *mm,
         * data in the address field, we know something is wrong. This
         * -EINVAL return will cause a SIGSEGV.
         */
-       if (!valid_bit && *bt_addr)
+       if (!valid_bit && bt_addr)
                return -EINVAL;
        /*
         * Do we have a completely zeroed bt entry?  That is OK.  It
@@ -624,19 +694,100 @@ static int get_bt_addr(struct mm_struct *mm,
        if (!valid_bit)
                return -ENOENT;
 
+       *bt_addr_result = bt_addr;
        return 0;
 }
 
+static inline int bt_entry_size_bytes(struct mm_struct *mm)
+{
+       if (is_64bit_mm(mm))
+               return MPX_BT_ENTRY_BYTES_64;
+       else
+               return MPX_BT_ENTRY_BYTES_32;
+}
+
+/*
+ * Takes a virtual address and turns it into the offset in bytes
+ * inside the bounds table where the bounds table entry
+ * controlling 'addr' can be found.
+ */
+static unsigned long mpx_get_bt_entry_offset_bytes(struct mm_struct *mm,
+               unsigned long addr)
+{
+       unsigned long bt_table_nr_entries;
+       unsigned long offset = addr;
+
+       if (is_64bit_mm(mm)) {
+               /* Bottom 3 bits are ignored on 64-bit */
+               offset >>= 3;
+               bt_table_nr_entries = MPX_BT_NR_ENTRIES_64;
+       } else {
+               /* Bottom 2 bits are ignored on 32-bit */
+               offset >>= 2;
+               bt_table_nr_entries = MPX_BT_NR_ENTRIES_32;
+       }
+       /*
+        * We know the size of the table into which we are
+        * indexing, and we have eliminated all the low bits
+        * which are ignored for indexing.
+        *
+        * Mask out all the high bits which we do not need
+        * to index into the table.  Note that the tables
+        * are always powers of two so this gives us a proper
+        * mask.
+        */
+       offset &= (bt_table_nr_entries-1);
+       /*
+        * We now have an entry offset in terms of *entries* in
+        * the table.  We need to scale it back up to bytes.
+        */
+       offset *= bt_entry_size_bytes(mm);
+       return offset;
+}
+
+/*
+ * How much virtual address space does a single bounds
+ * directory entry cover?
+ *
+ * Note, we need a long long because 4GB doesn't fit
+ * into a long on 32-bit.
+ */
+static inline unsigned long bd_entry_virt_space(struct mm_struct *mm)
+{
+       unsigned long long virt_space = (1ULL << boot_cpu_data.x86_virt_bits);
+       if (is_64bit_mm(mm))
+               return virt_space / MPX_BD_NR_ENTRIES_64;
+       else
+               return virt_space / MPX_BD_NR_ENTRIES_32;
+}
+
 /*
  * Free the backing physical pages of bounds table 'bt_addr'.
  * Assume start...end is within that bounds table.
  */
-static int zap_bt_entries(struct mm_struct *mm,
+static noinline int zap_bt_entries_mapping(struct mm_struct *mm,
                unsigned long bt_addr,
-               unsigned long start, unsigned long end)
+               unsigned long start_mapping, unsigned long end_mapping)
 {
        struct vm_area_struct *vma;
        unsigned long addr, len;
+       unsigned long start;
+       unsigned long end;
+
+       /*
+        * If we 'end' on a boundary, the offset will be 0, which
+        * is not what we want.  Back it up a byte to get the
+        * last bt entry.  Then once we have the entry itself,
+        * move 'end' back up by the table entry size.
+        */
+       start = bt_addr + mpx_get_bt_entry_offset_bytes(mm, start_mapping);
+       end   = bt_addr + mpx_get_bt_entry_offset_bytes(mm, end_mapping - 1);
+       /*
+        * Move end back up by one entry.  Among other things
+        * this ensures that it remains page-aligned and does
+        * not screw up zap_page_range().
+        */
+       end += bt_entry_size_bytes(mm);
 
        /*
         * Find the first overlapping vma. If vma->vm_start > start, there
@@ -648,7 +799,7 @@ static int zap_bt_entries(struct mm_struct *mm,
                return -EINVAL;
 
        /*
-        * A NUMA policy on a VM_MPX VMA could cause this bouds table to
+        * A NUMA policy on a VM_MPX VMA could cause this bounds table to
         * be split. So we need to look across the entire 'start -> end'
         * range of this bounds table, find all of the VM_MPX VMAs, and
         * zap only those.
@@ -666,27 +817,65 @@ static int zap_bt_entries(struct mm_struct *mm,
 
                len = min(vma->vm_end, end) - addr;
                zap_page_range(vma, addr, len, NULL);
+               trace_mpx_unmap_zap(addr, addr+len);
 
                vma = vma->vm_next;
                addr = vma->vm_start;
        }
-
        return 0;
 }
 
-static int unmap_single_bt(struct mm_struct *mm,
+static unsigned long mpx_get_bd_entry_offset(struct mm_struct *mm,
+               unsigned long addr)
+{
+       /*
+        * There are several ways to derive the bd offsets.  We
+        * use the following approach here:
+        * 1. We know the size of the virtual address space
+        * 2. We know the number of entries in a bounds table
+        * 3. We know that each entry covers a fixed amount of
+        *    virtual address space.
+        * So, we can just divide the virtual address by the
+        * virtual space used by one entry to determine which
+        * entry "controls" the given virtual address.
+        */
+       if (is_64bit_mm(mm)) {
+               int bd_entry_size = 8; /* 64-bit pointer */
+               /*
+                * Take the 64-bit addressing hole in to account.
+                */
+               addr &= ((1UL << boot_cpu_data.x86_virt_bits) - 1);
+               return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
+       } else {
+               int bd_entry_size = 4; /* 32-bit pointer */
+               /*
+                * 32-bit has no hole so this case needs no mask
+                */
+               return (addr / bd_entry_virt_space(mm)) * bd_entry_size;
+       }
+       /*
+        * The two return calls above are exact copies.  If we
+        * pull out a single copy and put it in here, gcc won't
+        * realize that we're doing a power-of-2 divide and use
+        * shifts.  It uses a real divide.  If we put them up
+        * there, it manages to figure it out (gcc 4.8.3).
+        */
+}
+
+static int unmap_entire_bt(struct mm_struct *mm,
                long __user *bd_entry, unsigned long bt_addr)
 {
        unsigned long expected_old_val = bt_addr | MPX_BD_ENTRY_VALID_FLAG;
-       unsigned long actual_old_val = 0;
+       unsigned long uninitialized_var(actual_old_val);
        int ret;
 
        while (1) {
                int need_write = 1;
+               unsigned long cleared_bd_entry = 0;
 
                pagefault_disable();
-               ret = user_atomic_cmpxchg_inatomic(&actual_old_val, bd_entry,
-                                                  expected_old_val, 0);
+               ret = mpx_cmpxchg_bd_entry(mm, &actual_old_val,
+                               bd_entry, expected_old_val, cleared_bd_entry);
                pagefault_enable();
                if (!ret)
                        break;
@@ -705,9 +894,8 @@ static int unmap_single_bt(struct mm_struct *mm,
        if (actual_old_val != expected_old_val) {
                /*
                 * Someone else raced with us to unmap the table.
-                * There was no bounds table pointed to by the
-                * directory, so declare success.  Somebody freed
-                * it.
+                * That is OK, since we were both trying to do
+                * the same thing.  Declare success.
                 */
                if (!actual_old_val)
                        return 0;
@@ -720,176 +908,113 @@ static int unmap_single_bt(struct mm_struct *mm,
                 */
                return -EINVAL;
        }
-
        /*
         * Note, we are likely being called under do_munmap() already. To
         * avoid recursion, do_munmap() will check whether it comes
         * from one bounds table through VM_MPX flag.
         */
-       return do_munmap(mm, bt_addr, MPX_BT_SIZE_BYTES);
+       return do_munmap(mm, bt_addr, mpx_bt_size_bytes(mm));
 }
 
-/*
- * If the bounds table pointed by bounds directory 'bd_entry' is
- * not shared, unmap this whole bounds table. Otherwise, only free
- * those backing physical pages of bounds table entries covered
- * in this virtual address region start...end.
- */
-static int unmap_shared_bt(struct mm_struct *mm,
-               long __user *bd_entry, unsigned long start,
-               unsigned long end, bool prev_shared, bool next_shared)
+static int try_unmap_single_bt(struct mm_struct *mm,
+              unsigned long start, unsigned long end)
 {
-       unsigned long bt_addr;
-       int ret;
-
-       ret = get_bt_addr(mm, bd_entry, &bt_addr);
+       struct vm_area_struct *next;
+       struct vm_area_struct *prev;
        /*
-        * We could see an "error" ret for not-present bounds
-        * tables (not really an error), or actual errors, but
-        * stop unmapping either way.
+        * "bta" == Bounds Table Area: the area controlled by the
+        * bounds table that we are unmapping.
         */
-       if (ret)
-               return ret;
-
-       if (prev_shared && next_shared)
-               ret = zap_bt_entries(mm, bt_addr,
-                               bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
-                               bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
-       else if (prev_shared)
-               ret = zap_bt_entries(mm, bt_addr,
-                               bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
-                               bt_addr+MPX_BT_SIZE_BYTES);
-       else if (next_shared)
-               ret = zap_bt_entries(mm, bt_addr, bt_addr,
-                               bt_addr+MPX_GET_BT_ENTRY_OFFSET(end));
-       else
-               ret = unmap_single_bt(mm, bd_entry, bt_addr);
-
-       return ret;
-}
-
-/*
- * A virtual address region being munmap()ed might share bounds table
- * with adjacent VMAs. We only need to free the backing physical
- * memory of these shared bounds tables entries covered in this virtual
- * address region.
- */
-static int unmap_edge_bts(struct mm_struct *mm,
-               unsigned long start, unsigned long end)
-{
+       unsigned long bta_start_vaddr = start & ~(bd_entry_virt_space(mm)-1);
+       unsigned long bta_end_vaddr = bta_start_vaddr + bd_entry_virt_space(mm);
+       unsigned long uninitialized_var(bt_addr);
+       void __user *bde_vaddr;
        int ret;
-       long __user *bde_start, *bde_end;
-       struct vm_area_struct *prev, *next;
-       bool prev_shared = false, next_shared = false;
-
-       bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
-       bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
-
        /*
-        * Check whether bde_start and bde_end are shared with adjacent
-        * VMAs.
-        *
-        * We already unliked the VMAs from the mm's rbtree so 'start'
+        * We already unlinked the VMAs from the mm's rbtree so 'start'
         * is guaranteed to be in a hole. This gets us the first VMA
         * before the hole into 'prev' and the next VMA after the hole
         * into 'next'.
         */
        next = find_vma_prev(mm, start, &prev);
-       if (prev && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1))
-                       == bde_start)
-               prev_shared = true;
-       if (next && (mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(next->vm_start))
-                       == bde_end)
-               next_shared = true;
-
        /*
-        * This virtual address region being munmap()ed is only
-        * covered by one bounds table.
-        *
-        * In this case, if this table is also shared with adjacent
-        * VMAs, only part of the backing physical memory of the bounds
-        * table need be freeed. Otherwise the whole bounds table need
-        * be unmapped.
-        */
-       if (bde_start == bde_end) {
-               return unmap_shared_bt(mm, bde_start, start, end,
-                               prev_shared, next_shared);
+        * Do not count other MPX bounds table VMAs as neighbors.
+        * Although theoretically possible, we do not allow bounds
+        * tables for bounds tables so our heads do not explode.
+        * If we count them as neighbors here, we may end up with
+        * lots of tables even though we have no actual table
+        * entries in use.
+        */
+       while (next && is_mpx_vma(next))
+               next = next->vm_next;
+       while (prev && is_mpx_vma(prev))
+               prev = prev->vm_prev;
+       /*
+        * We know 'start' and 'end' lie within an area controlled
+        * by a single bounds table.  See if there are any other
+        * VMAs controlled by that bounds table.  If there are none,
+        * then we can "expand" the area we are unmapping to possibly
+        * cover the entire table.
+        */
+       next = find_vma_prev(mm, start, &prev);
+       if ((!prev || prev->vm_end <= bta_start_vaddr) &&
+           (!next || next->vm_start >= bta_end_vaddr)) {
+               /*
+                * No neighbor VMAs controlled by same bounds
+                * table.  Try to unmap the whole thing
+                */
+               start = bta_start_vaddr;
+               end = bta_end_vaddr;
        }
 
+       bde_vaddr = mm->bd_addr + mpx_get_bd_entry_offset(mm, start);
+       ret = get_bt_addr(mm, bde_vaddr, &bt_addr);
        /*
-        * If more than one bounds tables are covered in this virtual
-        * address region being munmap()ed, we need to separately check
-        * whether bde_start and bde_end are shared with adjacent VMAs.
+        * No bounds table there, so nothing to unmap.
         */
-       ret = unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
-       if (ret)
-               return ret;
-       ret = unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
+       if (ret == -ENOENT) {
+               ret = 0;
+               return 0;
+       }
        if (ret)
                return ret;
-
-       return 0;
+       /*
+        * We are unmapping an entire table.  Either because the
+        * unmap that started this whole process was large enough
+        * to cover an entire table, or because the unmap was small
+        * but exactly covered the area of a single bounds table.
+        */
+       if ((start == bta_start_vaddr) &&
+           (end == bta_end_vaddr))
+               return unmap_entire_bt(mm, bde_vaddr, bt_addr);
+       return zap_bt_entries_mapping(mm, bt_addr, start, end);
 }
 
 static int mpx_unmap_tables(struct mm_struct *mm,
                unsigned long start, unsigned long end)
 {
-       int ret;
-       long __user *bd_entry, *bde_start, *bde_end;
-       unsigned long bt_addr;
-
-       /*
-        * "Edge" bounds tables are those which are being used by the region
-        * (start -> end), but that may be shared with adjacent areas.  If they
-        * turn out to be completely unshared, they will be freed.  If they are
-        * shared, we will free the backing store (like an MADV_DONTNEED) for
-        * areas used by this region.
-        */
-       ret = unmap_edge_bts(mm, start, end);
-       switch (ret) {
-               /* non-present tables are OK */
-               case 0:
-               case -ENOENT:
-                       /* Success, or no tables to unmap */
-                       break;
-               case -EINVAL:
-               case -EFAULT:
-               default:
-                       return ret;
-       }
-
-       /*
-        * Only unmap the bounds table that are
-        *   1. fully covered
-        *   2. not at the edges of the mapping, even if full aligned
-        */
-       bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
-       bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
-       for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
-               ret = get_bt_addr(mm, bd_entry, &bt_addr);
-               switch (ret) {
-                       case 0:
-                               break;
-                       case -ENOENT:
-                               /* No table here, try the next one */
-                               continue;
-                       case -EINVAL:
-                       case -EFAULT:
-                       default:
-                               /*
-                                * Note: we are being strict here.
-                                * Any time we run in to an issue
-                                * unmapping tables, we stop and
-                                * SIGSEGV.
-                                */
-                               return ret;
-               }
-
-               ret = unmap_single_bt(mm, bd_entry, bt_addr);
+       unsigned long one_unmap_start;
+       trace_mpx_unmap_search(start, end);
+
+       one_unmap_start = start;
+       while (one_unmap_start < end) {
+               int ret;
+               unsigned long next_unmap_start = ALIGN(one_unmap_start+1,
+                                                      bd_entry_virt_space(mm));
+               unsigned long one_unmap_end = end;
+               /*
+                * If the end is beyond the current bounds table,
+                * move it back so we only deal with a single one
+                * at a time.
+                */
+               if (one_unmap_end > next_unmap_start)
+                       one_unmap_end = next_unmap_start;
+               ret = try_unmap_single_bt(mm, one_unmap_start, one_unmap_end);
                if (ret)
                        return ret;
-       }
 
+               one_unmap_start = next_unmap_start;
+       }
        return 0;
 }
 
index 6629f397b4675a3c258a7e54fde7cb8367d2c185..8ff686aa7e8c23d8faec2bd0ff27491114897854 100644 (file)
@@ -9,6 +9,7 @@
 #include <linux/random.h>
 #include <linux/kernel.h>
 #include <linux/mm.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cacheflush.h>
 #include <asm/pgtable.h>
index 89af288ec6740cfd793a4c4804e939bb023e9453..727158cb3b3c91111f7bfe63a3b2fd9744972c1a 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/percpu.h>
 #include <linux/gfp.h>
 #include <linux/pci.h>
+#include <linux/vmalloc.h>
 
 #include <asm/e820.h>
 #include <asm/processor.h>
@@ -129,16 +130,15 @@ within(unsigned long addr, unsigned long start, unsigned long end)
  */
 void clflush_cache_range(void *vaddr, unsigned int size)
 {
-       void *vend = vaddr + size - 1;
+       unsigned long clflush_mask = boot_cpu_data.x86_clflush_size - 1;
+       void *vend = vaddr + size;
+       void *p;
 
        mb();
 
-       for (; vaddr < vend; vaddr += boot_cpu_data.x86_clflush_size)
-               clflushopt(vaddr);
-       /*
-        * Flush any possible final partial cacheline:
-        */
-       clflushopt(vend);
+       for (p = (void *)((unsigned long)vaddr & ~clflush_mask);
+            p < vend; p += boot_cpu_data.x86_clflush_size)
+               clflushopt(p);
 
        mb();
 }
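
The rewritten loop aligns the start down to a cache-line boundary, which makes the old trailing flush of a final partial line unnecessary; a small worked example (userspace arithmetic only, a 64-byte line size assumed):

	#include <stdio.h>

	int main(void)
	{
		unsigned long clflush_size = 64;	/* boot_cpu_data.x86_clflush_size */
		unsigned long clflush_mask = clflush_size - 1;
		unsigned long vaddr = 0x1038, size = 0x50;
		unsigned long vend = vaddr + size;	/* exclusive end: 0x1088 */
		unsigned long p;

		/* flushes lines 0x1000, 0x1040, 0x1080: the range is covered */
		for (p = vaddr & ~clflush_mask; p < vend; p += clflush_size)
			printf("clflushopt %#lx\n", p);
		return 0;
	}
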
@@ -418,13 +418,11 @@ phys_addr_t slow_virt_to_phys(void *__virt_addr)
        phys_addr_t phys_addr;
        unsigned long offset;
        enum pg_level level;
-       unsigned long psize;
        unsigned long pmask;
        pte_t *pte;
 
        pte = lookup_address(virt_addr, &level);
        BUG_ON(!pte);
-       psize = page_level_size(level);
        pmask = page_level_mask(level);
        offset = virt_addr & ~pmask;
        phys_addr = (phys_addr_t)pte_pfn(*pte) << PAGE_SHIFT;
@@ -1468,6 +1466,9 @@ int _set_memory_uc(unsigned long addr, int numpages)
 {
        /*
         * for now UC MINUS. see comments in ioremap_nocache()
+        * If you really need strong UC use ioremap_uc(), but note
+        * that you cannot override IO areas with set_memory_*() as
+        * these helpers cannot work with IO memory.
         */
        return change_page_attr_set(&addr, numpages,
                                    cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
@@ -1502,12 +1503,10 @@ EXPORT_SYMBOL(set_memory_uc);
 static int _set_memory_array(unsigned long *addr, int addrinarray,
                enum page_cache_mode new_type)
 {
+       enum page_cache_mode set_type;
        int i, j;
        int ret;
 
-       /*
-        * for now UC MINUS. see comments in ioremap_nocache()
-        */
        for (i = 0; i < addrinarray; i++) {
                ret = reserve_memtype(__pa(addr[i]), __pa(addr[i]) + PAGE_SIZE,
                                        new_type, NULL);
@@ -1515,9 +1514,12 @@ static int _set_memory_array(unsigned long *addr, int addrinarray,
                        goto out_free;
        }
 
+       /* If WC, set to UC- first and then WC */
+       set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
+                               _PAGE_CACHE_MODE_UC_MINUS : new_type;
+
        ret = change_page_attr_set(addr, addrinarray,
-                                  cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS),
-                                  1);
+                                  cachemode2pgprot(set_type), 1);
 
        if (!ret && new_type == _PAGE_CACHE_MODE_WC)
                ret = change_page_attr_set_clr(addr, addrinarray,
@@ -1549,6 +1551,12 @@ int set_memory_array_wc(unsigned long *addr, int addrinarray)
 }
 EXPORT_SYMBOL(set_memory_array_wc);
 
+int set_memory_array_wt(unsigned long *addr, int addrinarray)
+{
+       return _set_memory_array(addr, addrinarray, _PAGE_CACHE_MODE_WT);
+}
+EXPORT_SYMBOL_GPL(set_memory_array_wt);
+
 int _set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
@@ -1571,27 +1579,42 @@ int set_memory_wc(unsigned long addr, int numpages)
 {
        int ret;
 
-       if (!pat_enabled)
-               return set_memory_uc(addr, numpages);
-
        ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
                _PAGE_CACHE_MODE_WC, NULL);
        if (ret)
-               goto out_err;
+               return ret;
 
        ret = _set_memory_wc(addr, numpages);
        if (ret)
-               goto out_free;
-
-       return 0;
+               free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
 
-out_free:
-       free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
-out_err:
        return ret;
 }
 EXPORT_SYMBOL(set_memory_wc);
 
+int _set_memory_wt(unsigned long addr, int numpages)
+{
+       return change_page_attr_set(&addr, numpages,
+                                   cachemode2pgprot(_PAGE_CACHE_MODE_WT), 0);
+}
+
+int set_memory_wt(unsigned long addr, int numpages)
+{
+       int ret;
+
+       ret = reserve_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE,
+                             _PAGE_CACHE_MODE_WT, NULL);
+       if (ret)
+               return ret;
+
+       ret = _set_memory_wt(addr, numpages);
+       if (ret)
+               free_memtype(__pa(addr), __pa(addr) + numpages * PAGE_SIZE);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(set_memory_wt);
+
 int _set_memory_wb(unsigned long addr, int numpages)
 {
        /* WB cache mode is hard wired to all cache attribute bits being 0 */
@@ -1682,6 +1705,7 @@ static int _set_pages_array(struct page **pages, int addrinarray,
 {
        unsigned long start;
        unsigned long end;
+       enum page_cache_mode set_type;
        int i;
        int free_idx;
        int ret;
@@ -1695,8 +1719,12 @@ static int _set_pages_array(struct page **pages, int addrinarray,
                        goto err_out;
        }
 
+       /* If WC, set to UC- first and then WC */
+       set_type = (new_type == _PAGE_CACHE_MODE_WC) ?
+                               _PAGE_CACHE_MODE_UC_MINUS : new_type;
+
        ret = cpa_set_pages_array(pages, addrinarray,
-                       cachemode2pgprot(_PAGE_CACHE_MODE_UC_MINUS));
+                                 cachemode2pgprot(set_type));
        if (!ret && new_type == _PAGE_CACHE_MODE_WC)
                ret = change_page_attr_set_clr(NULL, addrinarray,
                                               cachemode2pgprot(
@@ -1730,6 +1758,12 @@ int set_pages_array_wc(struct page **pages, int addrinarray)
 }
 EXPORT_SYMBOL(set_pages_array_wc);
 
+int set_pages_array_wt(struct page **pages, int addrinarray)
+{
+       return _set_pages_array(pages, addrinarray, _PAGE_CACHE_MODE_WT);
+}
+EXPORT_SYMBOL_GPL(set_pages_array_wt);
+
 int set_pages_wb(struct page *page, int numpages)
 {
        unsigned long addr = (unsigned long)page_address(page);
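
A hedged usage sketch for the set_memory_wt() export added above (the function and buffer handling are invented; only the calls shown are from this patch): mark one kernel page write-through, then restore write-back before freeing, since set_memory_wt() also reserves the WT memtype.

	#include <linux/gfp.h>
	#include <asm/cacheflush.h>

	static int example_wt_page(void)
	{
		unsigned long addr = __get_free_page(GFP_KERNEL);
		int ret;

		if (!addr)
			return -ENOMEM;

		ret = set_memory_wt(addr, 1);	/* reserve WT memtype, rewrite PTE */
		if (ret) {
			free_page(addr);
			return ret;
		}

		/* ... writes now go straight to memory, reads may be cached ... */

		set_memory_wb(addr, 1);		/* drop the reservation, back to WB */
		free_page(addr);
		return 0;
	}
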
index 35af6771a95ad6c126cca1f352b896998116876e..188e3e07eeeba7c0eb6555c138a16e97c3e5d787 100644 (file)
 #include "pat_internal.h"
 #include "mm_internal.h"
 
-#ifdef CONFIG_X86_PAT
-int __read_mostly pat_enabled = 1;
+#undef pr_fmt
+#define pr_fmt(fmt) "" fmt
+
+static bool boot_cpu_done;
+
+static int __read_mostly __pat_enabled = IS_ENABLED(CONFIG_X86_PAT);
 
 static inline void pat_disable(const char *reason)
 {
-       pat_enabled = 0;
-       printk(KERN_INFO "%s\n", reason);
+       __pat_enabled = 0;
+       pr_info("x86/PAT: %s\n", reason);
 }
 
 static int __init nopat(char *str)
@@ -48,13 +52,12 @@ static int __init nopat(char *str)
        return 0;
 }
 early_param("nopat", nopat);
-#else
-static inline void pat_disable(const char *reason)
+
+bool pat_enabled(void)
 {
-       (void)reason;
+       return !!__pat_enabled;
 }
-#endif
-
+EXPORT_SYMBOL_GPL(pat_enabled);
 
 int pat_debug_enable;
 
@@ -65,22 +68,24 @@ static int __init pat_debug_setup(char *str)
 }
 __setup("debugpat", pat_debug_setup);
 
-static u64 __read_mostly boot_pat_state;
-
 #ifdef CONFIG_X86_PAT
 /*
- * X86 PAT uses page flags WC and Uncached together to keep track of
- * memory type of pages that have backing page struct. X86 PAT supports 3
- * different memory types, _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC and
- * _PAGE_CACHE_MODE_UC_MINUS and fourth state where page's memory type has not
- * been changed from its default (value of -1 used to denote this).
- * Note we do not support _PAGE_CACHE_MODE_UC here.
+ * X86 PAT uses page flags arch_1 and uncached together to keep track of
+ * memory type of pages that have backing page struct.
+ *
+ * X86 PAT supports 4 different memory types:
+ *  - _PAGE_CACHE_MODE_WB
+ *  - _PAGE_CACHE_MODE_WC
+ *  - _PAGE_CACHE_MODE_UC_MINUS
+ *  - _PAGE_CACHE_MODE_WT
+ *
+ * _PAGE_CACHE_MODE_WB is the default type.
  */
 
-#define _PGMT_DEFAULT          0
+#define _PGMT_WB               0
 #define _PGMT_WC               (1UL << PG_arch_1)
 #define _PGMT_UC_MINUS         (1UL << PG_uncached)
-#define _PGMT_WB               (1UL << PG_uncached | 1UL << PG_arch_1)
+#define _PGMT_WT               (1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_MASK             (1UL << PG_uncached | 1UL << PG_arch_1)
 #define _PGMT_CLEAR_MASK       (~_PGMT_MASK)
 
@@ -88,14 +93,14 @@ static inline enum page_cache_mode get_page_memtype(struct page *pg)
 {
        unsigned long pg_flags = pg->flags & _PGMT_MASK;
 
-       if (pg_flags == _PGMT_DEFAULT)
-               return -1;
+       if (pg_flags == _PGMT_WB)
+               return _PAGE_CACHE_MODE_WB;
        else if (pg_flags == _PGMT_WC)
                return _PAGE_CACHE_MODE_WC;
        else if (pg_flags == _PGMT_UC_MINUS)
                return _PAGE_CACHE_MODE_UC_MINUS;
        else
-               return _PAGE_CACHE_MODE_WB;
+               return _PAGE_CACHE_MODE_WT;
 }
 
 static inline void set_page_memtype(struct page *pg,
@@ -112,11 +117,12 @@ static inline void set_page_memtype(struct page *pg,
        case _PAGE_CACHE_MODE_UC_MINUS:
                memtype_flags = _PGMT_UC_MINUS;
                break;
-       case _PAGE_CACHE_MODE_WB:
-               memtype_flags = _PGMT_WB;
+       case _PAGE_CACHE_MODE_WT:
+               memtype_flags = _PGMT_WT;
                break;
+       case _PAGE_CACHE_MODE_WB:
        default:
-               memtype_flags = _PGMT_DEFAULT;
+               memtype_flags = _PGMT_WB;
                break;
        }
 
@@ -174,78 +180,154 @@ static enum page_cache_mode pat_get_cache_mode(unsigned pat_val, char *msg)
  * configuration.
  * Using lower indices is preferred, so we start with highest index.
  */
-void pat_init_cache_modes(void)
+void pat_init_cache_modes(u64 pat)
 {
-       int i;
        enum page_cache_mode cache;
        char pat_msg[33];
-       u64 pat;
+       int i;
 
-       rdmsrl(MSR_IA32_CR_PAT, pat);
        pat_msg[32] = 0;
        for (i = 7; i >= 0; i--) {
                cache = pat_get_cache_mode((pat >> (i * 8)) & 7,
                                           pat_msg + 4 * i);
                update_cache_mode_entry(i, cache);
        }
-       pr_info("PAT configuration [0-7]: %s\n", pat_msg);
+       pr_info("x86/PAT: Configuration [0-7]: %s\n", pat_msg);
 }
 
 #define PAT(x, y)      ((u64)PAT_ ## y << ((x)*8))
 
-void pat_init(void)
+static void pat_bsp_init(u64 pat)
 {
-       u64 pat;
-       bool boot_cpu = !boot_pat_state;
+       u64 tmp_pat;
 
-       if (!pat_enabled)
+       if (!cpu_has_pat) {
+               pat_disable("PAT not supported by CPU.");
                return;
+       }
 
-       if (!cpu_has_pat) {
-               if (!boot_pat_state) {
-                       pat_disable("PAT not supported by CPU.");
-                       return;
-               } else {
-                       /*
-                        * If this happens we are on a secondary CPU, but
-                        * switched to PAT on the boot CPU. We have no way to
-                        * undo PAT.
-                        */
-                       printk(KERN_ERR "PAT enabled, "
-                              "but not supported by secondary CPU\n");
-                       BUG();
-               }
+       if (!pat_enabled())
+               goto done;
+
+       rdmsrl(MSR_IA32_CR_PAT, tmp_pat);
+       if (!tmp_pat) {
+               pat_disable("PAT MSR is 0, disabled.");
+               return;
        }
 
-       /* Set PWT to Write-Combining. All other bits stay the same */
-       /*
-        * PTE encoding used in Linux:
-        *      PAT
-        *      |PCD
-        *      ||PWT
-        *      |||
-        *      000 WB          _PAGE_CACHE_WB
-        *      001 WC          _PAGE_CACHE_WC
-        *      010 UC-         _PAGE_CACHE_UC_MINUS
-        *      011 UC          _PAGE_CACHE_UC
-        * PAT bit unused
-        */
-       pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
-             PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
-
-       /* Boot CPU check */
-       if (!boot_pat_state) {
-               rdmsrl(MSR_IA32_CR_PAT, boot_pat_state);
-               if (!boot_pat_state) {
-                       pat_disable("PAT read returns always zero, disabled.");
-                       return;
-               }
+       wrmsrl(MSR_IA32_CR_PAT, pat);
+
+done:
+       pat_init_cache_modes(pat);
+}
+
+static void pat_ap_init(u64 pat)
+{
+       if (!pat_enabled())
+               return;
+
+       if (!cpu_has_pat) {
+               /*
+                * If this happens we are on a secondary CPU, but switched to
+                * PAT on the boot CPU. We have no way to undo PAT.
+                */
+               panic("x86/PAT: PAT enabled, but not supported by secondary CPU\n");
        }
 
        wrmsrl(MSR_IA32_CR_PAT, pat);
+}
+
+void pat_init(void)
+{
+       u64 pat;
+       struct cpuinfo_x86 *c = &boot_cpu_data;
+
+       if (!pat_enabled()) {
+               /*
+                * No PAT. Emulate the PAT table that corresponds to the two
+                * cache bits, PWT (Write Through) and PCD (Cache Disable). This
+                * setup is the same as the BIOS default setup when the system
+                * has PAT but the "nopat" boot option has been specified. This
+                * emulated PAT table is used when MSR_IA32_CR_PAT returns 0.
+                *
+                * PTE encoding:
+                *
+                *       PCD
+                *       |PWT  PAT
+                *       ||    slot
+                *       00    0    WB : _PAGE_CACHE_MODE_WB
+                *       01    1    WT : _PAGE_CACHE_MODE_WT
+                *       10    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
+                *       11    3    UC : _PAGE_CACHE_MODE_UC
+                *
+                * NOTE: When WC or WP is used, it is redirected to UC- per
+                * the default setup in __cachemode2pte_tbl[].
+                */
+               pat = PAT(0, WB) | PAT(1, WT) | PAT(2, UC_MINUS) | PAT(3, UC) |
+                     PAT(4, WB) | PAT(5, WT) | PAT(6, UC_MINUS) | PAT(7, UC);
 
-       if (boot_cpu)
-               pat_init_cache_modes();
+       } else if ((c->x86_vendor == X86_VENDOR_INTEL) &&
+                  (((c->x86 == 0x6) && (c->x86_model <= 0xd)) ||
+                   ((c->x86 == 0xf) && (c->x86_model <= 0x6)))) {
+               /*
+                * PAT support with the lower four entries. Intel Pentium 2,
+                * 3, M, and 4 are affected by PAT errata, which make the
+                * upper four entries unusable. To be on the safe side, we don't
+                * use those.
+                *
+                *  PTE encoding:
+                *      PAT
+                *      |PCD
+                *      ||PWT  PAT
+                *      |||    slot
+                *      000    0    WB : _PAGE_CACHE_MODE_WB
+                *      001    1    WC : _PAGE_CACHE_MODE_WC
+                *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
+                *      011    3    UC : _PAGE_CACHE_MODE_UC
+                * PAT bit unused
+                *
+                * NOTE: When WT or WP is used, it is redirected to UC- per
+                * the default setup in __cachemode2pte_tbl[].
+                */
+               pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
+                     PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, UC);
+       } else {
+               /*
+                * Full PAT support.  We put WT in slot 7 to improve
+                * robustness in the presence of errata that might cause
+                * the high PAT bit to be ignored.  This way, a buggy slot 7
+                * access will hit slot 3, and slot 3 is UC, so at worst
+                * we lose performance without causing a correctness issue.
+                * Pentium 4 erratum N46 is an example of such an erratum,
+                * although we try not to use PAT at all on affected CPUs.
+                *
+                *  PTE encoding:
+                *      PAT
+                *      |PCD
+                *      ||PWT  PAT
+                *      |||    slot
+                *      000    0    WB : _PAGE_CACHE_MODE_WB
+                *      001    1    WC : _PAGE_CACHE_MODE_WC
+                *      010    2    UC-: _PAGE_CACHE_MODE_UC_MINUS
+                *      011    3    UC : _PAGE_CACHE_MODE_UC
+                *      100    4    WB : Reserved
+                *      101    5    WC : Reserved
+                *      110    6    UC-: Reserved
+                *      111    7    WT : _PAGE_CACHE_MODE_WT
+                *
+                * The reserved slots are unused, but mapped to their
+                * corresponding types in the presence of PAT errata.
+                */
+               pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) | PAT(3, UC) |
+                     PAT(4, WB) | PAT(5, WC) | PAT(6, UC_MINUS) | PAT(7, WT);
+       }
+
+       if (!boot_cpu_done) {
+               pat_bsp_init(pat);
+               boot_cpu_done = true;
+       } else {
+               pat_ap_init(pat);
+       }
 }
 
 #undef PAT
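As a concrete check of the PAT() arithmetic: using the memory-type encodings from the Intel SDM (UC=0, WC=1, WT=4, WP=5, WB=6, UC-=7), the "full PAT support" layout above packs one type per byte of MSR_IA32_CR_PAT. A minimal user-space sketch of the computation:

    #include <stdio.h>
    #include <stdint.h>

    /* Memory-type encodings per the Intel SDM. */
    enum { PAT_UC = 0, PAT_WC = 1, PAT_WT = 4, PAT_WP = 5, PAT_WB = 6,
           PAT_UC_MINUS = 7 };

    #define PAT(x, y)       ((uint64_t)PAT_ ## y << ((x) * 8))

    int main(void)
    {
            /* The "full PAT support" table from pat_init() above. */
            uint64_t pat = PAT(0, WB) | PAT(1, WC) | PAT(2, UC_MINUS) |
                           PAT(3, UC) | PAT(4, WB) | PAT(5, WC) |
                           PAT(6, UC_MINUS) | PAT(7, WT);

            printf("MSR_IA32_CR_PAT = %#018llx\n",
                   (unsigned long long)pat);    /* 0x0407010600070106 */
            return 0;
    }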
@@ -267,9 +349,9 @@ static unsigned long pat_x_mtrr_type(u64 start, u64 end,
         * request is for WB.
         */
        if (req_type == _PAGE_CACHE_MODE_WB) {
-               u8 mtrr_type;
+               u8 mtrr_type, uniform;
 
-               mtrr_type = mtrr_type_lookup(start, end);
+               mtrr_type = mtrr_type_lookup(start, end, &uniform);
                if (mtrr_type != MTRR_TYPE_WRBACK)
                        return _PAGE_CACHE_MODE_UC_MINUS;
 
@@ -324,9 +406,14 @@ static int pat_pagerange_is_ram(resource_size_t start, resource_size_t end)
 
 /*
  * For RAM pages, we use page flags to mark the pages with appropriate type.
- * Here we do two pass:
- * - Find the memtype of all the pages in the range, look for any conflicts
- * - In case of no conflicts, set the new memtype for pages in the range
+ * The page flags are limited to four types, WB (default), WC, WT and UC-.
+ * WP request fails with -EINVAL, and UC gets redirected to UC-.  Setting
+ * a new memory type is only allowed for a page mapped with the default WB
+ * type.
+ *
+ * Here we do two passes:
+ * - Find the memtype of all the pages in the range, look for any conflicts.
+ * - In case of no conflicts, set the new memtype for pages in the range.
  */
 static int reserve_ram_pages_type(u64 start, u64 end,
                                  enum page_cache_mode req_type,
@@ -335,6 +422,12 @@ static int reserve_ram_pages_type(u64 start, u64 end,
        struct page *page;
        u64 pfn;
 
+       if (req_type == _PAGE_CACHE_MODE_WP) {
+               if (new_type)
+                       *new_type = _PAGE_CACHE_MODE_UC_MINUS;
+               return -EINVAL;
+       }
+
        if (req_type == _PAGE_CACHE_MODE_UC) {
                /* We do not support strong UC */
                WARN_ON_ONCE(1);
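The two-pass shape described above can be modeled without any kernel infrastructure; the array-backed state below is a stand-in for get/set_page_memtype(), not the real API:

    #include <errno.h>

    #define NPAGES  16
    static int memtype[NPAGES];     /* 0 == WB, the default state */

    static int set_range_memtype(unsigned long first, unsigned long last,
                                 int req)
    {
            unsigned long pfn;

            /* Pass 1: look for conflicts; every page must still be WB. */
            for (pfn = first; pfn < last; pfn++)
                    if (memtype[pfn] != 0)
                            return -EBUSY;

            /* Pass 2: no conflicts found, commit the requested type. */
            for (pfn = first; pfn < last; pfn++)
                    memtype[pfn] = req;
            return 0;
    }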
@@ -346,8 +439,8 @@ static int reserve_ram_pages_type(u64 start, u64 end,
 
                page = pfn_to_page(pfn);
                type = get_page_memtype(page);
-               if (type != -1) {
-                       pr_info("reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
+               if (type != _PAGE_CACHE_MODE_WB) {
+                       pr_info("x86/PAT: reserve_ram_pages_type failed [mem %#010Lx-%#010Lx], track 0x%x, req 0x%x\n",
                                start, end - 1, type, req_type);
                        if (new_type)
                                *new_type = type;
@@ -373,7 +466,7 @@ static int free_ram_pages_type(u64 start, u64 end)
 
        for (pfn = (start >> PAGE_SHIFT); pfn < (end >> PAGE_SHIFT); ++pfn) {
                page = pfn_to_page(pfn);
-               set_page_memtype(page, -1);
+               set_page_memtype(page, _PAGE_CACHE_MODE_WB);
        }
        return 0;
 }
@@ -384,6 +477,7 @@ static int free_ram_pages_type(u64 start, u64 end)
  * - _PAGE_CACHE_MODE_WC
  * - _PAGE_CACHE_MODE_UC_MINUS
  * - _PAGE_CACHE_MODE_UC
+ * - _PAGE_CACHE_MODE_WT
  *
  * If new_type is NULL, function will return an error if it cannot reserve the
  * region with req_type. If new_type is non-NULL, function will return
@@ -400,14 +494,10 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
        BUG_ON(start >= end); /* end is exclusive */
 
-       if (!pat_enabled) {
+       if (!pat_enabled()) {
                /* This is identical to page table setting without PAT */
-               if (new_type) {
-                       if (req_type == _PAGE_CACHE_MODE_WC)
-                               *new_type = _PAGE_CACHE_MODE_UC_MINUS;
-                       else
-                               *new_type = req_type;
-               }
+               if (new_type)
+                       *new_type = req_type;
                return 0;
        }
 
@@ -451,9 +541,9 @@ int reserve_memtype(u64 start, u64 end, enum page_cache_mode req_type,
 
        err = rbt_memtype_check_insert(new, new_type);
        if (err) {
-               printk(KERN_INFO "reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
-                      start, end - 1,
-                      cattr_name(new->type), cattr_name(req_type));
+               pr_info("x86/PAT: reserve_memtype failed [mem %#010Lx-%#010Lx], track %s, req %s\n",
+                       start, end - 1,
+                       cattr_name(new->type), cattr_name(req_type));
                kfree(new);
                spin_unlock(&memtype_lock);
 
@@ -475,7 +565,7 @@ int free_memtype(u64 start, u64 end)
        int is_range_ram;
        struct memtype *entry;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 0;
 
        /* Low ISA region is always mapped WB. No need to track */
@@ -497,8 +587,8 @@ int free_memtype(u64 start, u64 end)
        spin_unlock(&memtype_lock);
 
        if (!entry) {
-               printk(KERN_INFO "%s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
-                      current->comm, current->pid, start, end - 1);
+               pr_info("x86/PAT: %s:%d freeing invalid memtype [mem %#010Lx-%#010Lx]\n",
+                       current->comm, current->pid, start, end - 1);
                return -EINVAL;
        }
 
@@ -517,7 +607,7 @@ int free_memtype(u64 start, u64 end)
  * Only to be called when PAT is enabled
  *
  * Returns _PAGE_CACHE_MODE_WB, _PAGE_CACHE_MODE_WC, _PAGE_CACHE_MODE_UC_MINUS
- * or _PAGE_CACHE_MODE_UC
+ * or _PAGE_CACHE_MODE_WT.
  */
 static enum page_cache_mode lookup_memtype(u64 paddr)
 {
@@ -529,16 +619,9 @@ static enum page_cache_mode lookup_memtype(u64 paddr)
 
        if (pat_pagerange_is_ram(paddr, paddr + PAGE_SIZE)) {
                struct page *page;
-               page = pfn_to_page(paddr >> PAGE_SHIFT);
-               rettype = get_page_memtype(page);
-               /*
-                * -1 from get_page_memtype() implies RAM page is in its
-                * default state and not reserved, and hence of type WB
-                */
-               if (rettype == -1)
-                       rettype = _PAGE_CACHE_MODE_WB;
 
-               return rettype;
+               page = pfn_to_page(paddr >> PAGE_SHIFT);
+               return get_page_memtype(page);
        }
 
        spin_lock(&memtype_lock);
@@ -623,13 +706,13 @@ static inline int range_is_allowed(unsigned long pfn, unsigned long size)
        u64 to = from + size;
        u64 cursor = from;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 1;
 
        while (cursor < to) {
                if (!devmem_is_allowed(pfn)) {
-                       printk(KERN_INFO "Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
-                              current->comm, from, to - 1);
+                       pr_info("x86/PAT: Program %s tried to access /dev/mem between [mem %#010Lx-%#010Lx], PAT prevents it\n",
+                               current->comm, from, to - 1);
                        return 0;
                }
                cursor += PAGE_SIZE;
@@ -659,7 +742,7 @@ int phys_mem_access_prot_allowed(struct file *file, unsigned long pfn,
         * caching for the high addresses through the KEN pin, but
         * we maintain the tradition of paranoia in this code.
         */
-       if (!pat_enabled &&
+       if (!pat_enabled() &&
            !(boot_cpu_has(X86_FEATURE_MTRR) ||
              boot_cpu_has(X86_FEATURE_K6_MTRR) ||
              boot_cpu_has(X86_FEATURE_CYRIX_ARR) ||
@@ -698,8 +781,7 @@ int kernel_map_sync_memtype(u64 base, unsigned long size,
                                size;
 
        if (ioremap_change_attr((unsigned long)__va(base), id_sz, pcm) < 0) {
-               printk(KERN_INFO "%s:%d ioremap_change_attr failed %s "
-                       "for [mem %#010Lx-%#010Lx]\n",
+               pr_info("x86/PAT: %s:%d ioremap_change_attr failed %s for [mem %#010Lx-%#010Lx]\n",
                        current->comm, current->pid,
                        cattr_name(pcm),
                        base, (unsigned long long)(base + size-1));
@@ -729,12 +811,12 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
         * the type requested matches the type of first page in the range.
         */
        if (is_ram) {
-               if (!pat_enabled)
+               if (!pat_enabled())
                        return 0;
 
                pcm = lookup_memtype(paddr);
                if (want_pcm != pcm) {
-                       printk(KERN_WARNING "%s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
+                       pr_warn("x86/PAT: %s:%d map pfn RAM range req %s for [mem %#010Lx-%#010Lx], got %s\n",
                                current->comm, current->pid,
                                cattr_name(want_pcm),
                                (unsigned long long)paddr,
@@ -755,13 +837,12 @@ static int reserve_pfn_range(u64 paddr, unsigned long size, pgprot_t *vma_prot,
                if (strict_prot ||
                    !is_new_memtype_allowed(paddr, size, want_pcm, pcm)) {
                        free_memtype(paddr, paddr + size);
-                       printk(KERN_ERR "%s:%d map pfn expected mapping type %s"
-                               " for [mem %#010Lx-%#010Lx], got %s\n",
-                               current->comm, current->pid,
-                               cattr_name(want_pcm),
-                               (unsigned long long)paddr,
-                               (unsigned long long)(paddr + size - 1),
-                               cattr_name(pcm));
+                       pr_err("x86/PAT: %s:%d map pfn expected mapping type %s for [mem %#010Lx-%#010Lx], got %s\n",
+                              current->comm, current->pid,
+                              cattr_name(want_pcm),
+                              (unsigned long long)paddr,
+                              (unsigned long long)(paddr + size - 1),
+                              cattr_name(pcm));
                        return -EINVAL;
                }
                /*
@@ -844,7 +925,7 @@ int track_pfn_remap(struct vm_area_struct *vma, pgprot_t *prot,
                return ret;
        }
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 0;
 
        /*
@@ -872,7 +953,7 @@ int track_pfn_insert(struct vm_area_struct *vma, pgprot_t *prot,
 {
        enum page_cache_mode pcm;
 
-       if (!pat_enabled)
+       if (!pat_enabled())
                return 0;
 
        /* Set prot based on lookup */
@@ -913,14 +994,18 @@ void untrack_pfn(struct vm_area_struct *vma, unsigned long pfn,
 
 pgprot_t pgprot_writecombine(pgprot_t prot)
 {
-       if (pat_enabled)
-               return __pgprot(pgprot_val(prot) |
+       return __pgprot(pgprot_val(prot) |
                                cachemode2protval(_PAGE_CACHE_MODE_WC));
-       else
-               return pgprot_noncached(prot);
 }
 EXPORT_SYMBOL_GPL(pgprot_writecombine);
 
+pgprot_t pgprot_writethrough(pgprot_t prot)
+{
+       return __pgprot(pgprot_val(prot) |
+                               cachemode2protval(_PAGE_CACHE_MODE_WT));
+}
+EXPORT_SYMBOL_GPL(pgprot_writethrough);
+
 #if defined(CONFIG_DEBUG_FS) && defined(CONFIG_X86_PAT)
 
 static struct memtype *memtype_get_idx(loff_t pos)
@@ -996,7 +1081,7 @@ static const struct file_operations memtype_fops = {
 
 static int __init pat_memtype_list_init(void)
 {
-       if (pat_enabled) {
+       if (pat_enabled()) {
                debugfs_create_file("pat_memtype_list", S_IRUSR,
                                    arch_debugfs_dir, NULL, &memtype_fops);
        }
index f6411620305d89f89ba7d3430e252355c29fea6b..a739bfc40690b77537ca774b74092fe1beefdcce 100644 (file)
@@ -4,7 +4,7 @@
 extern int pat_debug_enable;
 
 #define dprintk(fmt, arg...) \
-       do { if (pat_debug_enable) printk(KERN_INFO fmt, ##arg); } while (0)
+       do { if (pat_debug_enable) pr_info("x86/PAT: " fmt, ##arg); } while (0)
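With this change, a call such as dprintk("free [mem %#010Lx-%#010Lx]\n", start, end - 1) logs at KERN_INFO with the "x86/PAT: " prefix prepended, still gated on pat_debug_enable; the example arguments here are illustrative.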
 
 struct memtype {
        u64                     start;
index 6582adcc8bd935b96df0c428308255fa262710ea..63931080366aaae1626c48b8ec780acd78d09569 100644 (file)
@@ -160,9 +160,9 @@ success:
        return 0;
 
 failure:
-       printk(KERN_INFO "%s:%d conflicting memory types "
-               "%Lx-%Lx %s<->%s\n", current->comm, current->pid, start,
-               end, cattr_name(found_type), cattr_name(match->type));
+       pr_info("x86/PAT: %s:%d conflicting memory types %Lx-%Lx %s<->%s\n",
+               current->comm, current->pid, start, end,
+               cattr_name(found_type), cattr_name(match->type));
        return -EBUSY;
 }
 
index 0b97d2c75df3d5fc05b1caafdadce2972660543d..fb0a9dd1d6e46fc6e6921bf29df9f71e830fdcb8 100644 (file)
@@ -563,16 +563,31 @@ void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
 }
 
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+/**
+ * pud_set_huge - setup kernel PUD mapping
+ *
+ * MTRRs can override PAT memory types with 4KiB granularity. Therefore, this
+ * function sets up a huge page only if any of the following conditions are met:
+ *
+ * - MTRRs are disabled, or
+ *
+ * - MTRRs are enabled and the range is completely covered by a single MTRR, or
+ *
+ * - MTRRs are enabled and the corresponding MTRR memory type is WB, which
+ *   has no effect on the requested PAT memory type.
+ *
+ * Callers should try to decrease page size (1GB -> 2MB -> 4K) if the bigger
+ * page mapping attempt fails.
+ *
+ * Returns 1 on success and 0 on failure.
+ */
 int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 {
-       u8 mtrr;
+       u8 mtrr, uniform;
 
-       /*
-        * Do not use a huge page when the range is covered by non-WB type
-        * of MTRRs.
-        */
-       mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE);
-       if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+       mtrr = mtrr_type_lookup(addr, addr + PUD_SIZE, &uniform);
+       if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
+           (mtrr != MTRR_TYPE_WRBACK))
                return 0;
 
        prot = pgprot_4k_2_large(prot);
@@ -584,17 +599,24 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
        return 1;
 }
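A hedged sketch of the caller pattern the comment asks for (decrease the page size when the bigger attempt fails); the surrounding table walk and the map_4k() helper are assumptions for illustration, not the actual ioremap code:

    /* Illustrative only: try 1GiB, then 2MiB, then fall back to 4KiB. */
    static void map_region(pud_t *pud, pmd_t *pmd, pte_t *pte,
                           phys_addr_t phys, unsigned long size, pgprot_t prot)
    {
            if (size >= PUD_SIZE && pud_set_huge(pud, phys, prot))
                    return;         /* one 1GiB entry covers the range */
            if (size >= PMD_SIZE && pmd_set_huge(pmd, phys, prot))
                    return;         /* one 2MiB entry covers the range */
            map_4k(pte, phys, size, prot);  /* hypothetical 4KiB fallback */
    }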
 
+/**
+ * pmd_set_huge - setup kernel PMD mapping
+ *
+ * See the comment above pud_set_huge().
+ *
+ * Returns 1 on success and 0 on failure.
+ */
 int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 {
-       u8 mtrr;
+       u8 mtrr, uniform;
 
-       /*
-        * Do not use a huge page when the range is covered by non-WB type
-        * of MTRRs.
-        */
-       mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE);
-       if ((mtrr != MTRR_TYPE_WRBACK) && (mtrr != 0xFF))
+       mtrr = mtrr_type_lookup(addr, addr + PMD_SIZE, &uniform);
+       if ((mtrr != MTRR_TYPE_INVALID) && (!uniform) &&
+           (mtrr != MTRR_TYPE_WRBACK)) {
+               pr_warn_once("%s: Cannot satisfy [mem %#010llx-%#010llx] with a huge-page mapping due to MTRR override.\n",
+                            __func__, addr, addr + PMD_SIZE);
                return 0;
+       }
 
        prot = pgprot_4k_2_large(prot);
 
@@ -605,6 +627,11 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
        return 1;
 }
 
+/**
+ * pud_clear_huge - clear kernel PUD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PUD map is found).
+ */
 int pud_clear_huge(pud_t *pud)
 {
        if (pud_large(*pud)) {
@@ -615,6 +642,11 @@ int pud_clear_huge(pud_t *pud)
        return 0;
 }
 
+/**
+ * pmd_clear_huge - clear kernel PMD mapping when it is set
+ *
+ * Returns 1 on success and 0 on failure (no PMD map is found).
+ */
 int pmd_clear_huge(pmd_t *pmd)
 {
        if (pmd_large(*pmd)) {
index 6440221ced0d4925d3fee4a0c11b424a6b696f2d..4093216b3791311c995bfb2f644deed3845ef708 100644 (file)
@@ -8,7 +8,6 @@
  * of the License.
  */
 #include <linux/linkage.h>
-#include <asm/dwarf2.h>
 
 /*
  * Calling convention :
index 987514396c1e443376bffe70e050b72f86509102..ddeff4844a100de83b52ef9dae9f42974666dc23 100644 (file)
@@ -559,6 +559,13 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                                if (is_ereg(dst_reg))
                                        EMIT1(0x41);
                                EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
+
+                               /* emit 'movzwl eax, ax' */
+                               if (is_ereg(dst_reg))
+                                       EMIT3(0x45, 0x0F, 0xB7);
+                               else
+                                       EMIT2(0x0F, 0xB7);
+                               EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
                                break;
                        case 32:
                                /* emit 'bswap eax' to swap lower 4 bytes */
@@ -577,6 +584,27 @@ static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image,
                        break;
 
                case BPF_ALU | BPF_END | BPF_FROM_LE:
+                       switch (imm32) {
+                       case 16:
+                               /* emit 'movzwl eax, ax' to zero extend 16-bit
+                                * into 64 bit
+                                */
+                               if (is_ereg(dst_reg))
+                                       EMIT3(0x45, 0x0F, 0xB7);
+                               else
+                                       EMIT2(0x0F, 0xB7);
+                               EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
+                               break;
+                       case 32:
+                               /* emit 'mov eax, eax' to clear upper 32-bits */
+                               if (is_ereg(dst_reg))
+                                       EMIT1(0x45);
+                               EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
+                               break;
+                       case 64:
+                               /* nop */
+                               break;
+                       }
                        break;
 
                        /* ST: *(u8*)(dst_reg + off) = imm */
@@ -938,7 +966,12 @@ void bpf_int_jit_compile(struct bpf_prog *prog)
        }
        ctx.cleanup_addr = proglen;
 
-       for (pass = 0; pass < 10; pass++) {
+       /* JITed image shrinks with every pass and the loop iterates
+        * until the image stops shrinking. Very large bpf programs
+        * may converge on the last pass. In such a case, do one more
+        * pass to emit the final image.
+        */
+       for (pass = 0; pass < 10 || image; pass++) {
                proglen = do_jit(prog, addrs, image, oldproglen, &ctx);
                if (proglen <= 0) {
                        image = NULL;
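A simplified stand-alone model of that converge-then-emit loop; emit() is a toy stand-in for do_jit() whose reported length shrinks for a few passes and then stabilizes:

    #include <stdlib.h>

    /* Toy do_jit(): writes code when buf != NULL, returns the length. */
    static int emit(void *buf, int pass)
    {
            int len = pass < 3 ? 100 - 10 * pass : 70;

            if (buf) {
                    /* ... would write len bytes of machine code here ... */
            }
            return len;
    }

    static void *jit_compile(int *final_len)
    {
            void *image = NULL;
            int pass, proglen, oldproglen = -1;

            for (pass = 0; pass < 10 || image; pass++) {
                    proglen = emit(image, pass);
                    if (image) {
                            *final_len = proglen;    /* final image written */
                            return image;
                    }
                    if (proglen == oldproglen)       /* stopped shrinking */
                            image = malloc(proglen); /* next pass emits */
                    oldproglen = proglen;
            }
            return NULL;    /* did not converge within 10 sizing passes */
    }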
index e4695985f9de85778db5e084b37eda5719d3a82a..14a63ed6fe092cd3f512d155046942a28c3744e8 100644 (file)
@@ -325,6 +325,26 @@ static void release_pci_root_info(struct pci_host_bridge *bridge)
        kfree(info);
 }
 
+/*
+ * An IO port or MMIO resource assigned to a PCI host bridge may be
+ * consumed by the host bridge itself or available to its child
+ * bus/devices. The ACPI specification defines a bit (Producer/Consumer)
+ * to tell whether the resource is consumed by the host bridge itself,
+ * but firmware hasn't used that bit consistently, so we can't rely on it.
+ *
+ * On x86 and IA64 platforms, all IO port and MMIO resources are assumed
+ * to be available to child bus/devices except one special case:
+ *     IO port [0xCF8-0xCFF] is consumed by the host bridge itself
+ *     to access PCI configuration space.
+ *
+ * So explicitly filter out the PCI CFG IO ports [0xCF8-0xCFF].
+ */
+static bool resource_is_pcicfg_ioport(struct resource *res)
+{
+       return (res->flags & IORESOURCE_IO) &&
+               res->start == 0xCF8 && res->end == 0xCFF;
+}
+
 static void probe_pci_root_info(struct pci_root_info *info,
                                struct acpi_device *device,
                                int busnum, int domain,
@@ -346,8 +366,8 @@ static void probe_pci_root_info(struct pci_root_info *info,
                        "no IO and memory resources present in _CRS\n");
        else
                resource_list_for_each_entry_safe(entry, tmp, list) {
-                       if ((entry->res->flags & IORESOURCE_WINDOW) == 0 ||
-                           (entry->res->flags & IORESOURCE_DISABLED))
+                       if ((entry->res->flags & IORESOURCE_DISABLED) ||
+                           resource_is_pcicfg_ioport(entry->res))
                                resource_list_destroy_entry(entry);
                        else
                                entry->res->name = info->name;
@@ -462,9 +482,16 @@ struct pci_bus *pci_acpi_scan_root(struct acpi_pci_root *root)
 
 int pcibios_root_bridge_prepare(struct pci_host_bridge *bridge)
 {
-       struct pci_sysdata *sd = bridge->bus->sysdata;
-
-       ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+       /*
+        * We pass NULL as parent to pci_create_root_bus(), so if it is not NULL
+        * here, pci_create_root_bus() has been called by someone else and
+        * sysdata is likely to be different from what we expect.  Let it go in
+        * that case.
+        */
+       if (!bridge->dev.parent) {
+               struct pci_sysdata *sd = bridge->bus->sysdata;
+               ACPI_COMPANION_SET(&bridge->dev, sd->companion);
+       }
        return 0;
 }
 
index 349c0d32cc0b140222141cfec5fa29a8c6ddbace..0a9f2caf358ff7230e6afa569219234127490c1a 100644 (file)
@@ -429,12 +429,12 @@ int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
         * Caller can followup with UC MINUS request and add a WC mtrr if there
         * is a free mtrr slot.
         */
-       if (!pat_enabled && write_combine)
+       if (!pat_enabled() && write_combine)
                return -EINVAL;
 
-       if (pat_enabled && write_combine)
+       if (pat_enabled() && write_combine)
                prot |= cachemode2protval(_PAGE_CACHE_MODE_WC);
-       else if (pat_enabled || boot_cpu_data.x86 > 3)
+       else if (pat_enabled() || boot_cpu_data.x86 > 3)
                /*
                 * ioremap() and ioremap_nocache() defaults to UC MINUS for now.
                 * To avoid attribute conflicts, request UC MINUS here
index 852aa4c92da027cb07fb64c77c855aaf0877a1da..27062303c88135b5d614455ed372861516903e53 100644 (file)
@@ -208,6 +208,7 @@ static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
 
 static int intel_mid_pci_irq_enable(struct pci_dev *dev)
 {
+       struct irq_alloc_info info;
        int polarity;
 
        if (dev->irq_managed && dev->irq > 0)
@@ -217,14 +218,13 @@ static int intel_mid_pci_irq_enable(struct pci_dev *dev)
                polarity = 0; /* active high */
        else
                polarity = 1; /* active low */
+       ioapic_set_alloc_attr(&info, dev_to_node(&dev->dev), 1, polarity);
 
        /*
         * MRST only has an IOAPIC; the PCI irq lines are 1:1 mapped to
         * IOAPIC RTE entries, so we just enable RTE for the device.
         */
-       if (mp_set_gsi_attr(dev->irq, 1, polarity, dev_to_node(&dev->dev)))
-               return -EBUSY;
-       if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC) < 0)
+       if (mp_map_gsi_to_irq(dev->irq, IOAPIC_MAP_ALLOC, &info) < 0)
                return -EBUSY;
 
        dev->irq_managed = 1;
index 5dc6ca5e174131d2c7208ea1ed86739ef4532d22..9bd115484745703791c6515b289938546d2478ab 100644 (file)
@@ -146,19 +146,20 @@ static void __init pirq_peer_trick(void)
 
 /*
  *  Code for querying and setting of IRQ routes on various interrupt routers.
+ *  PIC Edge/Level Control Registers (ELCR) 0x4d0 & 0x4d1.
  */
 
-void eisa_set_level_irq(unsigned int irq)
+void elcr_set_level_irq(unsigned int irq)
 {
        unsigned char mask = 1 << (irq & 7);
        unsigned int port = 0x4d0 + (irq >> 3);
        unsigned char val;
-       static u16 eisa_irq_mask;
+       static u16 elcr_irq_mask;
 
-       if (irq >= 16 || (1 << irq) & eisa_irq_mask)
+       if (irq >= 16 || (1 << irq) & elcr_irq_mask)
                return;
 
-       eisa_irq_mask |= (1 << irq);
+       elcr_irq_mask |= (1 << irq);
        printk(KERN_DEBUG "PCI: setting IRQ %u as level-triggered\n", irq);
        val = inb(port);
        if (!(val & mask)) {
@@ -965,11 +966,11 @@ static int pcibios_lookup_irq(struct pci_dev *dev, int assign)
        } else if (r->get && (irq = r->get(pirq_router_dev, dev, pirq)) && \
        ((!(pci_probe & PCI_USE_PIRQ_MASK)) || ((1 << irq) & mask))) {
                msg = "found";
-               eisa_set_level_irq(irq);
+               elcr_set_level_irq(irq);
        } else if (newirq && r->set &&
                (dev->class >> 8) != PCI_CLASS_DISPLAY_VGA) {
                if (r->set(pirq_router_dev, dev, pirq, newirq)) {
-                       eisa_set_level_irq(newirq);
+                       elcr_set_level_irq(newirq);
                        msg = "assigned";
                        irq = newirq;
                }
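Worked example of the ELCR addressing in elcr_set_level_irq(): for IRQ 11, port = 0x4d0 + (11 >> 3) = 0x4d1 and mask = 1 << (11 & 7) = 0x08, so setting bit 3 of the second ELCR register marks IRQ 11 as level-triggered.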
index a62e0be3a2f1b4f563ab5f6fda0acb59612ea523..f1a6c8e86ddd927e0b5f8d947b8f4e725a5b4933 100644 (file)
@@ -1,4 +1,5 @@
 # Platform specific code goes here
+obj-y  += atom/
 obj-y  += ce4100/
 obj-y  += efi/
 obj-y  += geode/
diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile
new file mode 100644 (file)
index 0000000..0a3a40c
--- /dev/null
@@ -0,0 +1 @@
+obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
new file mode 100644 (file)
index 0000000..5ca8ead
--- /dev/null
@@ -0,0 +1,183 @@
+/*
+ * Intel SOC Punit device state debug driver
+ * Punit controls power management for North Complex devices (Graphics
+ * blocks, Image Signal Processing, video processing, display, DSP, etc.)
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+#include <asm/cpu_device_id.h>
+#include <asm/iosf_mbi.h>
+
+/* Side band Interface port */
+#define PUNIT_PORT             0x04
+/* Power gate status reg */
+#define PWRGT_STATUS           0x61
+/* Subsystem config/status Video processor */
+#define VED_SS_PM0             0x32
+/* Subsystem config/status ISP (Image Signal Processor) */
+#define ISP_SS_PM0             0x39
+/* Subsystem config/status Input/output controller */
+#define MIO_SS_PM              0x3B
+/* Shift bits for getting status for video, isp and i/o */
+#define SSS_SHIFT              24
+/* Shift bits for getting status for graphics rendering */
+#define RENDER_POS             0
+/* Shift bits for getting status for media control */
+#define MEDIA_POS              2
+/* Shift bits for getting status for Valley View/Baytrail display */
+#define VLV_DISPLAY_POS                6
+/* Subsystem config/status display for Cherry Trail SOC */
+#define CHT_DSP_SSS            0x36
+/* Shift bits for getting status for display */
+#define CHT_DSP_SSS_POS                16
+
+struct punit_device {
+       char *name;
+       int reg;
+       int sss_pos;
+};
+
+static const struct punit_device punit_device_byt[] = {
+       { "GFX RENDER", PWRGT_STATUS,   RENDER_POS },
+       { "GFX MEDIA",  PWRGT_STATUS,   MEDIA_POS },
+       { "DISPLAY",    PWRGT_STATUS,   VLV_DISPLAY_POS },
+       { "VED",        VED_SS_PM0,     SSS_SHIFT },
+       { "ISP",        ISP_SS_PM0,     SSS_SHIFT },
+       { "MIO",        MIO_SS_PM,      SSS_SHIFT },
+       { NULL }
+};
+
+static const struct punit_device punit_device_cht[] = {
+       { "GFX RENDER", PWRGT_STATUS,   RENDER_POS },
+       { "GFX MEDIA",  PWRGT_STATUS,   MEDIA_POS },
+       { "DISPLAY",    CHT_DSP_SSS,    CHT_DSP_SSS_POS },
+       { "VED",        VED_SS_PM0,     SSS_SHIFT },
+       { "ISP",        ISP_SS_PM0,     SSS_SHIFT },
+       { "MIO",        MIO_SS_PM,      SSS_SHIFT },
+       { NULL }
+};
+
+static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+static int punit_dev_state_show(struct seq_file *seq_file, void *unused)
+{
+       u32 punit_pwr_status;
+       struct punit_device *punit_devp = seq_file->private;
+       int index;
+       int status;
+
+       seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES :\n");
+       while (punit_devp->name) {
+               status = iosf_mbi_read(PUNIT_PORT, BT_MBI_PMC_READ,
+                                      punit_devp->reg,
+                                      &punit_pwr_status);
+               if (status) {
+                       seq_printf(seq_file, "%9s : Read Failed\n",
+                                  punit_devp->name);
+               } else  {
+                       index = (punit_pwr_status >> punit_devp->sss_pos) & 3;
+                       seq_printf(seq_file, "%9s : %s\n", punit_devp->name,
+                                  dstates[index]);
+               }
+               punit_devp++;
+       }
+
+       return 0;
+}
+
+static int punit_dev_state_open(struct inode *inode, struct file *file)
+{
+       return single_open(file, punit_dev_state_show, inode->i_private);
+}
+
+static const struct file_operations punit_dev_state_ops = {
+       .open           = punit_dev_state_open,
+       .read           = seq_read,
+       .llseek         = seq_lseek,
+       .release        = single_release,
+};
+
+static struct dentry *punit_dbg_file;
+
+static int punit_dbgfs_register(struct punit_device *punit_device)
+{
+       static struct dentry *dev_state;
+
+       punit_dbg_file = debugfs_create_dir("punit_atom", NULL);
+       if (!punit_dbg_file)
+               return -ENXIO;
+
+       dev_state = debugfs_create_file("dev_power_state", S_IFREG | S_IRUGO,
+                                       punit_dbg_file, punit_device,
+                                       &punit_dev_state_ops);
+       if (!dev_state) {
+               pr_err("punit_dev_state register failed\n");
+               debugfs_remove(punit_dbg_file);
+               return -ENXIO;
+       }
+
+       return 0;
+}
+
+static void punit_dbgfs_unregister(void)
+{
+       debugfs_remove_recursive(punit_dbg_file);
+}
+
+#define ICPU(model, drv_data) \
+       { X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT,\
+         (kernel_ulong_t)&drv_data }
+
+static const struct x86_cpu_id intel_punit_cpu_ids[] = {
+       ICPU(55, punit_device_byt), /* Valleyview, Bay Trail */
+       ICPU(76, punit_device_cht), /* Braswell, Cherry Trail */
+       {}
+};
+
+MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
+
+static int __init punit_atom_debug_init(void)
+{
+       const struct x86_cpu_id *id;
+       int ret;
+
+       id = x86_match_cpu(intel_punit_cpu_ids);
+       if (!id)
+               return -ENODEV;
+
+       ret = punit_dbgfs_register((struct punit_device *)id->driver_data);
+       if (ret < 0)
+               return ret;
+
+       return 0;
+}
+
+static void __exit punit_atom_debug_exit(void)
+{
+       punit_dbgfs_unregister();
+}
+
+module_init(punit_atom_debug_init);
+module_exit(punit_atom_debug_exit);
+
+MODULE_AUTHOR("Kumar P, Mahesh <mahesh.kumar.p@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Driver for Punit device state debugging");
+MODULE_LICENSE("GPL v2");
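Given the seq_printf() format above ("%9s : %s\n" against the dstates[] names), reading the new debugfs node should produce output shaped like the following; the power states shown are illustrative, not captured from real hardware:

    # cat /sys/kernel/debug/punit_atom/dev_power_state

    PUNIT NORTH COMPLEX DEVICES :
    GFX RENDER : D0i3
    GFX MEDIA : D0
      DISPLAY : D0
          VED : D0i3
          ISP : D0i3
          MIO : D0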
index 02744df576d52588a35308998ecc1a138435012e..3b984c3aa1b0b5ba6e7b5e5321edb4fbc7013e5d 100644 (file)
@@ -501,6 +501,8 @@ void __init efi_init(void)
 
        if (efi_enabled(EFI_DBG))
                print_efi_memmap();
+
+       efi_esrt_init();
 }
 
 void __init efi_late_init(void)
index 0b283d4d0ad770d945f407c4921d09ecacb34db4..de734134bc8d2e1733fb831a32cdbfd8fcf80ed5 100644 (file)
@@ -27,6 +27,7 @@ static struct platform_device wdt_dev = {
 static int tangier_probe(struct platform_device *pdev)
 {
        int gsi;
+       struct irq_alloc_info info;
        struct intel_mid_wdt_pdata *pdata = pdev->dev.platform_data;
 
        if (!pdata)
@@ -34,8 +35,8 @@ static int tangier_probe(struct platform_device *pdev)
 
        /* IOAPIC builds identity mapping between GSI and IRQ on MID */
        gsi = pdata->irq;
-       if (mp_set_gsi_attr(gsi, 1, 0, cpu_to_node(0)) ||
-           mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC) <= 0) {
+       ioapic_set_alloc_attr(&info, cpu_to_node(0), 1, 0);
+       if (mp_map_gsi_to_irq(gsi, IOAPIC_MAP_ALLOC, &info) <= 0) {
                dev_warn(&pdev->dev, "cannot find interrupt %d in ioapic\n",
                         gsi);
                return -EINVAL;
index 3005f0c89f2ecfbcc817c7e379c97586d5524e7f..01d54ea766c16539d001da113320c56ee8eac1f8 100644 (file)
@@ -81,26 +81,34 @@ static unsigned long __init intel_mid_calibrate_tsc(void)
        return 0;
 }
 
+static void __init intel_mid_setup_bp_timer(void)
+{
+       apbt_time_init();
+       setup_boot_APIC_clock();
+}
+
 static void __init intel_mid_time_init(void)
 {
        sfi_table_parse(SFI_SIG_MTMR, NULL, NULL, sfi_parse_mtmr);
+
        switch (intel_mid_timer_options) {
        case INTEL_MID_TIMER_APBT_ONLY:
                break;
        case INTEL_MID_TIMER_LAPIC_APBT:
-               x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+               /* Use apbt and local apic */
+               x86_init.timers.setup_percpu_clockev = intel_mid_setup_bp_timer;
                x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
-               break;
+               return;
        default:
                if (!boot_cpu_has(X86_FEATURE_ARAT))
                        break;
+               /* Lapic only, no apbt */
                x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
                x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
                return;
        }
-       /* we need at least one APB timer */
-       pre_init_apic_IRQ0();
-       apbt_time_init();
+
+       x86_init.timers.setup_percpu_clockev = apbt_time_init;
 }
 
 static void intel_mid_arch_setup(void)
index c14ad34776c466f3cb18fe4b0a50fe40db8cd3ec..ce992e8cc06526c4e61f1efb1b852e0f41df9ccf 100644 (file)
@@ -95,18 +95,16 @@ int __init sfi_parse_mtmr(struct sfi_table_header *table)
                pr_debug("timer[%d]: paddr = 0x%08x, freq = %dHz, irq = %d\n",
                        totallen, (u32)pentry->phys_addr,
                        pentry->freq_hz, pentry->irq);
-                       if (!pentry->irq)
-                               continue;
-                       mp_irq.type = MP_INTSRC;
-                       mp_irq.irqtype = mp_INT;
-/* triggering mode edge bit 2-3, active high polarity bit 0-1 */
-                       mp_irq.irqflag = 5;
-                       mp_irq.srcbus = MP_BUS_ISA;
-                       mp_irq.srcbusirq = pentry->irq; /* IRQ */
-                       mp_irq.dstapic = MP_APIC_ALL;
-                       mp_irq.dstirq = pentry->irq;
-                       mp_save_irq(&mp_irq);
-                       mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
+               mp_irq.type = MP_INTSRC;
+               mp_irq.irqtype = mp_INT;
+               /* triggering mode edge bit 2-3, active high polarity bit 0-1 */
+               mp_irq.irqflag = 5;
+               mp_irq.srcbus = MP_BUS_ISA;
+               mp_irq.srcbusirq = pentry->irq; /* IRQ */
+               mp_irq.dstapic = MP_APIC_ALL;
+               mp_irq.dstirq = pentry->irq;
+               mp_save_irq(&mp_irq);
+               mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL);
        }
 
        return 0;
@@ -177,7 +175,7 @@ int __init sfi_parse_mrtc(struct sfi_table_header *table)
                mp_irq.dstapic = MP_APIC_ALL;
                mp_irq.dstirq = pentry->irq;
                mp_save_irq(&mp_irq);
-               mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC);
+               mp_map_gsi_to_irq(pentry->irq, IOAPIC_MAP_ALLOC, NULL);
        }
        return 0;
 }
@@ -436,6 +434,7 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
        struct devs_id *dev = NULL;
        int num, i, ret;
        int polarity;
+       struct irq_alloc_info info;
 
        sb = (struct sfi_table_simple *)table;
        num = SFI_GET_NUM_ENTRIES(sb, struct sfi_device_table_entry);
@@ -469,9 +468,8 @@ static int __init sfi_parse_devs(struct sfi_table_header *table)
                                polarity = 1;
                        }
 
-                       ret = mp_set_gsi_attr(irq, 1, polarity, NUMA_NO_NODE);
-                       if (ret == 0)
-                               ret = mp_map_gsi_to_irq(irq, IOAPIC_MAP_ALLOC);
+                       ioapic_set_alloc_attr(&info, NUMA_NO_NODE, 1, polarity);
+                       ret = mp_map_gsi_to_irq(irq, IOAPIC_MAP_ALLOC, &info);
                        WARN_ON(ret < 0);
                }
 
index 2a8a74f3bd76c1338e2378c26bc231cc5adf9bea..6c7111bbd1e92faf169b9b046726f704c260e8d2 100644 (file)
@@ -25,8 +25,8 @@
 #include <linux/init.h>
 #include <linux/sfi.h>
 #include <linux/io.h>
-#include <linux/irqdomain.h>
 
+#include <asm/irqdomain.h>
 #include <asm/io_apic.h>
 #include <asm/mpspec.h>
 #include <asm/setup.h>
@@ -71,9 +71,6 @@ static int __init sfi_parse_cpus(struct sfi_table_header *table)
 #endif /* CONFIG_X86_LOCAL_APIC */
 
 #ifdef CONFIG_X86_IO_APIC
-static struct irq_domain_ops sfi_ioapic_irqdomain_ops = {
-       .map = mp_irqdomain_map,
-};
 
 static int __init sfi_parse_ioapic(struct sfi_table_header *table)
 {
@@ -82,7 +79,7 @@ static int __init sfi_parse_ioapic(struct sfi_table_header *table)
        int i, num;
        struct ioapic_domain_cfg cfg = {
                .type = IOAPIC_DOMAIN_STRICT,
-               .ops = &sfi_ioapic_irqdomain_ops,
+               .ops = &mp_ioapic_irqdomain_ops,
        };
 
        sb = (struct sfi_table_simple *)table;
index 0ce67364543242a21fd3d6e2b40f8864623ce668..8570abe68be1feb0b12bb1828a7126d07b94307d 100644 (file)
 #include <linux/slab.h>
 #include <linux/irq.h>
 
+#include <asm/irqdomain.h>
 #include <asm/apic.h>
 #include <asm/uv/uv_irq.h>
 #include <asm/uv/uv_hub.h>
 
 /* MMR offset and pnode of hub sourcing interrupts for a given irq */
-struct uv_irq_2_mmr_pnode{
-       struct rb_node          list;
+struct uv_irq_2_mmr_pnode {
        unsigned long           offset;
        int                     pnode;
-       int                     irq;
 };
 
-static DEFINE_SPINLOCK(uv_irq_lock);
-static struct rb_root          uv_irq_root;
+static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
+{
+       unsigned long mmr_value;
+       struct uv_IO_APIC_route_entry *entry;
+
+       BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+                    sizeof(unsigned long));
+
+       mmr_value = 0;
+       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+       entry->vector           = cfg->vector;
+       entry->delivery_mode    = apic->irq_delivery_mode;
+       entry->dest_mode        = apic->irq_dest_mode;
+       entry->polarity         = 0;
+       entry->trigger          = 0;
+       entry->mask             = 0;
+       entry->dest             = cfg->dest_apicid;
 
-static int uv_set_irq_affinity(struct irq_data *, const struct cpumask *, bool);
+       uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
+}
 
 static void uv_noop(struct irq_data *data) { }
 
@@ -37,6 +52,23 @@ static void uv_ack_apic(struct irq_data *data)
        ack_APIC_irq();
 }
 
+static int
+uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+                   bool force)
+{
+       struct irq_data *parent = data->parent_data;
+       struct irq_cfg *cfg = irqd_cfg(data);
+       int ret;
+
+       ret = parent->chip->irq_set_affinity(parent, mask, force);
+       if (ret >= 0) {
+               uv_program_mmr(cfg, data->chip_data);
+               send_cleanup_vector(cfg);
+       }
+
+       return ret;
+}
+
 static struct irq_chip uv_irq_chip = {
        .name                   = "UV-CORE",
        .irq_mask               = uv_noop,
@@ -45,189 +77,99 @@ static struct irq_chip uv_irq_chip = {
        .irq_set_affinity       = uv_set_irq_affinity,
 };
 
-/*
- * Add offset and pnode information of the hub sourcing interrupts to the
- * rb tree for a specific irq.
- */
-static int uv_set_irq_2_mmr_info(int irq, unsigned long offset, unsigned blade)
+static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
+                          unsigned int nr_irqs, void *arg)
 {
-       struct rb_node **link = &uv_irq_root.rb_node;
-       struct rb_node *parent = NULL;
-       struct uv_irq_2_mmr_pnode *n;
-       struct uv_irq_2_mmr_pnode *e;
-       unsigned long irqflags;
-
-       n = kmalloc_node(sizeof(struct uv_irq_2_mmr_pnode), GFP_KERNEL,
-                               uv_blade_to_memory_nid(blade));
-       if (!n)
+       struct uv_irq_2_mmr_pnode *chip_data;
+       struct irq_alloc_info *info = arg;
+       struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+       int ret;
+
+       if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
+               return -EINVAL;
+
+       chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
+                                irq_data->node);
+       if (!chip_data)
                return -ENOMEM;
 
-       n->irq = irq;
-       n->offset = offset;
-       n->pnode = uv_blade_to_pnode(blade);
-       spin_lock_irqsave(&uv_irq_lock, irqflags);
-       /* Find the right place in the rbtree: */
-       while (*link) {
-               parent = *link;
-               e = rb_entry(parent, struct uv_irq_2_mmr_pnode, list);
-
-               if (unlikely(irq == e->irq)) {
-                       /* irq entry exists */
-                       e->pnode = uv_blade_to_pnode(blade);
-                       e->offset = offset;
-                       spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-                       kfree(n);
-                       return 0;
-               }
-
-               if (irq < e->irq)
-                       link = &(*link)->rb_left;
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+       if (ret >= 0) {
+               if (info->uv_limit == UV_AFFINITY_CPU)
+                       irq_set_status_flags(virq, IRQ_NO_BALANCING);
                else
-                       link = &(*link)->rb_right;
+                       irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+
+               chip_data->pnode = uv_blade_to_pnode(info->uv_blade);
+               chip_data->offset = info->uv_offset;
+               irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
+                                   handle_percpu_irq, NULL, info->uv_name);
+       } else {
+               kfree(chip_data);
        }
 
-       /* Insert the node into the rbtree. */
-       rb_link_node(&n->list, parent, link);
-       rb_insert_color(&n->list, &uv_irq_root);
-
-       spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-       return 0;
+       return ret;
 }
 
-/* Retrieve offset and pnode information from the rb tree for a specific irq */
-int uv_irq_2_mmr_info(int irq, unsigned long *offset, int *pnode)
+static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
+                          unsigned int nr_irqs)
 {
-       struct uv_irq_2_mmr_pnode *e;
-       struct rb_node *n;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&uv_irq_lock, irqflags);
-       n = uv_irq_root.rb_node;
-       while (n) {
-               e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
-
-               if (e->irq == irq) {
-                       *offset = e->offset;
-                       *pnode = e->pnode;
-                       spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-                       return 0;
-               }
-
-               if (irq < e->irq)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
-       }
-       spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-       return -1;
+       struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+
+       BUG_ON(nr_irqs != 1);
+       kfree(irq_data->chip_data);
+       irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
+       irq_clear_status_flags(virq, IRQ_NO_BALANCING);
+       irq_domain_free_irqs_top(domain, virq, nr_irqs);
 }
 
 /*
  * Re-target the irq to the specified CPU and enable the specified MMR located
  * on the specified blade to allow the sending of MSIs to the specified CPU.
  */
-static int
-arch_enable_uv_irq(char *irq_name, unsigned int irq, int cpu, int mmr_blade,
-                      unsigned long mmr_offset, int limit)
+static void uv_domain_activate(struct irq_domain *domain,
+                              struct irq_data *irq_data)
 {
-       const struct cpumask *eligible_cpu = cpumask_of(cpu);
-       struct irq_cfg *cfg = irq_cfg(irq);
-       unsigned long mmr_value;
-       struct uv_IO_APIC_route_entry *entry;
-       int mmr_pnode, err;
-       unsigned int dest;
-
-       BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
-                       sizeof(unsigned long));
-
-       err = assign_irq_vector(irq, cfg, eligible_cpu);
-       if (err != 0)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(eligible_cpu, eligible_cpu, &dest);
-       if (err != 0)
-               return err;
-
-       if (limit == UV_AFFINITY_CPU)
-               irq_set_status_flags(irq, IRQ_NO_BALANCING);
-       else
-               irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-
-       irq_set_chip_and_handler_name(irq, &uv_irq_chip, handle_percpu_irq,
-                                     irq_name);
-
-       mmr_value = 0;
-       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-       entry->vector           = cfg->vector;
-       entry->delivery_mode    = apic->irq_delivery_mode;
-       entry->dest_mode        = apic->irq_dest_mode;
-       entry->polarity         = 0;
-       entry->trigger          = 0;
-       entry->mask             = 0;
-       entry->dest             = dest;
-
-       mmr_pnode = uv_blade_to_pnode(mmr_blade);
-       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
-
-       if (cfg->move_in_progress)
-               send_cleanup_vector(cfg);
-
-       return irq;
+       uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
 }
 
 /*
  * Disable the specified MMR located on the specified blade so that MSIs are
  * no longer allowed to be sent.
  */
-static void arch_disable_uv_irq(int mmr_pnode, unsigned long mmr_offset)
+static void uv_domain_deactivate(struct irq_domain *domain,
+                                struct irq_data *irq_data)
 {
        unsigned long mmr_value;
        struct uv_IO_APIC_route_entry *entry;
 
-       BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
-                       sizeof(unsigned long));
-
        mmr_value = 0;
        entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
        entry->mask = 1;
-
-       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+       uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
 }
 
-static int
-uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
-                   bool force)
-{
-       struct irq_cfg *cfg = irqd_cfg(data);
-       unsigned int dest;
-       unsigned long mmr_value, mmr_offset;
-       struct uv_IO_APIC_route_entry *entry;
-       int mmr_pnode;
-
-       if (apic_set_affinity(data, mask, &dest))
-               return -1;
-
-       mmr_value = 0;
-       entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
-
-       entry->vector           = cfg->vector;
-       entry->delivery_mode    = apic->irq_delivery_mode;
-       entry->dest_mode        = apic->irq_dest_mode;
-       entry->polarity         = 0;
-       entry->trigger          = 0;
-       entry->mask             = 0;
-       entry->dest             = dest;
-
-       /* Get previously stored MMR and pnode of hub sourcing interrupts */
-       if (uv_irq_2_mmr_info(data->irq, &mmr_offset, &mmr_pnode))
-               return -1;
-
-       uv_write_global_mmr64(mmr_pnode, mmr_offset, mmr_value);
+static const struct irq_domain_ops uv_domain_ops = {
+       .alloc          = uv_domain_alloc,
+       .free           = uv_domain_free,
+       .activate       = uv_domain_activate,
+       .deactivate     = uv_domain_deactivate,
+};
 
-       if (cfg->move_in_progress)
-               send_cleanup_vector(cfg);
+static struct irq_domain *uv_get_irq_domain(void)
+{
+       static struct irq_domain *uv_domain;
+       static DEFINE_MUTEX(uv_lock);
+
+       mutex_lock(&uv_lock);
+       if (uv_domain == NULL) {
+               uv_domain = irq_domain_add_tree(NULL, &uv_domain_ops, NULL);
+               if (uv_domain)
+                       uv_domain->parent = x86_vector_domain;
+       }
+       mutex_unlock(&uv_lock);
 
-       return IRQ_SET_MASK_OK_NOCOPY;
+       return uv_domain;
 }
 
 /*
@@ -238,19 +180,21 @@ uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
 int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
                 unsigned long mmr_offset, int limit)
 {
-       int ret, irq = irq_alloc_hwirq(uv_blade_to_memory_nid(mmr_blade));
+       struct irq_alloc_info info;
+       struct irq_domain *domain = uv_get_irq_domain();
 
-       if (!irq)
-               return -EBUSY;
+       if (!domain)
+               return -ENOMEM;
 
-       ret = arch_enable_uv_irq(irq_name, irq, cpu, mmr_blade, mmr_offset,
-               limit);
-       if (ret == irq)
-               uv_set_irq_2_mmr_info(irq, mmr_offset, mmr_blade);
-       else
-               irq_free_hwirq(irq);
+       init_irq_alloc_info(&info, cpumask_of(cpu));
+       info.type = X86_IRQ_ALLOC_TYPE_UV;
+       info.uv_limit = limit;
+       info.uv_blade = mmr_blade;
+       info.uv_offset = mmr_offset;
+       info.uv_name = irq_name;
 
-       return ret;
+       return irq_domain_alloc_irqs(domain, 1,
+                                    uv_blade_to_memory_nid(mmr_blade), &info);
 }
 EXPORT_SYMBOL_GPL(uv_setup_irq);
 
@@ -263,26 +207,6 @@ EXPORT_SYMBOL_GPL(uv_setup_irq);
  */
 void uv_teardown_irq(unsigned int irq)
 {
-       struct uv_irq_2_mmr_pnode *e;
-       struct rb_node *n;
-       unsigned long irqflags;
-
-       spin_lock_irqsave(&uv_irq_lock, irqflags);
-       n = uv_irq_root.rb_node;
-       while (n) {
-               e = rb_entry(n, struct uv_irq_2_mmr_pnode, list);
-               if (e->irq == irq) {
-                       arch_disable_uv_irq(e->pnode, e->offset);
-                       rb_erase(n, &uv_irq_root);
-                       kfree(e);
-                       break;
-               }
-               if (irq < e->irq)
-                       n = n->rb_left;
-               else
-                       n = n->rb_right;
-       }
-       spin_unlock_irqrestore(&uv_irq_lock, irqflags);
-       irq_free_hwirq(irq);
+       irq_domain_free_irqs(irq, 1);
 }
 EXPORT_SYMBOL_GPL(uv_teardown_irq);
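A hedged usage sketch based only on the two exported entry points above; the cpu, blade and MMR offset values are placeholders, not real hardware parameters:

    /* Illustrative only: allocate a UV hub interrupt, then release it. */
    int irq = uv_setup_irq("uv-example", 0 /* cpu */, 0 /* mmr_blade */,
                           0x0 /* mmr_offset: hardware-specific */,
                           UV_AFFINITY_CPU);

    if (irq > 0) {
            /* ... wire up a handler with request_irq() while in use ... */
            uv_teardown_irq(irq);
    }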
index 757678fb26e1a06277687c1c90f86e75377de03a..0d7dd1f5ac36fa6814c18522dd28561566c570eb 100644 (file)
 #include <asm/mtrr.h>
 #include <asm/page.h>
 #include <asm/mce.h>
-#include <asm/xcr.h>
 #include <asm/suspend.h>
+#include <asm/fpu/internal.h>
 #include <asm/debugreg.h>
-#include <asm/fpu-internal.h> /* pcntxt_mask */
 #include <asm/cpu.h>
 
 #ifdef CONFIG_X86_32
@@ -155,6 +154,8 @@ static void fix_processor_context(void)
 #endif
        load_TR_desc();                         /* This does ltr */
        load_LDT(&current->active_mm->context); /* This does lldt */
+
+       fpu__resume_cpu();
 }
 
 /**
@@ -221,12 +222,6 @@ static void notrace __restore_processor_state(struct saved_context *ctxt)
        wrmsrl(MSR_KERNEL_GS_BASE, ctxt->gs_kernel_base);
 #endif
 
-       /*
-        * restore XCR0 for xsave capable cpu's.
-        */
-       if (cpu_has_xsave)
-               xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);
-
        fix_processor_context();
 
        do_fpu_end();
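
The deleted open-coded XCR0 restore is not lost: the fpu__resume_cpu() call added to fix_processor_context() above is what re-initializes per-CPU FPU state on resume. A hedged sketch of the assumed shape (the helper name inside is a guess for illustration, not quoted from this patch):

	/*
	 * Assumed behaviour: re-run per-CPU FPU init on resume, which
	 * re-enables xstate and rewrites XCR0 much as the removed
	 * xsetbv(XCR_XFEATURE_ENABLED_MASK, ...) call did.
	 */
	void fpu__resume_cpu(void)
	{
		fpu__init_cpu();	/* assumed helper */
	}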
index 3c4469a7a929c7fa3488707a25829cdc6fcc6411..e2386cb4e0c315f32fae03afd13045c4047eb902 100644 (file)
@@ -78,9 +78,9 @@ ENTRY(restore_image)
 
        /* code below has been relocated to a safe page */
 ENTRY(core_restore_code)
-loop:
+.Lloop:
        testq   %rdx, %rdx
-       jz      done
+       jz      .Ldone
 
        /* get addresses from the pbe and copy the page */
        movq    pbe_address(%rdx), %rsi
@@ -91,8 +91,8 @@ loop:
 
        /* progress to the next pbe */
        movq    pbe_next(%rdx), %rdx
-       jmp     loop
-done:
+       jmp     .Lloop
+.Ldone:
        /* jump to the restore_registers address from the image header */
        jmpq    *%rax
        /*
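
The loop/done to .Lloop/.Ldone rename is not cosmetic: GNU as treats symbols starting with .L as assembler-local, so they never enter the symbol table and cannot clash with, or be resolved against, identically named globals; that matters here because this code is copied to a safe page and executed relocated. A minimal C illustration of the same property ("%=" gives each asm expansion a unique label suffix):

	/* .L-prefixed labels stay out of the object's symbol table. */
	static inline unsigned long countdown(unsigned long n)
	{
		asm volatile(".Lspin%=:\n\t"
			     "dec  %0\n\t"
			     "jnz  .Lspin%=\n"
			     : "+r" (n));
		return n;
	}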
diff --git a/arch/x86/syscalls/Makefile b/arch/x86/syscalls/Makefile
deleted file mode 100644 (file)
index a55abb9..0000000
+++ /dev/null
@@ -1,69 +0,0 @@
-out := $(obj)/../include/generated/asm
-uapi := $(obj)/../include/generated/uapi/asm
-
-# Create output directory if not already present
-_dummy := $(shell [ -d '$(out)' ] || mkdir -p '$(out)') \
-         $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)')
-
-syscall32 := $(srctree)/$(src)/syscall_32.tbl
-syscall64 := $(srctree)/$(src)/syscall_64.tbl
-
-syshdr := $(srctree)/$(src)/syscallhdr.sh
-systbl := $(srctree)/$(src)/syscalltbl.sh
-
-quiet_cmd_syshdr = SYSHDR  $@
-      cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
-                  '$(syshdr_abi_$(basetarget))' \
-                  '$(syshdr_pfx_$(basetarget))' \
-                  '$(syshdr_offset_$(basetarget))'
-quiet_cmd_systbl = SYSTBL  $@
-      cmd_systbl = $(CONFIG_SHELL) '$(systbl)' $< $@
-
-quiet_cmd_hypercalls = HYPERCALLS $@
-      cmd_hypercalls = $(CONFIG_SHELL) '$<' $@ $(filter-out $<,$^)
-
-syshdr_abi_unistd_32 := i386
-$(uapi)/unistd_32.h: $(syscall32) $(syshdr)
-       $(call if_changed,syshdr)
-
-syshdr_abi_unistd_32_ia32 := i386
-syshdr_pfx_unistd_32_ia32 := ia32_
-$(out)/unistd_32_ia32.h: $(syscall32) $(syshdr)
-       $(call if_changed,syshdr)
-
-syshdr_abi_unistd_x32 := common,x32
-syshdr_offset_unistd_x32 := __X32_SYSCALL_BIT
-$(uapi)/unistd_x32.h: $(syscall64) $(syshdr)
-       $(call if_changed,syshdr)
-
-syshdr_abi_unistd_64 := common,64
-$(uapi)/unistd_64.h: $(syscall64) $(syshdr)
-       $(call if_changed,syshdr)
-
-syshdr_abi_unistd_64_x32 := x32
-syshdr_pfx_unistd_64_x32 := x32_
-$(out)/unistd_64_x32.h: $(syscall64) $(syshdr)
-       $(call if_changed,syshdr)
-
-$(out)/syscalls_32.h: $(syscall32) $(systbl)
-       $(call if_changed,systbl)
-$(out)/syscalls_64.h: $(syscall64) $(systbl)
-       $(call if_changed,systbl)
-
-$(out)/xen-hypercalls.h: $(srctree)/scripts/xen-hypercalls.sh
-       $(call if_changed,hypercalls)
-
-$(out)/xen-hypercalls.h: $(srctree)/include/xen/interface/xen*.h
-
-uapisyshdr-y                   += unistd_32.h unistd_64.h unistd_x32.h
-syshdr-y                       += syscalls_32.h
-syshdr-$(CONFIG_X86_64)                += unistd_32_ia32.h unistd_64_x32.h
-syshdr-$(CONFIG_X86_64)                += syscalls_64.h
-syshdr-$(CONFIG_XEN)           += xen-hypercalls.h
-
-targets        += $(uapisyshdr-y) $(syshdr-y)
-
-PHONY += all
-all: $(addprefix $(uapi)/,$(uapisyshdr-y))
-all: $(addprefix $(out)/,$(syshdr-y))
-       @:
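
The rules above drive two small generators, syscallhdr.sh and syscalltbl.sh (both deleted further below, since the whole directory moved under arch/x86/entry in this series). Feeding syscall_64.tbl through the header generator yields number definitions of this shape:

	/* excerpt of the generated unistd_64.h */
	#define __NR_read 0
	#define __NR_write 1
	#define __NR_open 2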
diff --git a/arch/x86/syscalls/syscall_32.tbl b/arch/x86/syscalls/syscall_32.tbl
deleted file mode 100644 (file)
index ef8187f..0000000
+++ /dev/null
@@ -1,367 +0,0 @@
-#
-# 32-bit system call numbers and entry vectors
-#
-# The format is:
-# <number> <abi> <name> <entry point> <compat entry point>
-#
-# The abi is always "i386" for this file.
-#
-0      i386    restart_syscall         sys_restart_syscall
-1      i386    exit                    sys_exit
-2      i386    fork                    sys_fork                        stub32_fork
-3      i386    read                    sys_read
-4      i386    write                   sys_write
-5      i386    open                    sys_open                        compat_sys_open
-6      i386    close                   sys_close
-7      i386    waitpid                 sys_waitpid                     sys32_waitpid
-8      i386    creat                   sys_creat
-9      i386    link                    sys_link
-10     i386    unlink                  sys_unlink
-11     i386    execve                  sys_execve                      stub32_execve
-12     i386    chdir                   sys_chdir
-13     i386    time                    sys_time                        compat_sys_time
-14     i386    mknod                   sys_mknod
-15     i386    chmod                   sys_chmod
-16     i386    lchown                  sys_lchown16
-17     i386    break
-18     i386    oldstat                 sys_stat
-19     i386    lseek                   sys_lseek                       compat_sys_lseek
-20     i386    getpid                  sys_getpid
-21     i386    mount                   sys_mount                       compat_sys_mount
-22     i386    umount                  sys_oldumount
-23     i386    setuid                  sys_setuid16
-24     i386    getuid                  sys_getuid16
-25     i386    stime                   sys_stime                       compat_sys_stime
-26     i386    ptrace                  sys_ptrace                      compat_sys_ptrace
-27     i386    alarm                   sys_alarm
-28     i386    oldfstat                sys_fstat
-29     i386    pause                   sys_pause
-30     i386    utime                   sys_utime                       compat_sys_utime
-31     i386    stty
-32     i386    gtty
-33     i386    access                  sys_access
-34     i386    nice                    sys_nice
-35     i386    ftime
-36     i386    sync                    sys_sync
-37     i386    kill                    sys_kill
-38     i386    rename                  sys_rename
-39     i386    mkdir                   sys_mkdir
-40     i386    rmdir                   sys_rmdir
-41     i386    dup                     sys_dup
-42     i386    pipe                    sys_pipe
-43     i386    times                   sys_times                       compat_sys_times
-44     i386    prof
-45     i386    brk                     sys_brk
-46     i386    setgid                  sys_setgid16
-47     i386    getgid                  sys_getgid16
-48     i386    signal                  sys_signal
-49     i386    geteuid                 sys_geteuid16
-50     i386    getegid                 sys_getegid16
-51     i386    acct                    sys_acct
-52     i386    umount2                 sys_umount
-53     i386    lock
-54     i386    ioctl                   sys_ioctl                       compat_sys_ioctl
-55     i386    fcntl                   sys_fcntl                       compat_sys_fcntl64
-56     i386    mpx
-57     i386    setpgid                 sys_setpgid
-58     i386    ulimit
-59     i386    oldolduname             sys_olduname
-60     i386    umask                   sys_umask
-61     i386    chroot                  sys_chroot
-62     i386    ustat                   sys_ustat                       compat_sys_ustat
-63     i386    dup2                    sys_dup2
-64     i386    getppid                 sys_getppid
-65     i386    getpgrp                 sys_getpgrp
-66     i386    setsid                  sys_setsid
-67     i386    sigaction               sys_sigaction                   compat_sys_sigaction
-68     i386    sgetmask                sys_sgetmask
-69     i386    ssetmask                sys_ssetmask
-70     i386    setreuid                sys_setreuid16
-71     i386    setregid                sys_setregid16
-72     i386    sigsuspend              sys_sigsuspend                  sys_sigsuspend
-73     i386    sigpending              sys_sigpending                  compat_sys_sigpending
-74     i386    sethostname             sys_sethostname
-75     i386    setrlimit               sys_setrlimit                   compat_sys_setrlimit
-76     i386    getrlimit               sys_old_getrlimit               compat_sys_old_getrlimit
-77     i386    getrusage               sys_getrusage                   compat_sys_getrusage
-78     i386    gettimeofday            sys_gettimeofday                compat_sys_gettimeofday
-79     i386    settimeofday            sys_settimeofday                compat_sys_settimeofday
-80     i386    getgroups               sys_getgroups16
-81     i386    setgroups               sys_setgroups16
-82     i386    select                  sys_old_select                  compat_sys_old_select
-83     i386    symlink                 sys_symlink
-84     i386    oldlstat                sys_lstat
-85     i386    readlink                sys_readlink
-86     i386    uselib                  sys_uselib
-87     i386    swapon                  sys_swapon
-88     i386    reboot                  sys_reboot
-89     i386    readdir                 sys_old_readdir                 compat_sys_old_readdir
-90     i386    mmap                    sys_old_mmap                    sys32_mmap
-91     i386    munmap                  sys_munmap
-92     i386    truncate                sys_truncate                    compat_sys_truncate
-93     i386    ftruncate               sys_ftruncate                   compat_sys_ftruncate
-94     i386    fchmod                  sys_fchmod
-95     i386    fchown                  sys_fchown16
-96     i386    getpriority             sys_getpriority
-97     i386    setpriority             sys_setpriority
-98     i386    profil
-99     i386    statfs                  sys_statfs                      compat_sys_statfs
-100    i386    fstatfs                 sys_fstatfs                     compat_sys_fstatfs
-101    i386    ioperm                  sys_ioperm
-102    i386    socketcall              sys_socketcall                  compat_sys_socketcall
-103    i386    syslog                  sys_syslog
-104    i386    setitimer               sys_setitimer                   compat_sys_setitimer
-105    i386    getitimer               sys_getitimer                   compat_sys_getitimer
-106    i386    stat                    sys_newstat                     compat_sys_newstat
-107    i386    lstat                   sys_newlstat                    compat_sys_newlstat
-108    i386    fstat                   sys_newfstat                    compat_sys_newfstat
-109    i386    olduname                sys_uname
-110    i386    iopl                    sys_iopl
-111    i386    vhangup                 sys_vhangup
-112    i386    idle
-113    i386    vm86old                 sys_vm86old                     sys_ni_syscall
-114    i386    wait4                   sys_wait4                       compat_sys_wait4
-115    i386    swapoff                 sys_swapoff
-116    i386    sysinfo                 sys_sysinfo                     compat_sys_sysinfo
-117    i386    ipc                     sys_ipc                         compat_sys_ipc
-118    i386    fsync                   sys_fsync
-119    i386    sigreturn               sys_sigreturn                   stub32_sigreturn
-120    i386    clone                   sys_clone                       stub32_clone
-121    i386    setdomainname           sys_setdomainname
-122    i386    uname                   sys_newuname
-123    i386    modify_ldt              sys_modify_ldt
-124    i386    adjtimex                sys_adjtimex                    compat_sys_adjtimex
-125    i386    mprotect                sys_mprotect
-126    i386    sigprocmask             sys_sigprocmask                 compat_sys_sigprocmask
-127    i386    create_module
-128    i386    init_module             sys_init_module
-129    i386    delete_module           sys_delete_module
-130    i386    get_kernel_syms
-131    i386    quotactl                sys_quotactl                    sys32_quotactl
-132    i386    getpgid                 sys_getpgid
-133    i386    fchdir                  sys_fchdir
-134    i386    bdflush                 sys_bdflush
-135    i386    sysfs                   sys_sysfs
-136    i386    personality             sys_personality
-137    i386    afs_syscall
-138    i386    setfsuid                sys_setfsuid16
-139    i386    setfsgid                sys_setfsgid16
-140    i386    _llseek                 sys_llseek
-141    i386    getdents                sys_getdents                    compat_sys_getdents
-142    i386    _newselect              sys_select                      compat_sys_select
-143    i386    flock                   sys_flock
-144    i386    msync                   sys_msync
-145    i386    readv                   sys_readv                       compat_sys_readv
-146    i386    writev                  sys_writev                      compat_sys_writev
-147    i386    getsid                  sys_getsid
-148    i386    fdatasync               sys_fdatasync
-149    i386    _sysctl                 sys_sysctl                      compat_sys_sysctl
-150    i386    mlock                   sys_mlock
-151    i386    munlock                 sys_munlock
-152    i386    mlockall                sys_mlockall
-153    i386    munlockall              sys_munlockall
-154    i386    sched_setparam          sys_sched_setparam
-155    i386    sched_getparam          sys_sched_getparam
-156    i386    sched_setscheduler      sys_sched_setscheduler
-157    i386    sched_getscheduler      sys_sched_getscheduler
-158    i386    sched_yield             sys_sched_yield
-159    i386    sched_get_priority_max  sys_sched_get_priority_max
-160    i386    sched_get_priority_min  sys_sched_get_priority_min
-161    i386    sched_rr_get_interval   sys_sched_rr_get_interval       compat_sys_sched_rr_get_interval
-162    i386    nanosleep               sys_nanosleep                   compat_sys_nanosleep
-163    i386    mremap                  sys_mremap
-164    i386    setresuid               sys_setresuid16
-165    i386    getresuid               sys_getresuid16
-166    i386    vm86                    sys_vm86                        sys_ni_syscall
-167    i386    query_module
-168    i386    poll                    sys_poll
-169    i386    nfsservctl
-170    i386    setresgid               sys_setresgid16
-171    i386    getresgid               sys_getresgid16
-172    i386    prctl                   sys_prctl
-173    i386    rt_sigreturn            sys_rt_sigreturn                stub32_rt_sigreturn
-174    i386    rt_sigaction            sys_rt_sigaction                compat_sys_rt_sigaction
-175    i386    rt_sigprocmask          sys_rt_sigprocmask
-176    i386    rt_sigpending           sys_rt_sigpending               compat_sys_rt_sigpending
-177    i386    rt_sigtimedwait         sys_rt_sigtimedwait             compat_sys_rt_sigtimedwait
-178    i386    rt_sigqueueinfo         sys_rt_sigqueueinfo             compat_sys_rt_sigqueueinfo
-179    i386    rt_sigsuspend           sys_rt_sigsuspend
-180    i386    pread64                 sys_pread64                     sys32_pread
-181    i386    pwrite64                sys_pwrite64                    sys32_pwrite
-182    i386    chown                   sys_chown16
-183    i386    getcwd                  sys_getcwd
-184    i386    capget                  sys_capget
-185    i386    capset                  sys_capset
-186    i386    sigaltstack             sys_sigaltstack                 compat_sys_sigaltstack
-187    i386    sendfile                sys_sendfile                    compat_sys_sendfile
-188    i386    getpmsg
-189    i386    putpmsg
-190    i386    vfork                   sys_vfork                       stub32_vfork
-191    i386    ugetrlimit              sys_getrlimit                   compat_sys_getrlimit
-192    i386    mmap2                   sys_mmap_pgoff
-193    i386    truncate64              sys_truncate64                  sys32_truncate64
-194    i386    ftruncate64             sys_ftruncate64                 sys32_ftruncate64
-195    i386    stat64                  sys_stat64                      sys32_stat64
-196    i386    lstat64                 sys_lstat64                     sys32_lstat64
-197    i386    fstat64                 sys_fstat64                     sys32_fstat64
-198    i386    lchown32                sys_lchown
-199    i386    getuid32                sys_getuid
-200    i386    getgid32                sys_getgid
-201    i386    geteuid32               sys_geteuid
-202    i386    getegid32               sys_getegid
-203    i386    setreuid32              sys_setreuid
-204    i386    setregid32              sys_setregid
-205    i386    getgroups32             sys_getgroups
-206    i386    setgroups32             sys_setgroups
-207    i386    fchown32                sys_fchown
-208    i386    setresuid32             sys_setresuid
-209    i386    getresuid32             sys_getresuid
-210    i386    setresgid32             sys_setresgid
-211    i386    getresgid32             sys_getresgid
-212    i386    chown32                 sys_chown
-213    i386    setuid32                sys_setuid
-214    i386    setgid32                sys_setgid
-215    i386    setfsuid32              sys_setfsuid
-216    i386    setfsgid32              sys_setfsgid
-217    i386    pivot_root              sys_pivot_root
-218    i386    mincore                 sys_mincore
-219    i386    madvise                 sys_madvise
-220    i386    getdents64              sys_getdents64                  compat_sys_getdents64
-221    i386    fcntl64                 sys_fcntl64                     compat_sys_fcntl64
-# 222 is unused
-# 223 is unused
-224    i386    gettid                  sys_gettid
-225    i386    readahead               sys_readahead                   sys32_readahead
-226    i386    setxattr                sys_setxattr
-227    i386    lsetxattr               sys_lsetxattr
-228    i386    fsetxattr               sys_fsetxattr
-229    i386    getxattr                sys_getxattr
-230    i386    lgetxattr               sys_lgetxattr
-231    i386    fgetxattr               sys_fgetxattr
-232    i386    listxattr               sys_listxattr
-233    i386    llistxattr              sys_llistxattr
-234    i386    flistxattr              sys_flistxattr
-235    i386    removexattr             sys_removexattr
-236    i386    lremovexattr            sys_lremovexattr
-237    i386    fremovexattr            sys_fremovexattr
-238    i386    tkill                   sys_tkill
-239    i386    sendfile64              sys_sendfile64
-240    i386    futex                   sys_futex                       compat_sys_futex
-241    i386    sched_setaffinity       sys_sched_setaffinity           compat_sys_sched_setaffinity
-242    i386    sched_getaffinity       sys_sched_getaffinity           compat_sys_sched_getaffinity
-243    i386    set_thread_area         sys_set_thread_area
-244    i386    get_thread_area         sys_get_thread_area
-245    i386    io_setup                sys_io_setup                    compat_sys_io_setup
-246    i386    io_destroy              sys_io_destroy
-247    i386    io_getevents            sys_io_getevents                compat_sys_io_getevents
-248    i386    io_submit               sys_io_submit                   compat_sys_io_submit
-249    i386    io_cancel               sys_io_cancel
-250    i386    fadvise64               sys_fadvise64                   sys32_fadvise64
-# 251 is available for reuse (was briefly sys_set_zone_reclaim)
-252    i386    exit_group              sys_exit_group
-253    i386    lookup_dcookie          sys_lookup_dcookie              compat_sys_lookup_dcookie
-254    i386    epoll_create            sys_epoll_create
-255    i386    epoll_ctl               sys_epoll_ctl
-256    i386    epoll_wait              sys_epoll_wait
-257    i386    remap_file_pages        sys_remap_file_pages
-258    i386    set_tid_address         sys_set_tid_address
-259    i386    timer_create            sys_timer_create                compat_sys_timer_create
-260    i386    timer_settime           sys_timer_settime               compat_sys_timer_settime
-261    i386    timer_gettime           sys_timer_gettime               compat_sys_timer_gettime
-262    i386    timer_getoverrun        sys_timer_getoverrun
-263    i386    timer_delete            sys_timer_delete
-264    i386    clock_settime           sys_clock_settime               compat_sys_clock_settime
-265    i386    clock_gettime           sys_clock_gettime               compat_sys_clock_gettime
-266    i386    clock_getres            sys_clock_getres                compat_sys_clock_getres
-267    i386    clock_nanosleep         sys_clock_nanosleep             compat_sys_clock_nanosleep
-268    i386    statfs64                sys_statfs64                    compat_sys_statfs64
-269    i386    fstatfs64               sys_fstatfs64                   compat_sys_fstatfs64
-270    i386    tgkill                  sys_tgkill
-271    i386    utimes                  sys_utimes                      compat_sys_utimes
-272    i386    fadvise64_64            sys_fadvise64_64                sys32_fadvise64_64
-273    i386    vserver
-274    i386    mbind                   sys_mbind
-275    i386    get_mempolicy           sys_get_mempolicy               compat_sys_get_mempolicy
-276    i386    set_mempolicy           sys_set_mempolicy
-277    i386    mq_open                 sys_mq_open                     compat_sys_mq_open
-278    i386    mq_unlink               sys_mq_unlink
-279    i386    mq_timedsend            sys_mq_timedsend                compat_sys_mq_timedsend
-280    i386    mq_timedreceive         sys_mq_timedreceive             compat_sys_mq_timedreceive
-281    i386    mq_notify               sys_mq_notify                   compat_sys_mq_notify
-282    i386    mq_getsetattr           sys_mq_getsetattr               compat_sys_mq_getsetattr
-283    i386    kexec_load              sys_kexec_load                  compat_sys_kexec_load
-284    i386    waitid                  sys_waitid                      compat_sys_waitid
-# 285 sys_setaltroot
-286    i386    add_key                 sys_add_key
-287    i386    request_key             sys_request_key
-288    i386    keyctl                  sys_keyctl
-289    i386    ioprio_set              sys_ioprio_set
-290    i386    ioprio_get              sys_ioprio_get
-291    i386    inotify_init            sys_inotify_init
-292    i386    inotify_add_watch       sys_inotify_add_watch
-293    i386    inotify_rm_watch        sys_inotify_rm_watch
-294    i386    migrate_pages           sys_migrate_pages
-295    i386    openat                  sys_openat                      compat_sys_openat
-296    i386    mkdirat                 sys_mkdirat
-297    i386    mknodat                 sys_mknodat
-298    i386    fchownat                sys_fchownat
-299    i386    futimesat               sys_futimesat                   compat_sys_futimesat
-300    i386    fstatat64               sys_fstatat64                   sys32_fstatat
-301    i386    unlinkat                sys_unlinkat
-302    i386    renameat                sys_renameat
-303    i386    linkat                  sys_linkat
-304    i386    symlinkat               sys_symlinkat
-305    i386    readlinkat              sys_readlinkat
-306    i386    fchmodat                sys_fchmodat
-307    i386    faccessat               sys_faccessat
-308    i386    pselect6                sys_pselect6                    compat_sys_pselect6
-309    i386    ppoll                   sys_ppoll                       compat_sys_ppoll
-310    i386    unshare                 sys_unshare
-311    i386    set_robust_list         sys_set_robust_list             compat_sys_set_robust_list
-312    i386    get_robust_list         sys_get_robust_list             compat_sys_get_robust_list
-313    i386    splice                  sys_splice
-314    i386    sync_file_range         sys_sync_file_range             sys32_sync_file_range
-315    i386    tee                     sys_tee
-316    i386    vmsplice                sys_vmsplice                    compat_sys_vmsplice
-317    i386    move_pages              sys_move_pages                  compat_sys_move_pages
-318    i386    getcpu                  sys_getcpu
-319    i386    epoll_pwait             sys_epoll_pwait
-320    i386    utimensat               sys_utimensat                   compat_sys_utimensat
-321    i386    signalfd                sys_signalfd                    compat_sys_signalfd
-322    i386    timerfd_create          sys_timerfd_create
-323    i386    eventfd                 sys_eventfd
-324    i386    fallocate               sys_fallocate                   sys32_fallocate
-325    i386    timerfd_settime         sys_timerfd_settime             compat_sys_timerfd_settime
-326    i386    timerfd_gettime         sys_timerfd_gettime             compat_sys_timerfd_gettime
-327    i386    signalfd4               sys_signalfd4                   compat_sys_signalfd4
-328    i386    eventfd2                sys_eventfd2
-329    i386    epoll_create1           sys_epoll_create1
-330    i386    dup3                    sys_dup3
-331    i386    pipe2                   sys_pipe2
-332    i386    inotify_init1           sys_inotify_init1
-333    i386    preadv                  sys_preadv                      compat_sys_preadv
-334    i386    pwritev                 sys_pwritev                     compat_sys_pwritev
-335    i386    rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo           compat_sys_rt_tgsigqueueinfo
-336    i386    perf_event_open         sys_perf_event_open
-337    i386    recvmmsg                sys_recvmmsg                    compat_sys_recvmmsg
-338    i386    fanotify_init           sys_fanotify_init
-339    i386    fanotify_mark           sys_fanotify_mark               compat_sys_fanotify_mark
-340    i386    prlimit64               sys_prlimit64
-341    i386    name_to_handle_at       sys_name_to_handle_at
-342    i386    open_by_handle_at       sys_open_by_handle_at           compat_sys_open_by_handle_at
-343    i386    clock_adjtime           sys_clock_adjtime               compat_sys_clock_adjtime
-344    i386    syncfs                  sys_syncfs
-345    i386    sendmmsg                sys_sendmmsg                    compat_sys_sendmmsg
-346    i386    setns                   sys_setns
-347    i386    process_vm_readv        sys_process_vm_readv            compat_sys_process_vm_readv
-348    i386    process_vm_writev       sys_process_vm_writev           compat_sys_process_vm_writev
-349    i386    kcmp                    sys_kcmp
-350    i386    finit_module            sys_finit_module
-351    i386    sched_setattr           sys_sched_setattr
-352    i386    sched_getattr           sys_sched_getattr
-353    i386    renameat2               sys_renameat2
-354    i386    seccomp                 sys_seccomp
-355    i386    getrandom               sys_getrandom
-356    i386    memfd_create            sys_memfd_create
-357    i386    bpf                     sys_bpf
-358    i386    execveat                sys_execveat                    stub32_execveat
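
Taking row 5 (open) above as a worked example, the two generator scripts below turn it into a number definition and a table-entry macro respectively (a sketch of the generated shape, not copied from a real build):

	/* from syscallhdr.sh into unistd_32.h */
	#define __NR_open 5

	/* from syscalltbl.sh into syscalls_32.h; the kernel defines
	 * __SYSCALL_I386() to turn this line into a table initializer. */
	__SYSCALL_I386(5, sys_open, compat_sys_open)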
diff --git a/arch/x86/syscalls/syscall_64.tbl b/arch/x86/syscalls/syscall_64.tbl
deleted file mode 100644 (file)
index 9ef32d5..0000000
+++ /dev/null
@@ -1,370 +0,0 @@
-#
-# 64-bit system call numbers and entry vectors
-#
-# The format is:
-# <number> <abi> <name> <entry point>
-#
-# The abi is "common", "64" or "x32" for this file.
-#
-0      common  read                    sys_read
-1      common  write                   sys_write
-2      common  open                    sys_open
-3      common  close                   sys_close
-4      common  stat                    sys_newstat
-5      common  fstat                   sys_newfstat
-6      common  lstat                   sys_newlstat
-7      common  poll                    sys_poll
-8      common  lseek                   sys_lseek
-9      common  mmap                    sys_mmap
-10     common  mprotect                sys_mprotect
-11     common  munmap                  sys_munmap
-12     common  brk                     sys_brk
-13     64      rt_sigaction            sys_rt_sigaction
-14     common  rt_sigprocmask          sys_rt_sigprocmask
-15     64      rt_sigreturn            stub_rt_sigreturn
-16     64      ioctl                   sys_ioctl
-17     common  pread64                 sys_pread64
-18     common  pwrite64                sys_pwrite64
-19     64      readv                   sys_readv
-20     64      writev                  sys_writev
-21     common  access                  sys_access
-22     common  pipe                    sys_pipe
-23     common  select                  sys_select
-24     common  sched_yield             sys_sched_yield
-25     common  mremap                  sys_mremap
-26     common  msync                   sys_msync
-27     common  mincore                 sys_mincore
-28     common  madvise                 sys_madvise
-29     common  shmget                  sys_shmget
-30     common  shmat                   sys_shmat
-31     common  shmctl                  sys_shmctl
-32     common  dup                     sys_dup
-33     common  dup2                    sys_dup2
-34     common  pause                   sys_pause
-35     common  nanosleep               sys_nanosleep
-36     common  getitimer               sys_getitimer
-37     common  alarm                   sys_alarm
-38     common  setitimer               sys_setitimer
-39     common  getpid                  sys_getpid
-40     common  sendfile                sys_sendfile64
-41     common  socket                  sys_socket
-42     common  connect                 sys_connect
-43     common  accept                  sys_accept
-44     common  sendto                  sys_sendto
-45     64      recvfrom                sys_recvfrom
-46     64      sendmsg                 sys_sendmsg
-47     64      recvmsg                 sys_recvmsg
-48     common  shutdown                sys_shutdown
-49     common  bind                    sys_bind
-50     common  listen                  sys_listen
-51     common  getsockname             sys_getsockname
-52     common  getpeername             sys_getpeername
-53     common  socketpair              sys_socketpair
-54     64      setsockopt              sys_setsockopt
-55     64      getsockopt              sys_getsockopt
-56     common  clone                   stub_clone
-57     common  fork                    stub_fork
-58     common  vfork                   stub_vfork
-59     64      execve                  stub_execve
-60     common  exit                    sys_exit
-61     common  wait4                   sys_wait4
-62     common  kill                    sys_kill
-63     common  uname                   sys_newuname
-64     common  semget                  sys_semget
-65     common  semop                   sys_semop
-66     common  semctl                  sys_semctl
-67     common  shmdt                   sys_shmdt
-68     common  msgget                  sys_msgget
-69     common  msgsnd                  sys_msgsnd
-70     common  msgrcv                  sys_msgrcv
-71     common  msgctl                  sys_msgctl
-72     common  fcntl                   sys_fcntl
-73     common  flock                   sys_flock
-74     common  fsync                   sys_fsync
-75     common  fdatasync               sys_fdatasync
-76     common  truncate                sys_truncate
-77     common  ftruncate               sys_ftruncate
-78     common  getdents                sys_getdents
-79     common  getcwd                  sys_getcwd
-80     common  chdir                   sys_chdir
-81     common  fchdir                  sys_fchdir
-82     common  rename                  sys_rename
-83     common  mkdir                   sys_mkdir
-84     common  rmdir                   sys_rmdir
-85     common  creat                   sys_creat
-86     common  link                    sys_link
-87     common  unlink                  sys_unlink
-88     common  symlink                 sys_symlink
-89     common  readlink                sys_readlink
-90     common  chmod                   sys_chmod
-91     common  fchmod                  sys_fchmod
-92     common  chown                   sys_chown
-93     common  fchown                  sys_fchown
-94     common  lchown                  sys_lchown
-95     common  umask                   sys_umask
-96     common  gettimeofday            sys_gettimeofday
-97     common  getrlimit               sys_getrlimit
-98     common  getrusage               sys_getrusage
-99     common  sysinfo                 sys_sysinfo
-100    common  times                   sys_times
-101    64      ptrace                  sys_ptrace
-102    common  getuid                  sys_getuid
-103    common  syslog                  sys_syslog
-104    common  getgid                  sys_getgid
-105    common  setuid                  sys_setuid
-106    common  setgid                  sys_setgid
-107    common  geteuid                 sys_geteuid
-108    common  getegid                 sys_getegid
-109    common  setpgid                 sys_setpgid
-110    common  getppid                 sys_getppid
-111    common  getpgrp                 sys_getpgrp
-112    common  setsid                  sys_setsid
-113    common  setreuid                sys_setreuid
-114    common  setregid                sys_setregid
-115    common  getgroups               sys_getgroups
-116    common  setgroups               sys_setgroups
-117    common  setresuid               sys_setresuid
-118    common  getresuid               sys_getresuid
-119    common  setresgid               sys_setresgid
-120    common  getresgid               sys_getresgid
-121    common  getpgid                 sys_getpgid
-122    common  setfsuid                sys_setfsuid
-123    common  setfsgid                sys_setfsgid
-124    common  getsid                  sys_getsid
-125    common  capget                  sys_capget
-126    common  capset                  sys_capset
-127    64      rt_sigpending           sys_rt_sigpending
-128    64      rt_sigtimedwait         sys_rt_sigtimedwait
-129    64      rt_sigqueueinfo         sys_rt_sigqueueinfo
-130    common  rt_sigsuspend           sys_rt_sigsuspend
-131    64      sigaltstack             sys_sigaltstack
-132    common  utime                   sys_utime
-133    common  mknod                   sys_mknod
-134    64      uselib
-135    common  personality             sys_personality
-136    common  ustat                   sys_ustat
-137    common  statfs                  sys_statfs
-138    common  fstatfs                 sys_fstatfs
-139    common  sysfs                   sys_sysfs
-140    common  getpriority             sys_getpriority
-141    common  setpriority             sys_setpriority
-142    common  sched_setparam          sys_sched_setparam
-143    common  sched_getparam          sys_sched_getparam
-144    common  sched_setscheduler      sys_sched_setscheduler
-145    common  sched_getscheduler      sys_sched_getscheduler
-146    common  sched_get_priority_max  sys_sched_get_priority_max
-147    common  sched_get_priority_min  sys_sched_get_priority_min
-148    common  sched_rr_get_interval   sys_sched_rr_get_interval
-149    common  mlock                   sys_mlock
-150    common  munlock                 sys_munlock
-151    common  mlockall                sys_mlockall
-152    common  munlockall              sys_munlockall
-153    common  vhangup                 sys_vhangup
-154    common  modify_ldt              sys_modify_ldt
-155    common  pivot_root              sys_pivot_root
-156    64      _sysctl                 sys_sysctl
-157    common  prctl                   sys_prctl
-158    common  arch_prctl              sys_arch_prctl
-159    common  adjtimex                sys_adjtimex
-160    common  setrlimit               sys_setrlimit
-161    common  chroot                  sys_chroot
-162    common  sync                    sys_sync
-163    common  acct                    sys_acct
-164    common  settimeofday            sys_settimeofday
-165    common  mount                   sys_mount
-166    common  umount2                 sys_umount
-167    common  swapon                  sys_swapon
-168    common  swapoff                 sys_swapoff
-169    common  reboot                  sys_reboot
-170    common  sethostname             sys_sethostname
-171    common  setdomainname           sys_setdomainname
-172    common  iopl                    sys_iopl
-173    common  ioperm                  sys_ioperm
-174    64      create_module
-175    common  init_module             sys_init_module
-176    common  delete_module           sys_delete_module
-177    64      get_kernel_syms
-178    64      query_module
-179    common  quotactl                sys_quotactl
-180    64      nfsservctl
-181    common  getpmsg
-182    common  putpmsg
-183    common  afs_syscall
-184    common  tuxcall
-185    common  security
-186    common  gettid                  sys_gettid
-187    common  readahead               sys_readahead
-188    common  setxattr                sys_setxattr
-189    common  lsetxattr               sys_lsetxattr
-190    common  fsetxattr               sys_fsetxattr
-191    common  getxattr                sys_getxattr
-192    common  lgetxattr               sys_lgetxattr
-193    common  fgetxattr               sys_fgetxattr
-194    common  listxattr               sys_listxattr
-195    common  llistxattr              sys_llistxattr
-196    common  flistxattr              sys_flistxattr
-197    common  removexattr             sys_removexattr
-198    common  lremovexattr            sys_lremovexattr
-199    common  fremovexattr            sys_fremovexattr
-200    common  tkill                   sys_tkill
-201    common  time                    sys_time
-202    common  futex                   sys_futex
-203    common  sched_setaffinity       sys_sched_setaffinity
-204    common  sched_getaffinity       sys_sched_getaffinity
-205    64      set_thread_area
-206    64      io_setup                sys_io_setup
-207    common  io_destroy              sys_io_destroy
-208    common  io_getevents            sys_io_getevents
-209    64      io_submit               sys_io_submit
-210    common  io_cancel               sys_io_cancel
-211    64      get_thread_area
-212    common  lookup_dcookie          sys_lookup_dcookie
-213    common  epoll_create            sys_epoll_create
-214    64      epoll_ctl_old
-215    64      epoll_wait_old
-216    common  remap_file_pages        sys_remap_file_pages
-217    common  getdents64              sys_getdents64
-218    common  set_tid_address         sys_set_tid_address
-219    common  restart_syscall         sys_restart_syscall
-220    common  semtimedop              sys_semtimedop
-221    common  fadvise64               sys_fadvise64
-222    64      timer_create            sys_timer_create
-223    common  timer_settime           sys_timer_settime
-224    common  timer_gettime           sys_timer_gettime
-225    common  timer_getoverrun        sys_timer_getoverrun
-226    common  timer_delete            sys_timer_delete
-227    common  clock_settime           sys_clock_settime
-228    common  clock_gettime           sys_clock_gettime
-229    common  clock_getres            sys_clock_getres
-230    common  clock_nanosleep         sys_clock_nanosleep
-231    common  exit_group              sys_exit_group
-232    common  epoll_wait              sys_epoll_wait
-233    common  epoll_ctl               sys_epoll_ctl
-234    common  tgkill                  sys_tgkill
-235    common  utimes                  sys_utimes
-236    64      vserver
-237    common  mbind                   sys_mbind
-238    common  set_mempolicy           sys_set_mempolicy
-239    common  get_mempolicy           sys_get_mempolicy
-240    common  mq_open                 sys_mq_open
-241    common  mq_unlink               sys_mq_unlink
-242    common  mq_timedsend            sys_mq_timedsend
-243    common  mq_timedreceive         sys_mq_timedreceive
-244    64      mq_notify               sys_mq_notify
-245    common  mq_getsetattr           sys_mq_getsetattr
-246    64      kexec_load              sys_kexec_load
-247    64      waitid                  sys_waitid
-248    common  add_key                 sys_add_key
-249    common  request_key             sys_request_key
-250    common  keyctl                  sys_keyctl
-251    common  ioprio_set              sys_ioprio_set
-252    common  ioprio_get              sys_ioprio_get
-253    common  inotify_init            sys_inotify_init
-254    common  inotify_add_watch       sys_inotify_add_watch
-255    common  inotify_rm_watch        sys_inotify_rm_watch
-256    common  migrate_pages           sys_migrate_pages
-257    common  openat                  sys_openat
-258    common  mkdirat                 sys_mkdirat
-259    common  mknodat                 sys_mknodat
-260    common  fchownat                sys_fchownat
-261    common  futimesat               sys_futimesat
-262    common  newfstatat              sys_newfstatat
-263    common  unlinkat                sys_unlinkat
-264    common  renameat                sys_renameat
-265    common  linkat                  sys_linkat
-266    common  symlinkat               sys_symlinkat
-267    common  readlinkat              sys_readlinkat
-268    common  fchmodat                sys_fchmodat
-269    common  faccessat               sys_faccessat
-270    common  pselect6                sys_pselect6
-271    common  ppoll                   sys_ppoll
-272    common  unshare                 sys_unshare
-273    64      set_robust_list         sys_set_robust_list
-274    64      get_robust_list         sys_get_robust_list
-275    common  splice                  sys_splice
-276    common  tee                     sys_tee
-277    common  sync_file_range         sys_sync_file_range
-278    64      vmsplice                sys_vmsplice
-279    64      move_pages              sys_move_pages
-280    common  utimensat               sys_utimensat
-281    common  epoll_pwait             sys_epoll_pwait
-282    common  signalfd                sys_signalfd
-283    common  timerfd_create          sys_timerfd_create
-284    common  eventfd                 sys_eventfd
-285    common  fallocate               sys_fallocate
-286    common  timerfd_settime         sys_timerfd_settime
-287    common  timerfd_gettime         sys_timerfd_gettime
-288    common  accept4                 sys_accept4
-289    common  signalfd4               sys_signalfd4
-290    common  eventfd2                sys_eventfd2
-291    common  epoll_create1           sys_epoll_create1
-292    common  dup3                    sys_dup3
-293    common  pipe2                   sys_pipe2
-294    common  inotify_init1           sys_inotify_init1
-295    64      preadv                  sys_preadv
-296    64      pwritev                 sys_pwritev
-297    64      rt_tgsigqueueinfo       sys_rt_tgsigqueueinfo
-298    common  perf_event_open         sys_perf_event_open
-299    64      recvmmsg                sys_recvmmsg
-300    common  fanotify_init           sys_fanotify_init
-301    common  fanotify_mark           sys_fanotify_mark
-302    common  prlimit64               sys_prlimit64
-303    common  name_to_handle_at       sys_name_to_handle_at
-304    common  open_by_handle_at       sys_open_by_handle_at
-305    common  clock_adjtime           sys_clock_adjtime
-306    common  syncfs                  sys_syncfs
-307    64      sendmmsg                sys_sendmmsg
-308    common  setns                   sys_setns
-309    common  getcpu                  sys_getcpu
-310    64      process_vm_readv        sys_process_vm_readv
-311    64      process_vm_writev       sys_process_vm_writev
-312    common  kcmp                    sys_kcmp
-313    common  finit_module            sys_finit_module
-314    common  sched_setattr           sys_sched_setattr
-315    common  sched_getattr           sys_sched_getattr
-316    common  renameat2               sys_renameat2
-317    common  seccomp                 sys_seccomp
-318    common  getrandom               sys_getrandom
-319    common  memfd_create            sys_memfd_create
-320    common  kexec_file_load         sys_kexec_file_load
-321    common  bpf                     sys_bpf
-322    64      execveat                stub_execveat
-
-#
-# x32-specific system call numbers start at 512 to avoid cache impact
-# for native 64-bit operation.
-#
-512    x32     rt_sigaction            compat_sys_rt_sigaction
-513    x32     rt_sigreturn            stub_x32_rt_sigreturn
-514    x32     ioctl                   compat_sys_ioctl
-515    x32     readv                   compat_sys_readv
-516    x32     writev                  compat_sys_writev
-517    x32     recvfrom                compat_sys_recvfrom
-518    x32     sendmsg                 compat_sys_sendmsg
-519    x32     recvmsg                 compat_sys_recvmsg
-520    x32     execve                  stub_x32_execve
-521    x32     ptrace                  compat_sys_ptrace
-522    x32     rt_sigpending           compat_sys_rt_sigpending
-523    x32     rt_sigtimedwait         compat_sys_rt_sigtimedwait
-524    x32     rt_sigqueueinfo         compat_sys_rt_sigqueueinfo
-525    x32     sigaltstack             compat_sys_sigaltstack
-526    x32     timer_create            compat_sys_timer_create
-527    x32     mq_notify               compat_sys_mq_notify
-528    x32     kexec_load              compat_sys_kexec_load
-529    x32     waitid                  compat_sys_waitid
-530    x32     set_robust_list         compat_sys_set_robust_list
-531    x32     get_robust_list         compat_sys_get_robust_list
-532    x32     vmsplice                compat_sys_vmsplice
-533    x32     move_pages              compat_sys_move_pages
-534    x32     preadv                  compat_sys_preadv64
-535    x32     pwritev                 compat_sys_pwritev64
-536    x32     rt_tgsigqueueinfo       compat_sys_rt_tgsigqueueinfo
-537    x32     recvmmsg                compat_sys_recvmmsg
-538    x32     sendmmsg                compat_sys_sendmmsg
-539    x32     process_vm_readv        compat_sys_process_vm_readv
-540    x32     process_vm_writev       compat_sys_process_vm_writev
-541    x32     setsockopt              compat_sys_setsockopt
-542    x32     getsockopt              compat_sys_getsockopt
-543    x32     io_setup                compat_sys_io_setup
-544    x32     io_submit               compat_sys_io_submit
-545    x32     execveat                stub_x32_execveat
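
A worked example of the 512-based numbering noted above: an x32 process invokes these entries with __X32_SYSCALL_BIT folded into the syscall number, which is what the syshdr_offset_unistd_x32 setting in the deleted Makefile injects into the generated header. Assuming the conventional value of that bit:

	/* Sketch: how entry 512 above is reached from x32 userspace. */
	#define __X32_SYSCALL_BIT 0x40000000L	/* assumed value */
	long nr = __X32_SYSCALL_BIT + 512;	/* 0x40000200: rt_sigaction */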
diff --git a/arch/x86/syscalls/syscallhdr.sh b/arch/x86/syscalls/syscallhdr.sh
deleted file mode 100644 (file)
index 31fd5f1..0000000
+++ /dev/null
@@ -1,27 +0,0 @@
-#!/bin/sh
-
-in="$1"
-out="$2"
-my_abis=`echo "($3)" | tr ',' '|'`
-prefix="$4"
-offset="$5"
-
-fileguard=_ASM_X86_`basename "$out" | sed \
-    -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
-    -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
-grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
-    echo "#ifndef ${fileguard}"
-    echo "#define ${fileguard} 1"
-    echo ""
-
-    while read nr abi name entry ; do
-       if [ -z "$offset" ]; then
-           echo "#define __NR_${prefix}${name} $nr"
-       else
-           echo "#define __NR_${prefix}${name} ($offset + $nr)"
-        fi
-    done
-
-    echo ""
-    echo "#endif /* ${fileguard} */"
-) > "$out"
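
The prefix and offset arguments correspond to the syshdr_pfx_* and syshdr_offset_* variables in the deleted Makefile above. For instance, the ia32_ prefix used for unistd_32_ia32.h and the __X32_SYSCALL_BIT offset used for unistd_x32.h produce, respectively:

	#define __NR_ia32_open 5
	#define __NR_read (__X32_SYSCALL_BIT + 0)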
diff --git a/arch/x86/syscalls/syscalltbl.sh b/arch/x86/syscalls/syscalltbl.sh
deleted file mode 100644 (file)
index 0e7f8ec..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-#!/bin/sh
-
-in="$1"
-out="$2"
-
-grep '^[0-9]' "$in" | sort -n | (
-    while read nr abi name entry compat; do
-       abi=`echo "$abi" | tr '[a-z]' '[A-Z]'`
-       if [ -n "$compat" ]; then
-           echo "__SYSCALL_${abi}($nr, $entry, $compat)"
-       elif [ -n "$entry" ]; then
-           echo "__SYSCALL_${abi}($nr, $entry, $entry)"
-       fi
-    done
-) > "$out"
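
The output is consumed by defining the __SYSCALL_* macros before including the generated file; a hedged sketch of that consumption (array and typedef names are illustrative, and the real table also pre-fills holes with sys_ni_syscall):

	/* Sketch: turning the generated syscalls_64.h into a dispatch table. */
	typedef long (*sys_call_ptr_t)(long, long, long, long, long, long);
	#define __SYSCALL_COMMON(nr, sym, compat)	[nr] = (sys_call_ptr_t)sym,
	#define __SYSCALL_64(nr, sym, compat)		[nr] = (sys_call_ptr_t)sym,
	#define __SYSCALL_X32(nr, sym, compat)		/* handled elsewhere */

	const sys_call_ptr_t sys_call_table[] = {
	#include <asm/syscalls_64.h>
	};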
index acb384d246694e974d5d722edd96856e2774fc1b..a8fecc226946dc1fb65960f84b26f53fde82f57e 100644 (file)
@@ -26,7 +26,7 @@ else
 
 obj-y += syscalls_64.o vdso/
 
-subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../lib/thunk_64.o \
+subarch-y = ../lib/csum-partial_64.o ../lib/memcpy_64.o ../entry/thunk_64.o \
                ../lib/rwsem.o
 
 endif
index 7e8a1a6504356159bd1b604b53de4ed194ddd19c..b9531d34313491b5180878f64ca18f49fb69c94f 100644 (file)
@@ -39,7 +39,8 @@
 #define smp_mb()       barrier()
 #define smp_rmb()      barrier()
 #define smp_wmb()      barrier()
-#define set_mb(var, value) do { var = value; barrier(); } while (0)
+
+#define smp_store_mb(var, value) do { WRITE_ONCE(var, value); barrier(); } while (0)
 
 #define read_barrier_depends()         do { } while (0)
 #define smp_read_barrier_depends()     do { } while (0)
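
smp_store_mb() keeps set_mb()'s contract (store, then full barrier) while making the store a WRITE_ONCE(). Its canonical use is publishing a state change before re-checking a condition, as on the sleeper side of a wait/wake handshake; a minimal sketch of that pattern (not code from this patch):

	/* Classic sleeper pattern served by smp_store_mb(). */
	for (;;) {
		smp_store_mb(current->state, TASK_INTERRUPTIBLE);
		if (wakeup_condition)
			break;
		schedule();
	}
	__set_current_state(TASK_RUNNING);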
diff --git a/arch/x86/vdso/.gitignore b/arch/x86/vdso/.gitignore
deleted file mode 100644 (file)
index aae8ffd..0000000
+++ /dev/null
@@ -1,7 +0,0 @@
-vdso.lds
-vdsox32.lds
-vdso32-syscall-syms.lds
-vdso32-sysenter-syms.lds
-vdso32-int80-syms.lds
-vdso-image-*.c
-vdso2c
diff --git a/arch/x86/vdso/Makefile b/arch/x86/vdso/Makefile
deleted file mode 100644 (file)
index 275a3a8..0000000
+++ /dev/null
@@ -1,209 +0,0 @@
-#
-# Building vDSO images for x86.
-#
-
-KBUILD_CFLAGS += $(DISABLE_LTO)
-KASAN_SANITIZE := n
-
-VDSO64-$(CONFIG_X86_64)                := y
-VDSOX32-$(CONFIG_X86_X32_ABI)  := y
-VDSO32-$(CONFIG_X86_32)                := y
-VDSO32-$(CONFIG_COMPAT)                := y
-
-# files to link into the vdso
-vobjs-y := vdso-note.o vclock_gettime.o vgetcpu.o
-
-# files to link into kernel
-obj-y                          += vma.o
-
-# vDSO images to build
-vdso_img-$(VDSO64-y)           += 64
-vdso_img-$(VDSOX32-y)          += x32
-vdso_img-$(VDSO32-y)           += 32-int80
-vdso_img-$(CONFIG_COMPAT)      += 32-syscall
-vdso_img-$(VDSO32-y)           += 32-sysenter
-
-obj-$(VDSO32-y)                        += vdso32-setup.o
-
-vobjs := $(foreach F,$(vobjs-y),$(obj)/$F)
-
-$(obj)/vdso.o: $(obj)/vdso.so
-
-targets += vdso.lds $(vobjs-y)
-
-# Build the vDSO image C files and link them in.
-vdso_img_objs := $(vdso_img-y:%=vdso-image-%.o)
-vdso_img_cfiles := $(vdso_img-y:%=vdso-image-%.c)
-vdso_img_sodbg := $(vdso_img-y:%=vdso%.so.dbg)
-obj-y += $(vdso_img_objs)
-targets += $(vdso_img_cfiles)
-targets += $(vdso_img_sodbg)
-.SECONDARY: $(vdso_img-y:%=$(obj)/vdso-image-%.c) \
-       $(vdso_img-y:%=$(obj)/vdso%.so)
-
-export CPPFLAGS_vdso.lds += -P -C
-
-VDSO_LDFLAGS_vdso.lds = -m64 -Wl,-soname=linux-vdso.so.1 \
-                       -Wl,--no-undefined \
-                       -Wl,-z,max-page-size=4096 -Wl,-z,common-page-size=4096 \
-                       $(DISABLE_LTO)
-
-$(obj)/vdso64.so.dbg: $(src)/vdso.lds $(vobjs) FORCE
-       $(call if_changed,vdso)
-
-HOST_EXTRACFLAGS += -I$(srctree)/tools/include -I$(srctree)/include/uapi
-hostprogs-y                    += vdso2c
-
-quiet_cmd_vdso2c = VDSO2C  $@
-define cmd_vdso2c
-       $(obj)/vdso2c $< $(<:%.dbg=%) $@
-endef
-
-$(obj)/vdso-image-%.c: $(obj)/vdso%.so.dbg $(obj)/vdso%.so $(obj)/vdso2c FORCE
-       $(call if_changed,vdso2c)
-
-#
-# Don't omit frame pointers for ease of userspace debugging, but do
-# optimize sibling calls.
-#
-CFL := $(PROFILING) -mcmodel=small -fPIC -O2 -fasynchronous-unwind-tables -m64 \
-       $(filter -g%,$(KBUILD_CFLAGS)) $(call cc-option, -fno-stack-protector) \
-       -fno-omit-frame-pointer -foptimize-sibling-calls \
-       -DDISABLE_BRANCH_PROFILING
-
-$(vobjs): KBUILD_CFLAGS += $(CFL)
-
-#
-# vDSO code runs in userspace and -pg doesn't help with profiling anyway.
-#
-CFLAGS_REMOVE_vdso-note.o = -pg
-CFLAGS_REMOVE_vclock_gettime.o = -pg
-CFLAGS_REMOVE_vgetcpu.o = -pg
-CFLAGS_REMOVE_vvar.o = -pg
-
-#
-# x32 processes use the x32 vDSO to access 64-bit kernel data.
-#
-# Build the x32 vDSO image:
-# 1. Compile the x32 vDSO as 64-bit.
-# 2. Convert the object files to x32.
-# 3. Link the x32 vDSO image from the x32 objects; the image still contains
-#    64-bit code so that it can reach the 64-bit address space with 64-bit
-#    pointers.
-#
-
-CPPFLAGS_vdsox32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdsox32.lds = -Wl,-m,elf32_x86_64 \
-                          -Wl,-soname=linux-vdso.so.1 \
-                          -Wl,-z,max-page-size=4096 \
-                          -Wl,-z,common-page-size=4096
-
-# 64-bit objects to re-brand as x32
-vobjs64-for-x32 := $(filter-out $(vobjs-nox32),$(vobjs-y))
-
-# x32-rebranded versions
-vobjx32s-y := $(vobjs64-for-x32:.o=-x32.o)
-
-# same thing, but in the output directory
-vobjx32s := $(foreach F,$(vobjx32s-y),$(obj)/$F)
-
-# Convert 64bit object file to x32 for x32 vDSO.
-quiet_cmd_x32 = X32     $@
-      cmd_x32 = $(OBJCOPY) -O elf32-x86-64 $< $@
-
-$(obj)/%-x32.o: $(obj)/%.o FORCE
-       $(call if_changed,x32)
-
-targets += vdsox32.lds $(vobjx32s-y)
-
-$(obj)/%.so: OBJCOPYFLAGS := -S
-$(obj)/%.so: $(obj)/%.so.dbg
-       $(call if_changed,objcopy)
-
-$(obj)/vdsox32.so.dbg: $(src)/vdsox32.lds $(vobjx32s) FORCE
-       $(call if_changed,vdso)
-
-#
-# Build multiple 32-bit vDSO images to choose from at boot time.
-#
-vdso32.so-$(VDSO32-y)          += int80
-vdso32.so-$(CONFIG_COMPAT)     += syscall
-vdso32.so-$(VDSO32-y)          += sysenter
-
-vdso32-images                  = $(vdso32.so-y:%=vdso32-%.so)
-
-CPPFLAGS_vdso32.lds = $(CPPFLAGS_vdso.lds)
-VDSO_LDFLAGS_vdso32.lds = -m32 -Wl,-m,elf_i386 -Wl,-soname=linux-gate.so.1
-
-# This makes sure the $(obj) subdirectory exists even though vdso32/
-# is not a kbuild sub-make subdirectory.
-override obj-dirs = $(dir $(obj)) $(obj)/vdso32/
-
-targets += vdso32/vdso32.lds
-targets += vdso32/note.o vdso32/vclock_gettime.o $(vdso32.so-y:%=vdso32/%.o)
-targets += vdso32/vclock_gettime.o
-
-$(obj)/vdso32.o: $(vdso32-images:%=$(obj)/%)
-
-KBUILD_AFLAGS_32 := $(filter-out -m64,$(KBUILD_AFLAGS))
-$(vdso32-images:%=$(obj)/%.dbg): KBUILD_AFLAGS = $(KBUILD_AFLAGS_32)
-$(vdso32-images:%=$(obj)/%.dbg): asflags-$(CONFIG_X86_64) += -m32
-
-KBUILD_CFLAGS_32 := $(filter-out -m64,$(KBUILD_CFLAGS))
-KBUILD_CFLAGS_32 := $(filter-out -mcmodel=kernel,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 := $(filter-out -fno-pic,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 := $(filter-out -mfentry,$(KBUILD_CFLAGS_32))
-KBUILD_CFLAGS_32 += -m32 -msoft-float -mregparm=0 -fpic
-KBUILD_CFLAGS_32 += $(call cc-option, -fno-stack-protector)
-KBUILD_CFLAGS_32 += $(call cc-option, -foptimize-sibling-calls)
-KBUILD_CFLAGS_32 += -fno-omit-frame-pointer
-KBUILD_CFLAGS_32 += -DDISABLE_BRANCH_PROFILING
-$(vdso32-images:%=$(obj)/%.dbg): KBUILD_CFLAGS = $(KBUILD_CFLAGS_32)
-
-$(vdso32-images:%=$(obj)/%.dbg): $(obj)/vdso32-%.so.dbg: FORCE \
-                                $(obj)/vdso32/vdso32.lds \
-                                $(obj)/vdso32/vclock_gettime.o \
-                                $(obj)/vdso32/note.o \
-                                $(obj)/vdso32/%.o
-       $(call if_changed,vdso)
-
-#
-# The DSO images are built using a special linker script.
-#
-quiet_cmd_vdso = VDSO    $@
-      cmd_vdso = $(CC) -nostdlib -o $@ \
-                      $(VDSO_LDFLAGS) $(VDSO_LDFLAGS_$(filter %.lds,$(^F))) \
-                      -Wl,-T,$(filter %.lds,$^) $(filter %.o,$^) && \
-                sh $(srctree)/$(src)/checkundef.sh '$(NM)' '$@'
-
-VDSO_LDFLAGS = -fPIC -shared $(call cc-ldoption, -Wl$(comma)--hash-style=sysv) \
-       $(call cc-ldoption, -Wl$(comma)--build-id) -Wl,-Bsymbolic $(LTO_CFLAGS)
-GCOV_PROFILE := n
-
-#
-# Install the unstripped copies of vdso*.so.  If our toolchain supports
-# build-id, install .build-id links as well.
-#
-quiet_cmd_vdso_install = INSTALL $(@:install_%=%)
-define cmd_vdso_install
-       cp $< "$(MODLIB)/vdso/$(@:install_%=%)"; \
-       if readelf -n $< |grep -q 'Build ID'; then \
-         buildid=`readelf -n $< |grep 'Build ID' |sed -e 's/^.*Build ID: \(.*\)$$/\1/'`; \
-         first=`echo $$buildid | cut -b-2`; \
-         last=`echo $$buildid | cut -b3-`; \
-         mkdir -p "$(MODLIB)/vdso/.build-id/$$first"; \
-         ln -sf "../../$(@:install_%=%)" "$(MODLIB)/vdso/.build-id/$$first/$$last.debug"; \
-       fi
-endef
-
-vdso_img_insttargets := $(vdso_img_sodbg:%.dbg=install_%)
-
-$(MODLIB)/vdso: FORCE
-       @mkdir -p $(MODLIB)/vdso
-
-$(vdso_img_insttargets): install_%: $(obj)/%.dbg $(MODLIB)/vdso FORCE
-       $(call cmd,vdso_install)
-
-PHONY += vdso_install $(vdso_img_insttargets)
-vdso_install: $(vdso_img_insttargets) FORCE
-
-clean-files := vdso32-syscall* vdso32-sysenter* vdso32-int80* vdso64* vdso-image-*.c vdsox32.so*
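Worked example of the cmd_vdso_install recipe above (image name and build-id hypothetical): for an installed image vdso64.so whose build-id is abcdef123456, "first" is "ab" and "last" is "cdef123456", so the recipe creates $(MODLIB)/vdso/.build-id/ab/cdef123456.debug as a symlink back to ../../vdso64.so -- the split-directory layout debuggers use to locate an image by its build-id.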
diff --git a/arch/x86/vdso/checkundef.sh b/arch/x86/vdso/checkundef.sh
deleted file mode 100755 (executable)
index 7ee90a9..0000000
+++ /dev/null
@@ -1,10 +0,0 @@
-#!/bin/sh
-nm="$1"
-file="$2"
-$nm "$file" | grep '^ *U' > /dev/null 2>&1
-if [ $? -eq 1 ]; then
-    exit 0
-else
-    echo "$file: undefined symbols found" >&2
-    exit 1
-fi
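Note on the test above: grep exits with status 1 when it matches nothing, so the seemingly inverted check is correct -- no '^ *U' lines means no undefined symbols, and the script exits 0.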
diff --git a/arch/x86/vdso/vclock_gettime.c b/arch/x86/vdso/vclock_gettime.c
deleted file mode 100644 (file)
index 9793322..0000000
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright 2006 Andi Kleen, SUSE Labs.
- * Subject to the GNU Public License, v.2
- *
- * Fast user context implementation of clock_gettime, gettimeofday, and time.
- *
- * 32 Bit compat layer by Stefani Seibold <stefani@seibold.net>
- *  sponsored by Rohde & Schwarz GmbH & Co. KG Munich/Germany
- *
- * The code should have no internal unresolved relocations.
- * Check with readelf after changing.
- */
-
-#include <uapi/linux/time.h>
-#include <asm/vgtod.h>
-#include <asm/hpet.h>
-#include <asm/vvar.h>
-#include <asm/unistd.h>
-#include <asm/msr.h>
-#include <linux/math64.h>
-#include <linux/time.h>
-
-#define gtod (&VVAR(vsyscall_gtod_data))
-
-extern int __vdso_clock_gettime(clockid_t clock, struct timespec *ts);
-extern int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz);
-extern time_t __vdso_time(time_t *t);
-
-#ifdef CONFIG_HPET_TIMER
-extern u8 hpet_page
-       __attribute__((visibility("hidden")));
-
-static notrace cycle_t vread_hpet(void)
-{
-       return *(const volatile u32 *)(&hpet_page + HPET_COUNTER);
-}
-#endif
-
-#ifndef BUILD_VDSO32
-
-#include <linux/kernel.h>
-#include <asm/vsyscall.h>
-#include <asm/fixmap.h>
-#include <asm/pvclock.h>
-
-notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
-{
-       long ret;
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_clock_gettime), "D" (clock), "S" (ts) : "memory");
-       return ret;
-}
-
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-       long ret;
-
-       asm("syscall" : "=a" (ret) :
-           "0" (__NR_gettimeofday), "D" (tv), "S" (tz) : "memory");
-       return ret;
-}
-
-#ifdef CONFIG_PARAVIRT_CLOCK
-
-static notrace const struct pvclock_vsyscall_time_info *get_pvti(int cpu)
-{
-       const struct pvclock_vsyscall_time_info *pvti_base;
-       int idx = cpu / (PAGE_SIZE/PVTI_SIZE);
-       int offset = cpu % (PAGE_SIZE/PVTI_SIZE);
-
-       BUG_ON(PVCLOCK_FIXMAP_BEGIN + idx > PVCLOCK_FIXMAP_END);
-
-       pvti_base = (struct pvclock_vsyscall_time_info *)
-                   __fix_to_virt(PVCLOCK_FIXMAP_BEGIN+idx);
-
-       return &pvti_base[offset];
-}
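Worked example for get_pvti(), assuming 4096-byte pages and 64-byte pvclock_vsyscall_time_info entries (sizes not spelled out in this file): PAGE_SIZE/PVTI_SIZE is 64, so cpu 70 resolves to fixmap page idx 1, entry offset 6.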
-
-static notrace cycle_t vread_pvclock(int *mode)
-{
-       const struct pvclock_vsyscall_time_info *pvti;
-       cycle_t ret;
-       u64 last;
-       u32 version;
-       u8 flags;
-       unsigned cpu, cpu1;
-
-
-       /*
-        * Note: the hypervisor must guarantee that:
-        * 1. the cpu ID number maps 1:1 to per-CPU pvclock time info;
-        * 2. the per-CPU pvclock time info is updated if the
-        *    underlying CPU changes;
-        * 3. the version is increased whenever the underlying CPU
-        *    changes.
-        */
-       do {
-               cpu = __getcpu() & VGETCPU_CPU_MASK;
-               /* TODO: We can put vcpu id into higher bits of pvti.version.
-                * This will save a couple of cycles by getting rid of
-                * __getcpu() calls (Gleb).
-                */
-
-               pvti = get_pvti(cpu);
-
-               version = __pvclock_read_cycles(&pvti->pvti, &ret, &flags);
-
-               /*
-                * Test we're still on the cpu as well as the version.
-                * We could have been migrated just after the first
-                * vgetcpu but before fetching the version, so we
-                * wouldn't notice a version change.
-                */
-               cpu1 = __getcpu() & VGETCPU_CPU_MASK;
-       } while (unlikely(cpu != cpu1 ||
-                         (pvti->pvti.version & 1) ||
-                         pvti->pvti.version != version));
-
-       if (unlikely(!(flags & PVCLOCK_TSC_STABLE_BIT)))
-               *mode = VCLOCK_NONE;
-
-       /* refer to tsc.c read_tsc() comment for rationale */
-       last = gtod->cycle_last;
-
-       if (likely(ret >= last))
-               return ret;
-
-       return last;
-}
-#endif
-
-#else
-
-notrace static long vdso_fallback_gettime(long clock, struct timespec *ts)
-{
-       long ret;
-
-       asm(
-               "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
-               "call __kernel_vsyscall \n"
-               "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_clock_gettime), "g" (clock), "c" (ts)
-               : "memory", "edx");
-       return ret;
-}
-
-notrace static long vdso_fallback_gtod(struct timeval *tv, struct timezone *tz)
-{
-       long ret;
-
-       asm(
-               "mov %%ebx, %%edx \n"
-               "mov %2, %%ebx \n"
-               "call __kernel_vsyscall \n"
-               "mov %%edx, %%ebx \n"
-               : "=a" (ret)
-               : "0" (__NR_gettimeofday), "g" (tv), "c" (tz)
-               : "memory", "edx");
-       return ret;
-}
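In both 32-bit fallbacks above, %ebx is parked in %edx around the call because the kernel expects arg1 in %ebx while the i386 PIC ABI reserves %ebx for the GOT pointer, so GCC will not let inline asm clobber it in position-independent code.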
-
-#ifdef CONFIG_PARAVIRT_CLOCK
-
-static notrace cycle_t vread_pvclock(int *mode)
-{
-       *mode = VCLOCK_NONE;
-       return 0;
-}
-#endif
-
-#endif
-
-notrace static cycle_t vread_tsc(void)
-{
-       cycle_t ret;
-       u64 last;
-
-       /*
-        * Empirically, a fence (of type that depends on the CPU)
-        * before rdtsc is enough to ensure that rdtsc is ordered
-        * with respect to loads.  The various CPU manuals are unclear
-        * as to whether rdtsc can be reordered with later loads,
-        * but no one has ever seen it happen.
-        */
-       rdtsc_barrier();
-       ret = (cycle_t)__native_read_tsc();
-
-       last = gtod->cycle_last;
-
-       if (likely(ret >= last))
-               return ret;
-
-       /*
-        * GCC likes to generate cmov here, but this branch is extremely
-        * predictable (it's just a function of time and the likely is
-        * very likely) and there's a data dependence, so force GCC
-        * to generate a branch instead.  I don't barrier() because
-        * we don't actually need a barrier, and if this function
-        * ever gets inlined it will generate worse code.
-        */
-       asm volatile ("");
-       return last;
-}
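The empty asm above is a common idiom worth seeing in isolation. A minimal, self-contained sketch (illustrative only, not from this file) of using an empty asm statement as an optimizer barrier so GCC keeps the branch instead of if-converting it into a cmov:

        /* Clamp ret below by last; the ret >= last path is overwhelmingly
         * likely, so a predicted branch beats a cmov's data dependence. */
        static inline unsigned long clamp_below(unsigned long ret,
                                                unsigned long last)
        {
                if (ret >= last)
                        return ret;
                /* Opaque to the optimizer, emits no instructions, and is
                 * not a memory barrier; it only blocks if-conversion. */
                asm volatile ("");
                return last;
        }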
-
-notrace static inline u64 vgetsns(int *mode)
-{
-       u64 v;
-       cycles_t cycles;
-
-       if (gtod->vclock_mode == VCLOCK_TSC)
-               cycles = vread_tsc();
-#ifdef CONFIG_HPET_TIMER
-       else if (gtod->vclock_mode == VCLOCK_HPET)
-               cycles = vread_hpet();
-#endif
-#ifdef CONFIG_PARAVIRT_CLOCK
-       else if (gtod->vclock_mode == VCLOCK_PVCLOCK)
-               cycles = vread_pvclock(mode);
-#endif
-       else
-               return 0;
-       v = (cycles - gtod->cycle_last) & gtod->mask;
-       return v * gtod->mult;
-}
-
-/* Code size doesn't matter (vdso is 4k anyway) and this is faster. */
-notrace static int __always_inline do_realtime(struct timespec *ts)
-{
-       unsigned long seq;
-       u64 ns;
-       int mode;
-
-       do {
-               seq = gtod_read_begin(gtod);
-               mode = gtod->vclock_mode;
-               ts->tv_sec = gtod->wall_time_sec;
-               ns = gtod->wall_time_snsec;
-               ns += vgetsns(&mode);
-               ns >>= gtod->shift;
-       } while (unlikely(gtod_read_retry(gtod, seq)));
-
-       ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
-       ts->tv_nsec = ns;
-
-       return mode;
-}
-
-notrace static int __always_inline do_monotonic(struct timespec *ts)
-{
-       unsigned long seq;
-       u64 ns;
-       int mode;
-
-       do {
-               seq = gtod_read_begin(gtod);
-               mode = gtod->vclock_mode;
-               ts->tv_sec = gtod->monotonic_time_sec;
-               ns = gtod->monotonic_time_snsec;
-               ns += vgetsns(&mode);
-               ns >>= gtod->shift;
-       } while (unlikely(gtod_read_retry(gtod, seq)));
-
-       ts->tv_sec += __iter_div_u64_rem(ns, NSEC_PER_SEC, &ns);
-       ts->tv_nsec = ns;
-
-       return mode;
-}
-
-notrace static void do_realtime_coarse(struct timespec *ts)
-{
-       unsigned long seq;
-       do {
-               seq = gtod_read_begin(gtod);
-               ts->tv_sec = gtod->wall_time_coarse_sec;
-               ts->tv_nsec = gtod->wall_time_coarse_nsec;
-       } while (unlikely(gtod_read_retry(gtod, seq)));
-}
-
-notrace static void do_monotonic_coarse(struct timespec *ts)
-{
-       unsigned long seq;
-       do {
-               seq = gtod_read_begin(gtod);
-               ts->tv_sec = gtod->monotonic_time_coarse_sec;
-               ts->tv_nsec = gtod->monotonic_time_coarse_nsec;
-       } while (unlikely(gtod_read_retry(gtod, seq)));
-}
-
-notrace int __vdso_clock_gettime(clockid_t clock, struct timespec *ts)
-{
-       switch (clock) {
-       case CLOCK_REALTIME:
-               if (do_realtime(ts) == VCLOCK_NONE)
-                       goto fallback;
-               break;
-       case CLOCK_MONOTONIC:
-               if (do_monotonic(ts) == VCLOCK_NONE)
-                       goto fallback;
-               break;
-       case CLOCK_REALTIME_COARSE:
-               do_realtime_coarse(ts);
-               break;
-       case CLOCK_MONOTONIC_COARSE:
-               do_monotonic_coarse(ts);
-               break;
-       default:
-               goto fallback;
-       }
-
-       return 0;
-fallback:
-       return vdso_fallback_gettime(clock, ts);
-}
-int clock_gettime(clockid_t, struct timespec *)
-       __attribute__((weak, alias("__vdso_clock_gettime")));
-
-notrace int __vdso_gettimeofday(struct timeval *tv, struct timezone *tz)
-{
-       if (likely(tv != NULL)) {
-               if (unlikely(do_realtime((struct timespec *)tv) == VCLOCK_NONE))
-                       return vdso_fallback_gtod(tv, tz);
-               tv->tv_usec /= 1000;
-       }
-       if (unlikely(tz != NULL)) {
-               tz->tz_minuteswest = gtod->tz_minuteswest;
-               tz->tz_dsttime = gtod->tz_dsttime;
-       }
-
-       return 0;
-}
-int gettimeofday(struct timeval *, struct timezone *)
-       __attribute__((weak, alias("__vdso_gettimeofday")));
-
-/*
- * This will break when the xtime seconds get inaccurate, but that is
- * unlikely.
- */
-notrace time_t __vdso_time(time_t *t)
-{
-       /* This is atomic on x86 so we don't need any locks. */
-       time_t result = ACCESS_ONCE(gtod->wall_time_sec);
-
-       if (t)
-               *t = result;
-       return result;
-}
-int time(time_t *t)
-       __attribute__((weak, alias("__vdso_time")));
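Because each entry point above also carries a weak alias under its libc name, a program that links the vDSO resolves clock_gettime(), gettimeofday() and time() straight to this code; glibc dispatches there when the vDSO is available, so on the VCLOCK_TSC path these calls never enter the kernel. A minimal caller sketch:

        #include <stdio.h>
        #include <time.h>

        int main(void)
        {
                struct timespec ts;

                /* Dispatches to __vdso_clock_gettime via glibc/the vDSO;
                 * internally the read spins on the gtod seqcount and computes
                 * ns = ((cycles - cycle_last) & mask) * mult >> shift. */
                if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0)
                        return 1;
                printf("%ld.%09ld\n", (long)ts.tv_sec, ts.tv_nsec);
                return 0;
        }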
diff --git a/arch/x86/vdso/vdso-layout.lds.S b/arch/x86/vdso/vdso-layout.lds.S
deleted file mode 100644 (file)
index de2c921..0000000
+++ /dev/null
@@ -1,118 +0,0 @@
-#include <asm/vdso.h>
-
-/*
- * Linker script for vDSO.  This is an ELF shared object prelinked to
- * its virtual address, and with only one read-only segment.
- * This script controls its layout.
- */
-
-#if defined(BUILD_VDSO64)
-# define SHDR_SIZE 64
-#elif defined(BUILD_VDSO32) || defined(BUILD_VDSOX32)
-# define SHDR_SIZE 40
-#else
-# error unknown VDSO target
-#endif
-
-#define NUM_FAKE_SHDRS 13
-
-SECTIONS
-{
-       /*
-        * User/kernel shared data is before the vDSO.  This may be a little
-        * uglier than putting it after the vDSO, but it avoids issues with
-        * non-allocatable things that dangle past the end of the PT_LOAD
-        * segment.
-        */
-
-       vvar_start = . - 2 * PAGE_SIZE;
-       vvar_page = vvar_start;
-
-       /* Place all vvars at the offsets in asm/vvar.h. */
-#define EMIT_VVAR(name, offset) vvar_ ## name = vvar_page + offset;
-#define __VVAR_KERNEL_LDS
-#include <asm/vvar.h>
-#undef __VVAR_KERNEL_LDS
-#undef EMIT_VVAR
-
-       hpet_page = vvar_start + PAGE_SIZE;
-
-       . = SIZEOF_HEADERS;
-
-       .hash           : { *(.hash) }                  :text
-       .gnu.hash       : { *(.gnu.hash) }
-       .dynsym         : { *(.dynsym) }
-       .dynstr         : { *(.dynstr) }
-       .gnu.version    : { *(.gnu.version) }
-       .gnu.version_d  : { *(.gnu.version_d) }
-       .gnu.version_r  : { *(.gnu.version_r) }
-
-       .dynamic        : { *(.dynamic) }               :text   :dynamic
-
-       .rodata         : {
-               *(.rodata*)
-               *(.data*)
-               *(.sdata*)
-               *(.got.plt) *(.got)
-               *(.gnu.linkonce.d.*)
-               *(.bss*)
-               *(.dynbss*)
-               *(.gnu.linkonce.b.*)
-
-               /*
-                * Ideally this would live in a C file, but that won't
-                * work cleanly for x32 until we start building the x32
-                * C code using an x32 toolchain.
-                */
-               VDSO_FAKE_SECTION_TABLE_START = .;
-               . = . + NUM_FAKE_SHDRS * SHDR_SIZE;
-               VDSO_FAKE_SECTION_TABLE_END = .;
-       }                                               :text
-
-       .fake_shstrtab  : { *(.fake_shstrtab) }         :text
-
-
-       .note           : { *(.note.*) }                :text   :note
-
-       .eh_frame_hdr   : { *(.eh_frame_hdr) }          :text   :eh_frame_hdr
-       .eh_frame       : { KEEP (*(.eh_frame)) }       :text
-
-
-       /*
-        * Text is well-separated from actual data: there's plenty of
-        * stuff that isn't used at runtime in between.
-        */
-
-       .text           : { *(.text*) }                 :text   =0x90909090,
-
-       /*
-        * At the end so that eu-elflint stays happy when vdso2c strips
-        * these.  A better implementation would avoid allocating space
-        * for these.
-        */
-       .altinstructions        : { *(.altinstructions) }       :text
-       .altinstr_replacement   : { *(.altinstr_replacement) }  :text
-
-       /DISCARD/ : {
-               *(.discard)
-               *(.discard.*)
-               *(__bug_table)
-       }
-}
-
-/*
- * Very old versions of ld do not recognize this name token; use the constant.
- */
-#define PT_GNU_EH_FRAME        0x6474e550
-
-/*
- * We must supply the ELF program headers explicitly to get just one
- * PT_LOAD segment, and set the flags explicitly to make segments read-only.
- */
-PHDRS
-{
-       text            PT_LOAD         FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
-       dynamic         PT_DYNAMIC      FLAGS(4);               /* PF_R */
-       note            PT_NOTE         FLAGS(4);               /* PF_R */
-       eh_frame_hdr    PT_GNU_EH_FRAME;
-}
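The single-PT_LOAD property enforced by the PHDRS block can be checked from userspace against the live vDSO. A sketch, assuming x86-64 and glibc's getauxval():

        #include <elf.h>
        #include <stdio.h>
        #include <sys/auxv.h>

        int main(void)
        {
                /* The kernel publishes the vDSO base address in the
                 * auxiliary vector. */
                const Elf64_Ehdr *ehdr =
                        (const Elf64_Ehdr *)getauxval(AT_SYSINFO_EHDR);
                const Elf64_Phdr *phdr =
                        (const Elf64_Phdr *)((const char *)ehdr + ehdr->e_phoff);
                int i, loads = 0;

                for (i = 0; i < ehdr->e_phnum; i++)
                        if (phdr[i].p_type == PT_LOAD)
                                loads++;
                printf("PT_LOAD segments: %d\n", loads); /* expect 1 */
                return 0;
        }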
diff --git a/arch/x86/vdso/vdso-note.S b/arch/x86/vdso/vdso-note.S
deleted file mode 100644 (file)
index 79a071e..0000000
+++ /dev/null
@@ -1,12 +0,0 @@
-/*
- * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
- * Here we can supply some information useful to userland.
- */
-
-#include <linux/uts.h>
-#include <linux/version.h>
-#include <linux/elfnote.h>
-
-ELFNOTE_START(Linux, 0, "a")
-       .long LINUX_VERSION_CODE
-ELFNOTE_END
diff --git a/arch/x86/vdso/vdso.lds.S b/arch/x86/vdso/vdso.lds.S
deleted file mode 100644 (file)
index 6807932..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-/*
- * Linker script for 64-bit vDSO.
- * We #include the file to define the layout details.
- *
- * This file defines the version script giving the user-exported symbols in
- * the DSO.
- */
-
-#define BUILD_VDSO64
-
-#include "vdso-layout.lds.S"
-
-/*
- * This controls what userland symbols we export from the vDSO.
- */
-VERSION {
-       LINUX_2.6 {
-       global:
-               clock_gettime;
-               __vdso_clock_gettime;
-               gettimeofday;
-               __vdso_gettimeofday;
-               getcpu;
-               __vdso_getcpu;
-               time;
-               __vdso_time;
-       local: *;
-       };
-}
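Each function is exported under two names because the C sources define the plain names -- clock_gettime, gettimeofday, getcpu, time -- as weak aliases of their __vdso_-prefixed counterparts; both spellings resolve to the same code at version LINUX_2.6.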
diff --git a/arch/x86/vdso/vdso2c.c b/arch/x86/vdso/vdso2c.c
deleted file mode 100644 (file)
index 8627db2..0000000
+++ /dev/null
@@ -1,253 +0,0 @@
-/*
- * vdso2c - A vdso image preparation tool
- * Copyright (c) 2014 Andy Lutomirski and others
- * Licensed under the GPL v2
- *
- * vdso2c requires stripped and unstripped input.  It would be trivial
- * to fully strip the input in here, but, for reasons described below,
- * we need to write a section table.  Doing this is more or less
- * equivalent to dropping all non-allocatable sections, but it's
- * easier to let objcopy handle that instead of doing it ourselves.
- * If we ever need to do something fancier than what objcopy provides,
- * it would be straightforward to add here.
- *
- * We keep a section table for a few reasons:
- *
- * The Go runtime had a couple of bugs: it would read the section
- * table to try to figure out how many dynamic symbols there were (it
- * shouldn't have looked at the section table at all) and, if there
- * was no SHT_DYNSYM section table entry, it would use an
- * uninitialized value for the number of symbols.  An empty DYNSYM
- * table would work, but I see no reason not to write a valid one (and
- * keep full performance for old Go programs).  This hack is only
- * needed on x86_64.
- *
- * The bug was introduced on 2012-08-31 by:
- * https://code.google.com/p/go/source/detail?r=56ea40aac72b
- * and was fixed on 2014-06-13 by:
- * https://code.google.com/p/go/source/detail?r=fc1cd5e12595
- *
- * Binutils has issues debugging the vDSO: it reads the section table to
- * find SHT_NOTE; it won't look at PT_NOTE for the in-memory vDSO, which
- * would break build-id if we removed the section table.  Binutils
- * also requires that shstrndx != 0.  See:
- * https://sourceware.org/bugzilla/show_bug.cgi?id=17064
- *
- * elfutils might not look for PT_NOTE if there is a section table at
- * all.  I don't know whether this matters for any practical purpose.
- *
- * For simplicity, rather than hacking up a partial section table, we
- * just write a mostly complete one.  We omit non-dynamic symbols,
- * though, since they're rather large.
- *
- * Once binutils gets fixed, we might be able to drop this for all but
- * the 64-bit vdso, since build-id only works in kernel RPMs, and
- * systems that update to new enough kernel RPMs will likely update
- * binutils in sync.  build-id has never worked for home-built kernel
- * RPMs without manual symlinking, and I suspect that no one ever does
- * that.
- */
-
-#include <inttypes.h>
-#include <stdint.h>
-#include <unistd.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <stdio.h>
-#include <string.h>
-#include <fcntl.h>
-#include <err.h>
-
-#include <sys/mman.h>
-#include <sys/types.h>
-
-#include <tools/le_byteshift.h>
-
-#include <linux/elf.h>
-#include <linux/types.h>
-
-const char *outfilename;
-
-/* Symbols that we need in vdso2c. */
-enum {
-       sym_vvar_start,
-       sym_vvar_page,
-       sym_hpet_page,
-       sym_VDSO_FAKE_SECTION_TABLE_START,
-       sym_VDSO_FAKE_SECTION_TABLE_END,
-};
-
-const int special_pages[] = {
-       sym_vvar_page,
-       sym_hpet_page,
-};
-
-struct vdso_sym {
-       const char *name;
-       bool export;
-};
-
-struct vdso_sym required_syms[] = {
-       [sym_vvar_start] = {"vvar_start", true},
-       [sym_vvar_page] = {"vvar_page", true},
-       [sym_hpet_page] = {"hpet_page", true},
-       [sym_VDSO_FAKE_SECTION_TABLE_START] = {
-               "VDSO_FAKE_SECTION_TABLE_START", false
-       },
-       [sym_VDSO_FAKE_SECTION_TABLE_END] = {
-               "VDSO_FAKE_SECTION_TABLE_END", false
-       },
-       {"VDSO32_NOTE_MASK", true},
-       {"VDSO32_SYSENTER_RETURN", true},
-       {"__kernel_vsyscall", true},
-       {"__kernel_sigreturn", true},
-       {"__kernel_rt_sigreturn", true},
-};
-
-__attribute__((format(printf, 1, 2))) __attribute__((noreturn))
-static void fail(const char *format, ...)
-{
-       va_list ap;
-       va_start(ap, format);
-       fprintf(stderr, "Error: ");
-       vfprintf(stderr, format, ap);
-       va_end(ap);
-       if (outfilename)
-               unlink(outfilename);
-       exit(1);
-}
-
-/*
- * Evil macros for little-endian reads and writes
- */
-#define GLE(x, bits, ifnot)                                            \
-       __builtin_choose_expr(                                          \
-               (sizeof(*(x)) == bits/8),                               \
-               (__typeof__(*(x)))get_unaligned_le##bits(x), ifnot)
-
-extern void bad_get_le(void);
-#define LAST_GLE(x)                                                    \
-       __builtin_choose_expr(sizeof(*(x)) == 1, *(x), bad_get_le())
-
-#define GET_LE(x)                                                      \
-       GLE(x, 64, GLE(x, 32, GLE(x, 16, LAST_GLE(x))))
-
-#define PLE(x, val, bits, ifnot)                                       \
-       __builtin_choose_expr(                                          \
-               (sizeof(*(x)) == bits/8),                               \
-               put_unaligned_le##bits((val), (x)), ifnot)
-
-extern void bad_put_le(void);
-#define LAST_PLE(x, val)                                               \
-       __builtin_choose_expr(sizeof(*(x)) == 1, *(x) = (val), bad_put_le())
-
-#define PUT_LE(x, val)                                 \
-       PLE(x, val, 64, PLE(x, val, 32, PLE(x, val, 16, LAST_PLE(x, val))))
-
-
-#define NSYMS (sizeof(required_syms) / sizeof(required_syms[0]))
-
-#define BITSFUNC3(name, bits, suffix) name##bits##suffix
-#define BITSFUNC2(name, bits, suffix) BITSFUNC3(name, bits, suffix)
-#define BITSFUNC(name) BITSFUNC2(name, ELF_BITS, )
-
-#define INT_BITS BITSFUNC2(int, ELF_BITS, _t)
-
-#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
-#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
-#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
-
-#define ELF_BITS 64
-#include "vdso2c.h"
-#undef ELF_BITS
-
-#define ELF_BITS 32
-#include "vdso2c.h"
-#undef ELF_BITS
-
-static void go(void *raw_addr, size_t raw_len,
-              void *stripped_addr, size_t stripped_len,
-              FILE *outfile, const char *name)
-{
-       Elf64_Ehdr *hdr = (Elf64_Ehdr *)raw_addr;
-
-       if (hdr->e_ident[EI_CLASS] == ELFCLASS64) {
-               go64(raw_addr, raw_len, stripped_addr, stripped_len,
-                    outfile, name);
-       } else if (hdr->e_ident[EI_CLASS] == ELFCLASS32) {
-               go32(raw_addr, raw_len, stripped_addr, stripped_len,
-                    outfile, name);
-       } else {
-               fail("unknown ELF class\n");
-       }
-}
-
-static void map_input(const char *name, void **addr, size_t *len, int prot)
-{
-       off_t tmp_len;
-
-       int fd = open(name, O_RDONLY);
-       if (fd == -1)
-               err(1, "%s", name);
-
-       tmp_len = lseek(fd, 0, SEEK_END);
-       if (tmp_len == (off_t)-1)
-               err(1, "lseek");
-       *len = (size_t)tmp_len;
-
-       *addr = mmap(NULL, tmp_len, prot, MAP_PRIVATE, fd, 0);
-       if (*addr == MAP_FAILED)
-               err(1, "mmap");
-
-       close(fd);
-}
-
-int main(int argc, char **argv)
-{
-       size_t raw_len, stripped_len;
-       void *raw_addr, *stripped_addr;
-       FILE *outfile;
-       char *name, *tmp;
-       int namelen;
-
-       if (argc != 4) {
-               printf("Usage: vdso2c RAW_INPUT STRIPPED_INPUT OUTPUT\n");
-               return 1;
-       }
-
-       /*
-        * Figure out the struct name.  If we're writing to a .so file,
-        * generate raw output instead.
-        */
-       name = strdup(argv[3]);
-       namelen = strlen(name);
-       if (namelen >= 3 && !strcmp(name + namelen - 3, ".so")) {
-               name = NULL;
-       } else {
-               tmp = strrchr(name, '/');
-               if (tmp)
-                       name = tmp + 1;
-               tmp = strchr(name, '.');
-               if (tmp)
-                       *tmp = '\0';
-               for (tmp = name; *tmp; tmp++)
-                       if (*tmp == '-')
-                               *tmp = '_';
-       }
-
-       map_input(argv[1], &raw_addr, &raw_len, PROT_READ);
-       map_input(argv[2], &stripped_addr, &stripped_len, PROT_READ);
-
-       outfilename = argv[3];
-       outfile = fopen(outfilename, "w");
-       if (!outfile)
-               err(1, "%s", outfilename);
-
-       go(raw_addr, raw_len, stripped_addr, stripped_len, outfile, name);
-
-       munmap(raw_addr, raw_len);
-       munmap(stripped_addr, stripped_len);
-       fclose(outfile);
-
-       return 0;
-}
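The GET_LE/PUT_LE machinery above hinges on __builtin_choose_expr picking an accessor by operand size at compile time, with an undefined extern as the "no size matched" trap. A stripped-down, self-contained sketch of the same idiom -- accessors stubbed with memcpy, which matches the real get_unaligned_le*() helpers only on little-endian hosts:

        #include <stdint.h>
        #include <stdio.h>
        #include <string.h>

        static uint16_t rd16(const void *p) { uint16_t v; memcpy(&v, p, 2); return v; }
        static uint32_t rd32(const void *p) { uint32_t v; memcpy(&v, p, 4); return v; }

        extern void bad_rd(void);       /* link failure if no size matches */
        #define RD(x)                                                   \
                __builtin_choose_expr(sizeof(*(x)) == 4, rd32(x),       \
                __builtin_choose_expr(sizeof(*(x)) == 2, rd16(x),       \
                                      bad_rd()))

        int main(void)
        {
                uint16_t a = 0x1234;
                uint32_t b = 0xabcd5678;

                /* Each RD() resolves to the right accessor at compile time. */
                printf("%x %x\n", RD(&a), (unsigned)RD(&b));
                return 0;
        }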
diff --git a/arch/x86/vdso/vdso2c.h b/arch/x86/vdso/vdso2c.h
deleted file mode 100644 (file)
index 0224987..0000000
+++ /dev/null
@@ -1,175 +0,0 @@
-/*
- * This file is included twice from vdso2c.c.  It generates code for 32-bit
- * and 64-bit vDSOs.  We need both for 64-bit builds, since 32-bit vDSOs
- * are built for 32-bit userspace.
- */
-
-static void BITSFUNC(go)(void *raw_addr, size_t raw_len,
-                        void *stripped_addr, size_t stripped_len,
-                        FILE *outfile, const char *name)
-{
-       int found_load = 0;
-       unsigned long load_size = -1;  /* Work around bogus warning */
-       unsigned long mapping_size;
-       ELF(Ehdr) *hdr = (ELF(Ehdr) *)raw_addr;
-       int i;
-       unsigned long j;
-       ELF(Shdr) *symtab_hdr = NULL, *strtab_hdr, *secstrings_hdr,
-               *alt_sec = NULL;
-       ELF(Dyn) *dyn = 0, *dyn_end = 0;
-       const char *secstrings;
-       INT_BITS syms[NSYMS] = {};
-
-       ELF(Phdr) *pt = (ELF(Phdr) *)(raw_addr + GET_LE(&hdr->e_phoff));
-
-       /* Walk the segment table. */
-       for (i = 0; i < GET_LE(&hdr->e_phnum); i++) {
-               if (GET_LE(&pt[i].p_type) == PT_LOAD) {
-                       if (found_load)
-                               fail("multiple PT_LOAD segs\n");
-
-                       if (GET_LE(&pt[i].p_offset) != 0 ||
-                           GET_LE(&pt[i].p_vaddr) != 0)
-                               fail("PT_LOAD in wrong place\n");
-
-                       if (GET_LE(&pt[i].p_memsz) != GET_LE(&pt[i].p_filesz))
-                               fail("cannot handle memsz != filesz\n");
-
-                       load_size = GET_LE(&pt[i].p_memsz);
-                       found_load = 1;
-               } else if (GET_LE(&pt[i].p_type) == PT_DYNAMIC) {
-                       dyn = raw_addr + GET_LE(&pt[i].p_offset);
-                       dyn_end = raw_addr + GET_LE(&pt[i].p_offset) +
-                               GET_LE(&pt[i].p_memsz);
-               }
-       }
-       if (!found_load)
-               fail("no PT_LOAD seg\n");
-
-       if (stripped_len < load_size)
-               fail("stripped input is too short\n");
-
-       /* Walk the dynamic table */
-       for (i = 0; dyn + i < dyn_end &&
-                    GET_LE(&dyn[i].d_tag) != DT_NULL; i++) {
-               typeof(dyn[i].d_tag) tag = GET_LE(&dyn[i].d_tag);
-               if (tag == DT_REL || tag == DT_RELSZ || tag == DT_RELA ||
-                   tag == DT_RELENT || tag == DT_TEXTREL)
-                       fail("vdso image contains dynamic relocations\n");
-       }
-
-       /* Walk the section table */
-       secstrings_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
-               GET_LE(&hdr->e_shentsize)*GET_LE(&hdr->e_shstrndx);
-       secstrings = raw_addr + GET_LE(&secstrings_hdr->sh_offset);
-       for (i = 0; i < GET_LE(&hdr->e_shnum); i++) {
-               ELF(Shdr) *sh = raw_addr + GET_LE(&hdr->e_shoff) +
-                       GET_LE(&hdr->e_shentsize) * i;
-               if (GET_LE(&sh->sh_type) == SHT_SYMTAB)
-                       symtab_hdr = sh;
-
-               if (!strcmp(secstrings + GET_LE(&sh->sh_name),
-                           ".altinstructions"))
-                       alt_sec = sh;
-       }
-
-       if (!symtab_hdr)
-               fail("no symbol table\n");
-
-       strtab_hdr = raw_addr + GET_LE(&hdr->e_shoff) +
-               GET_LE(&hdr->e_shentsize) * GET_LE(&symtab_hdr->sh_link);
-
-       /* Walk the symbol table */
-       for (i = 0;
-            i < GET_LE(&symtab_hdr->sh_size) / GET_LE(&symtab_hdr->sh_entsize);
-            i++) {
-               int k;
-               ELF(Sym) *sym = raw_addr + GET_LE(&symtab_hdr->sh_offset) +
-                       GET_LE(&symtab_hdr->sh_entsize) * i;
-               const char *name = raw_addr + GET_LE(&strtab_hdr->sh_offset) +
-                       GET_LE(&sym->st_name);
-
-               for (k = 0; k < NSYMS; k++) {
-                       if (!strcmp(name, required_syms[k].name)) {
-                               if (syms[k]) {
-                                       fail("duplicate symbol %s\n",
-                                            required_syms[k].name);
-                               }
-
-                               /*
-                                * Careful: we use negative addresses, but
-                                * st_value is unsigned, so we rely
-                                * on syms[k] being a signed type of the
-                                * correct width.
-                                */
-                               syms[k] = GET_LE(&sym->st_value);
-                       }
-               }
-       }
-
-       /* Validate mapping addresses. */
-       for (i = 0; i < sizeof(special_pages) / sizeof(special_pages[0]); i++) {
-               INT_BITS symval = syms[special_pages[i]];
-
-               if (!symval)
-                       continue;  /* The mapping isn't used; ignore it. */
-
-               if (symval % 4096)
-                       fail("%s must be a multiple of 4096\n",
-                            required_syms[special_pages[i]].name);
-               if (symval + 4096 < syms[sym_vvar_start])
-                       fail("%s underruns vvar_start\n",
-                            required_syms[special_pages[i]].name);
-               if (symval + 4096 > 0)
-                       fail("%s is on the wrong side of the vdso text\n",
-                            required_syms[special_pages[i]].name);
-       }
-       if (syms[sym_vvar_start] % 4096)
-               fail("vvar_start must be a multiple of 4096\n");
-
-       if (!name) {
-               fwrite(stripped_addr, stripped_len, 1, outfile);
-               return;
-       }
-
-       mapping_size = (stripped_len + 4095) / 4096 * 4096;
-
-       fprintf(outfile, "/* AUTOMATICALLY GENERATED -- DO NOT EDIT */\n\n");
-       fprintf(outfile, "#include <linux/linkage.h>\n");
-       fprintf(outfile, "#include <asm/page_types.h>\n");
-       fprintf(outfile, "#include <asm/vdso.h>\n");
-       fprintf(outfile, "\n");
-       fprintf(outfile,
-               "static unsigned char raw_data[%lu] __page_aligned_data = {",
-               mapping_size);
-       for (j = 0; j < stripped_len; j++) {
-               if (j % 10 == 0)
-                       fprintf(outfile, "\n\t");
-               fprintf(outfile, "0x%02X, ",
-                       (int)((unsigned char *)stripped_addr)[j]);
-       }
-       fprintf(outfile, "\n};\n\n");
-
-       fprintf(outfile, "static struct page *pages[%lu];\n\n",
-               mapping_size / 4096);
-
-       fprintf(outfile, "const struct vdso_image %s = {\n", name);
-       fprintf(outfile, "\t.data = raw_data,\n");
-       fprintf(outfile, "\t.size = %lu,\n", mapping_size);
-       fprintf(outfile, "\t.text_mapping = {\n");
-       fprintf(outfile, "\t\t.name = \"[vdso]\",\n");
-       fprintf(outfile, "\t\t.pages = pages,\n");
-       fprintf(outfile, "\t},\n");
-       if (alt_sec) {
-               fprintf(outfile, "\t.alt = %lu,\n",
-                       (unsigned long)GET_LE(&alt_sec->sh_offset));
-               fprintf(outfile, "\t.alt_len = %lu,\n",
-                       (unsigned long)GET_LE(&alt_sec->sh_size));
-       }
-       for (i = 0; i < NSYMS; i++) {
-               if (required_syms[i].export && syms[i])
-                       fprintf(outfile, "\t.sym_%s = %" PRIi64 ",\n",
-                               required_syms[i].name, (int64_t)syms[i]);
-       }
-       fprintf(outfile, "};\n");
-}
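Macro-pasting example: with ELF_BITS defined to 64 at the first inclusion, BITSFUNC(go) expands to go64 and ELF(Phdr) to Elf64_Phdr; the second inclusion with ELF_BITS 32 stamps out go32 and the Elf32_* variants from the same source.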
diff --git a/arch/x86/vdso/vdso32-setup.c b/arch/x86/vdso/vdso32-setup.c
deleted file mode 100644 (file)
index e904c27..0000000
+++ /dev/null
@@ -1,120 +0,0 @@
-/*
- * (C) Copyright 2002 Linus Torvalds
- * Portions based on the vdso-randomization code from exec-shield:
- * Copyright(C) 2005-2006, Red Hat, Inc., Ingo Molnar
- *
- * This file contains the needed initializations to support sysenter.
- */
-
-#include <linux/init.h>
-#include <linux/smp.h>
-#include <linux/kernel.h>
-#include <linux/mm_types.h>
-
-#include <asm/cpufeature.h>
-#include <asm/processor.h>
-#include <asm/vdso.h>
-
-#ifdef CONFIG_COMPAT_VDSO
-#define VDSO_DEFAULT   0
-#else
-#define VDSO_DEFAULT   1
-#endif
-
-/*
- * Should the kernel map a VDSO page into processes and pass its
- * address down to glibc upon exec()?
- */
-unsigned int __read_mostly vdso32_enabled = VDSO_DEFAULT;
-
-static int __init vdso32_setup(char *s)
-{
-       vdso32_enabled = simple_strtoul(s, NULL, 0);
-
-       if (vdso32_enabled > 1)
-               pr_warn("vdso32 values other than 0 and 1 are no longer allowed; vdso disabled\n");
-
-       return 1;
-}
-
-/*
- * For consistency, the argument vdso32=[01] affects the 32-bit vDSO
- * behavior on both 64-bit and 32-bit kernels.
- * On 32-bit kernels, vdso=[01] means the same thing.
- */
-__setup("vdso32=", vdso32_setup);
-
-#ifdef CONFIG_X86_32
-__setup_param("vdso=", vdso_setup, vdso32_setup, 0);
-#endif
-
-#ifdef CONFIG_X86_64
-
-#define        vdso32_sysenter()       (boot_cpu_has(X86_FEATURE_SYSENTER32))
-#define        vdso32_syscall()        (boot_cpu_has(X86_FEATURE_SYSCALL32))
-
-#else  /* CONFIG_X86_32 */
-
-#define vdso32_sysenter()      (boot_cpu_has(X86_FEATURE_SEP))
-#define vdso32_syscall()       (0)
-
-#endif /* CONFIG_X86_64 */
-
-#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
-const struct vdso_image *selected_vdso32;
-#endif
-
-int __init sysenter_setup(void)
-{
-#ifdef CONFIG_COMPAT
-       if (vdso32_syscall())
-               selected_vdso32 = &vdso_image_32_syscall;
-       else
-#endif
-       if (vdso32_sysenter())
-               selected_vdso32 = &vdso_image_32_sysenter;
-       else
-               selected_vdso32 = &vdso_image_32_int80;
-
-       init_vdso_image(selected_vdso32);
-
-       return 0;
-}
-
-#ifdef CONFIG_X86_64
-
-subsys_initcall(sysenter_setup);
-
-#ifdef CONFIG_SYSCTL
-/* Register vsyscall32 into the ABI table */
-#include <linux/sysctl.h>
-
-static struct ctl_table abi_table2[] = {
-       {
-               .procname       = "vsyscall32",
-               .data           = &vdso32_enabled,
-               .maxlen         = sizeof(int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec
-       },
-       {}
-};
-
-static struct ctl_table abi_root_table2[] = {
-       {
-               .procname = "abi",
-               .mode = 0555,
-               .child = abi_table2
-       },
-       {}
-};
-
-static __init int ia32_binfmt_init(void)
-{
-       register_sysctl_table(abi_root_table2);
-       return 0;
-}
-__initcall(ia32_binfmt_init);
-#endif /* CONFIG_SYSCTL */
-
-#endif /* CONFIG_X86_64 */
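Runtime example: the sysctl registered above appears as /proc/sys/abi/vsyscall32; writing 0 to it clears vdso32_enabled on a running 64-bit kernel, with the same effect as booting with vdso32=0.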
diff --git a/arch/x86/vdso/vdso32/.gitignore b/arch/x86/vdso/vdso32/.gitignore
deleted file mode 100644 (file)
index e45fba9..0000000
+++ /dev/null
@@ -1 +0,0 @@
-vdso32.lds
diff --git a/arch/x86/vdso/vdso32/int80.S b/arch/x86/vdso/vdso32/int80.S
deleted file mode 100644 (file)
index b15b7c0..0000000
+++ /dev/null
@@ -1,56 +0,0 @@
-/*
- * Code for the vDSO.  This version uses the old int $0x80 method.
- *
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
-       .text
-       .globl __kernel_vsyscall
-       .type __kernel_vsyscall,@function
-       ALIGN
-__kernel_vsyscall:
-.LSTART_vsyscall:
-       int $0x80
-       ret
-.LEND_vsyscall:
-       .size __kernel_vsyscall,.-.LSTART_vsyscall
-       .previous
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAMEDLSI:
-       .long .LENDCIEDLSI-.LSTARTCIEDLSI
-.LSTARTCIEDLSI:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zR"            /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0x0c              /* DW_CFA_def_cfa */
-       .uleb128 4
-       .uleb128 4
-       .byte 0x88              /* DW_CFA_offset, column 0x8 */
-       .uleb128 1
-       .align 4
-.LENDCIEDLSI:
-       .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
-.LSTARTFDEDLSI:
-       .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
-       .long .LSTART_vsyscall-.        /* PC-relative start address */
-       .long .LEND_vsyscall-.LSTART_vsyscall
-       .uleb128 0
-       .align 4
-.LENDFDEDLSI:
-       .previous
-
-       /*
-        * Pad out the segment to match the size of the sysenter.S version.
-        */
-VDSO32_vsyscall_eh_frame_size = 0x40
-       .section .data,"aw",@progbits
-       .space VDSO32_vsyscall_eh_frame_size-(.LENDFDEDLSI-.LSTARTFRAMEDLSI), 0
-       .previous
diff --git a/arch/x86/vdso/vdso32/note.S b/arch/x86/vdso/vdso32/note.S
deleted file mode 100644 (file)
index c83f257..0000000
+++ /dev/null
@@ -1,44 +0,0 @@
-/*
- * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
- * Here we can supply some information useful to userland.
- */
-
-#include <linux/version.h>
-#include <linux/elfnote.h>
-
-/* Ideally this would use UTS_NAME, but using a quoted string here
-   doesn't work. Remember to change this when changing the
-   kernel's name. */
-ELFNOTE_START(Linux, 0, "a")
-       .long LINUX_VERSION_CODE
-ELFNOTE_END
-
-#ifdef CONFIG_XEN
-/*
- * Add a special note telling glibc's dynamic linker a fake hardware
- * flavor that it will use to choose the search path for libraries in the
- * same way it uses real hardware capabilities like "mmx".
- * We supply "nosegneg" as the fake capability, to indicate that we
- * do not like negative offsets in instructions using segment overrides,
- * since we implement those inefficiently.  This makes it possible to
- * install libraries optimized to avoid those access patterns somewhere
- * like /lib/i686/tls/nosegneg.  Note that an /etc/ld.so.conf.d/ file
- * corresponding to the bits here is needed to make ldconfig work right.
- * It should contain:
- *     hwcap 1 nosegneg
- * to match the mapping of bit to name that we give here.
- *
- * At runtime, the fake hardware feature will be considered to be present
- * if its bit is set in the mask word.  So, we start with the mask 0, and
- * at boot time we set VDSO_NOTE_NONEGSEG_BIT if running under Xen.
- */
-
-#include "../../xen/vdso.h"    /* Defines VDSO_NOTE_NONEGSEG_BIT.  */
-
-ELFNOTE_START(GNU, 2, "a")
-       .long 1                 /* ncaps */
-VDSO32_NOTE_MASK:              /* Symbol used by arch/x86/xen/setup.c */
-       .long 0                 /* mask */
-       .byte VDSO_NOTE_NONEGSEG_BIT; .asciz "nosegneg" /* bit, name */
-ELFNOTE_END
-#endif
diff --git a/arch/x86/vdso/vdso32/sigreturn.S b/arch/x86/vdso/vdso32/sigreturn.S
deleted file mode 100644 (file)
index d7ec4e2..0000000
+++ /dev/null
@@ -1,145 +0,0 @@
-/*
- * Common code for the sigreturn entry points in vDSO images.
- * So far this code is the same for both int80 and sysenter versions.
- * This file is #include'd by int80.S et al to define them first thing.
- * The kernel assumes that the addresses of these routines are constant
- * for all vDSO implementations.
- */
-
-#include <linux/linkage.h>
-#include <asm/unistd_32.h>
-#include <asm/asm-offsets.h>
-
-#ifndef SYSCALL_ENTER_KERNEL
-#define        SYSCALL_ENTER_KERNEL    int $0x80
-#endif
-
-       .text
-       .globl __kernel_sigreturn
-       .type __kernel_sigreturn,@function
-       nop /* this guy is needed for .LSTARTFDEDLSI1 below (watch for HACK) */
-       ALIGN
-__kernel_sigreturn:
-.LSTART_sigreturn:
-       popl %eax               /* XXX does this mean it needs unwind info? */
-       movl $__NR_sigreturn, %eax
-       SYSCALL_ENTER_KERNEL
-.LEND_sigreturn:
-       nop
-       .size __kernel_sigreturn,.-.LSTART_sigreturn
-
-       .globl __kernel_rt_sigreturn
-       .type __kernel_rt_sigreturn,@function
-       ALIGN
-__kernel_rt_sigreturn:
-.LSTART_rt_sigreturn:
-       movl $__NR_rt_sigreturn, %eax
-       SYSCALL_ENTER_KERNEL
-.LEND_rt_sigreturn:
-       nop
-       .size __kernel_rt_sigreturn,.-.LSTART_rt_sigreturn
-       .previous
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAMEDLSI1:
-       .long .LENDCIEDLSI1-.LSTARTCIEDLSI1
-.LSTARTCIEDLSI1:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zRS"           /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0                 /* DW_CFA_nop */
-       .align 4
-.LENDCIEDLSI1:
-       .long .LENDFDEDLSI1-.LSTARTFDEDLSI1 /* Length FDE */
-.LSTARTFDEDLSI1:
-       .long .LSTARTFDEDLSI1-.LSTARTFRAMEDLSI1 /* CIE pointer */
-       /* HACK: The dwarf2 unwind routines will subtract 1 from the
-          return address to get an address in the middle of the
-          presumed call instruction.  Since we didn't get here via
-          a call, we need to include the nop before the real start
-          to make up for it.  */
-       .long .LSTART_sigreturn-1-.     /* PC-relative start address */
-       .long .LEND_sigreturn-.LSTART_sigreturn+1
-       .uleb128 0                      /* Augmentation */
-       /* What follows are the instructions for the table generation.
-          We record the locations of each register saved.  This is
-          complicated by the fact that the "CFA" is always assumed to
-          be the value of the stack pointer in the caller.  This means
-          that we must define the CFA of this body of code to be the
-          saved value of the stack pointer in the sigcontext.  Which
-          also means that there is no fixed relation to the other
-          saved registers, which means that we must use DW_CFA_expression
-          to compute their addresses.  It also means that when we
-          adjust the stack with the popl, we have to do it all over again.  */
-
-#define do_cfa_expr(offset)                                            \
-       .byte 0x0f;                     /* DW_CFA_def_cfa_expression */ \
-       .uleb128 1f-0f;                 /*   length */                  \
-0:     .byte 0x74;                     /*     DW_OP_breg4 */           \
-       .sleb128 offset;                /*      offset */               \
-       .byte 0x06;                     /*     DW_OP_deref */           \
-1:
-
-#define do_expr(regno, offset)                                         \
-       .byte 0x10;                     /* DW_CFA_expression */         \
-       .uleb128 regno;                 /*   regno */                   \
-       .uleb128 1f-0f;                 /*   length */                  \
-0:     .byte 0x74;                     /*     DW_OP_breg4 */           \
-       .sleb128 offset;                /*       offset */              \
-1:
-
-       do_cfa_expr(IA32_SIGCONTEXT_sp+4)
-       do_expr(0, IA32_SIGCONTEXT_ax+4)
-       do_expr(1, IA32_SIGCONTEXT_cx+4)
-       do_expr(2, IA32_SIGCONTEXT_dx+4)
-       do_expr(3, IA32_SIGCONTEXT_bx+4)
-       do_expr(5, IA32_SIGCONTEXT_bp+4)
-       do_expr(6, IA32_SIGCONTEXT_si+4)
-       do_expr(7, IA32_SIGCONTEXT_di+4)
-       do_expr(8, IA32_SIGCONTEXT_ip+4)
-
-       .byte 0x42      /* DW_CFA_advance_loc 2 -- nop; popl eax. */
-
-       do_cfa_expr(IA32_SIGCONTEXT_sp)
-       do_expr(0, IA32_SIGCONTEXT_ax)
-       do_expr(1, IA32_SIGCONTEXT_cx)
-       do_expr(2, IA32_SIGCONTEXT_dx)
-       do_expr(3, IA32_SIGCONTEXT_bx)
-       do_expr(5, IA32_SIGCONTEXT_bp)
-       do_expr(6, IA32_SIGCONTEXT_si)
-       do_expr(7, IA32_SIGCONTEXT_di)
-       do_expr(8, IA32_SIGCONTEXT_ip)
-
-       .align 4
-.LENDFDEDLSI1:
-
-       .long .LENDFDEDLSI2-.LSTARTFDEDLSI2 /* Length FDE */
-.LSTARTFDEDLSI2:
-       .long .LSTARTFDEDLSI2-.LSTARTFRAMEDLSI1 /* CIE pointer */
-       /* HACK: See above wrt unwind library assumptions.  */
-       .long .LSTART_rt_sigreturn-1-.  /* PC-relative start address */
-       .long .LEND_rt_sigreturn-.LSTART_rt_sigreturn+1
-       .uleb128 0                      /* Augmentation */
-       /* What follows are the instructions for the table generation.
-          We record the locations of each register saved.  This is
-          slightly less complicated than the above, since we don't
-          modify the stack pointer in the process.  */
-
-       do_cfa_expr(IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_sp)
-       do_expr(0, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ax)
-       do_expr(1, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_cx)
-       do_expr(2, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_dx)
-       do_expr(3, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bx)
-       do_expr(5, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_bp)
-       do_expr(6, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_si)
-       do_expr(7, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_di)
-       do_expr(8, IA32_RT_SIGFRAME_sigcontext-4 + IA32_SIGCONTEXT_ip)
-
-       .align 4
-.LENDFDEDLSI2:
-       .previous
diff --git a/arch/x86/vdso/vdso32/syscall.S b/arch/x86/vdso/vdso32/syscall.S
deleted file mode 100644 (file)
index 6b286bb..0000000
+++ /dev/null
@@ -1,75 +0,0 @@
-/*
- * Code for the vDSO.  This version uses the syscall instruction.
- *
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#define SYSCALL_ENTER_KERNEL   syscall
-#include "sigreturn.S"
-
-#include <asm/segment.h>
-
-       .text
-       .globl __kernel_vsyscall
-       .type __kernel_vsyscall,@function
-       ALIGN
-__kernel_vsyscall:
-.LSTART_vsyscall:
-       push    %ebp
-.Lpush_ebp:
-       movl    %ecx, %ebp
-       syscall
-       movl    %ebp, %ecx
-       popl    %ebp
-.Lpop_ebp:
-       ret
-.LEND_vsyscall:
-       .size __kernel_vsyscall,.-.LSTART_vsyscall
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAME:
-       .long .LENDCIE-.LSTARTCIE
-.LSTARTCIE:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zR"            /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0x0c              /* DW_CFA_def_cfa */
-       .uleb128 4
-       .uleb128 4
-       .byte 0x88              /* DW_CFA_offset, column 0x8 */
-       .uleb128 1
-       .align 4
-.LENDCIE:
-
-       .long .LENDFDE1-.LSTARTFDE1     /* Length FDE */
-.LSTARTFDE1:
-       .long .LSTARTFDE1-.LSTARTFRAME  /* CIE pointer */
-       .long .LSTART_vsyscall-.        /* PC-relative start address */
-       .long .LEND_vsyscall-.LSTART_vsyscall
-       .uleb128 0                      /* Augmentation length */
-       /* What follows are the instructions for the table generation.
-          We have to record all changes of the stack pointer.  */
-       .byte 0x40 + .Lpush_ebp-.LSTART_vsyscall /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .uleb128 8
-       .byte 0x85, 0x02        /* DW_CFA_offset %ebp -8 */
-       .byte 0x40 + .Lpop_ebp-.Lpush_ebp /* DW_CFA_advance_loc */
-       .byte 0xc5              /* DW_CFA_restore %ebp */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .uleb128 4
-       .align 4
-.LENDFDE1:
-       .previous
-
-       /*
-        * Pad out the segment to match the size of the sysenter.S version.
-        */
-VDSO32_vsyscall_eh_frame_size = 0x40
-       .section .data,"aw",@progbits
-       .space VDSO32_vsyscall_eh_frame_size-(.LENDFDE1-.LSTARTFRAME), 0
-       .previous
diff --git a/arch/x86/vdso/vdso32/sysenter.S b/arch/x86/vdso/vdso32/sysenter.S
deleted file mode 100644 (file)
index e354bce..0000000
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Code for the vDSO.  This version uses the sysenter instruction.
- *
- * First get the common code for the sigreturn entry points.
- * This must come first.
- */
-#include "sigreturn.S"
-
-/*
- * The caller puts arg2 in %ecx, which gets pushed. The kernel will use
- * %ecx itself for arg2. The pushing is because the sysexit instruction
- * (found in entry.S) requires that we clobber %ecx with the desired %esp.
- * User code might expect that %ecx is unclobbered though, as it would be
- * for returning via the iret instruction, so we must push and pop.
- *
- * The caller puts arg3 in %edx, which the sysexit instruction requires
- * for %eip. Thus, exactly as for arg2, we must push and pop.
- *
- * Arg6 is different. The caller puts arg6 in %ebp. Since the sysenter
- * instruction clobbers %esp, the user's %esp won't even survive entry
- * into the kernel. We store %esp in %ebp. Code in entry.S must fetch
- * arg6 from the stack.
- *
- * You cannot use this vsyscall for the clone() syscall because the
- * three words on the parent stack do not get copied to the child.
- */
-       .text
-       .globl __kernel_vsyscall
-       .type __kernel_vsyscall,@function
-       ALIGN
-__kernel_vsyscall:
-.LSTART_vsyscall:
-       push %ecx
-.Lpush_ecx:
-       push %edx
-.Lpush_edx:
-       push %ebp
-.Lenter_kernel:
-       movl %esp,%ebp
-       sysenter
-
-       /* 7: align return point with nop's to make disassembly easier */
-       .space 7,0x90
-
-       /* 14: System call restart point is here! (SYSENTER_RETURN-2) */
-       int $0x80
-       /* 16: System call normal return point is here! */
-VDSO32_SYSENTER_RETURN:        /* Symbol used by sysenter.c via vdso32-syms.h */
-       pop %ebp
-.Lpop_ebp:
-       pop %edx
-.Lpop_edx:
-       pop %ecx
-.Lpop_ecx:
-       ret
-.LEND_vsyscall:
-       .size __kernel_vsyscall,.-.LSTART_vsyscall
-       .previous
-
-       .section .eh_frame,"a",@progbits
-.LSTARTFRAMEDLSI:
-       .long .LENDCIEDLSI-.LSTARTCIEDLSI
-.LSTARTCIEDLSI:
-       .long 0                 /* CIE ID */
-       .byte 1                 /* Version number */
-       .string "zR"            /* NUL-terminated augmentation string */
-       .uleb128 1              /* Code alignment factor */
-       .sleb128 -4             /* Data alignment factor */
-       .byte 8                 /* Return address register column */
-       .uleb128 1              /* Augmentation value length */
-       .byte 0x1b              /* DW_EH_PE_pcrel|DW_EH_PE_sdata4. */
-       .byte 0x0c              /* DW_CFA_def_cfa */
-       .uleb128 4
-       .uleb128 4
-       .byte 0x88              /* DW_CFA_offset, column 0x8 */
-       .uleb128 1
-       .align 4
-.LENDCIEDLSI:
-       .long .LENDFDEDLSI-.LSTARTFDEDLSI /* Length FDE */
-.LSTARTFDEDLSI:
-       .long .LSTARTFDEDLSI-.LSTARTFRAMEDLSI /* CIE pointer */
-       .long .LSTART_vsyscall-.        /* PC-relative start address */
-       .long .LEND_vsyscall-.LSTART_vsyscall
-       .uleb128 0
-       /* What follows are the instructions for the table generation.
-          We have to record all changes of the stack pointer.  */
-       .byte 0x40 + (.Lpush_ecx-.LSTART_vsyscall) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x08              /* RA at offset 8 now */
-       .byte 0x40 + (.Lpush_edx-.Lpush_ecx) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x0c              /* RA at offset 12 now */
-       .byte 0x40 + (.Lenter_kernel-.Lpush_edx) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x10              /* RA at offset 16 now */
-       .byte 0x85, 0x04        /* DW_CFA_offset %ebp -16 */
-       /* Finally the epilogue.  */
-       .byte 0x40 + (.Lpop_ebp-.Lenter_kernel) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x0c              /* RA at offset 12 now */
-       .byte 0xc5              /* DW_CFA_restore %ebp */
-       .byte 0x40 + (.Lpop_edx-.Lpop_ebp) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x08              /* RA at offset 8 now */
-       .byte 0x40 + (.Lpop_ecx-.Lpop_edx) /* DW_CFA_advance_loc */
-       .byte 0x0e              /* DW_CFA_def_cfa_offset */
-       .byte 0x04              /* RA at offset 4 now */
-       .align 4
-.LENDFDEDLSI:
-       .previous
-
-       /*
-        * Emit a symbol with the size of this .eh_frame data,
-        * to verify it matches the other versions.
-        */
-VDSO32_vsyscall_eh_frame_size = (.LENDFDEDLSI-.LSTARTFRAMEDLSI)
diff --git a/arch/x86/vdso/vdso32/vclock_gettime.c b/arch/x86/vdso/vdso32/vclock_gettime.c
deleted file mode 100644 (file)
index 175cc72..0000000
+++ /dev/null
@@ -1,30 +0,0 @@
-#define BUILD_VDSO32
-
-#ifndef CONFIG_CC_OPTIMIZE_FOR_SIZE
-#undef CONFIG_OPTIMIZE_INLINING
-#endif
-
-#undef CONFIG_X86_PPRO_FENCE
-
-#ifdef CONFIG_X86_64
-
-/*
- * In the case of a 32-bit vDSO for a 64-bit kernel, fake a 32-bit kernel
- * configuration.
- */
-#undef CONFIG_64BIT
-#undef CONFIG_X86_64
-#undef CONFIG_ILLEGAL_POINTER_VALUE
-#undef CONFIG_SPARSEMEM_VMEMMAP
-#undef CONFIG_NR_CPUS
-
-#define CONFIG_X86_32 1
-#define CONFIG_PAGE_OFFSET 0
-#define CONFIG_ILLEGAL_POINTER_VALUE 0
-#define CONFIG_NR_CPUS 1
-
-#define BUILD_VDSO32_64
-
-#endif
-
-#include "../vclock_gettime.c"
diff --git a/arch/x86/vdso/vdso32/vdso-fakesections.c b/arch/x86/vdso/vdso32/vdso-fakesections.c
deleted file mode 100644 (file)
index 541468e..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../vdso-fakesections.c"
diff --git a/arch/x86/vdso/vdso32/vdso32.lds.S b/arch/x86/vdso/vdso32/vdso32.lds.S
deleted file mode 100644 (file)
index 31056cf..0000000
+++ /dev/null
@@ -1,37 +0,0 @@
-/*
- * Linker script for 32-bit vDSO.
- * We #include the file to define the layout details.
- *
- * This file defines the version script giving the user-exported symbols in
- * the DSO.
- */
-
-#include <asm/page.h>
-
-#define BUILD_VDSO32
-
-#include "../vdso-layout.lds.S"
-
-/* The ELF entry point can be used to set the AT_SYSINFO value.  */
-ENTRY(__kernel_vsyscall);
-
-/*
- * This controls what userland symbols we export from the vDSO.
- */
-VERSION
-{
-       LINUX_2.6 {
-       global:
-               __vdso_clock_gettime;
-               __vdso_gettimeofday;
-               __vdso_time;
-       };
-
-       LINUX_2.5 {
-       global:
-               __kernel_vsyscall;
-               __kernel_sigreturn;
-               __kernel_rt_sigreturn;
-       local: *;
-       };
-}
diff --git a/arch/x86/vdso/vdsox32.lds.S b/arch/x86/vdso/vdsox32.lds.S
deleted file mode 100644 (file)
index 697c11e..0000000
+++ /dev/null
@@ -1,25 +0,0 @@
-/*
- * Linker script for x32 vDSO.
- * We #include the file to define the layout details.
- *
- * This file defines the version script giving the user-exported symbols in
- * the DSO.
- */
-
-#define BUILD_VDSOX32
-
-#include "vdso-layout.lds.S"
-
-/*
- * This controls what userland symbols we export from the vDSO.
- */
-VERSION {
-       LINUX_2.6 {
-       global:
-               __vdso_clock_gettime;
-               __vdso_gettimeofday;
-               __vdso_getcpu;
-               __vdso_time;
-       local: *;
-       };
-}
diff --git a/arch/x86/vdso/vgetcpu.c b/arch/x86/vdso/vgetcpu.c
deleted file mode 100644 (file)
index 8ec3d1f..0000000
+++ /dev/null
@@ -1,28 +0,0 @@
-/*
- * Copyright 2006 Andi Kleen, SUSE Labs.
- * Subject to the GNU Public License, v.2
- *
- * Fast user context implementation of getcpu()
- */
-
-#include <linux/kernel.h>
-#include <linux/getcpu.h>
-#include <linux/time.h>
-#include <asm/vgtod.h>
-
-notrace long
-__vdso_getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *unused)
-{
-       unsigned int p;
-
-       p = __getcpu();
-
-       if (cpu)
-               *cpu = p & VGETCPU_CPU_MASK;
-       if (node)
-               *node = p >> 12;
-       return 0;
-}
-
-long getcpu(unsigned *cpu, unsigned *node, struct getcpu_cache *tcache)
-       __attribute__((weak, alias("__vdso_getcpu")));
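The getcpu fast path above decodes one packed word: __getcpu() returns (node << 12) | cpu, read either from IA32_TSC_AUX via RDTSCP or from a per-CPU GDT segment limit (set up by vgetcpu_cpu_init() in vma.c below). A minimal user-space sketch of that encoding follows, assuming VGETCPU_CPU_MASK covers the low 12 bits, as the p >> 12 split implies:

#include <stdio.h>

#define VGETCPU_CPU_MASK 0xfff                  /* assumed: low 12 bits = CPU */

static unsigned pack_cpu_node(unsigned cpu, unsigned node)
{
        return (node << 12) | (cpu & VGETCPU_CPU_MASK);
}

int main(void)
{
        unsigned p = pack_cpu_node(5, 1);       /* stand-in for __getcpu() */

        printf("cpu=%u node=%u\n", p & VGETCPU_CPU_MASK, p >> 12);
        return 0;
}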
diff --git a/arch/x86/vdso/vma.c b/arch/x86/vdso/vma.c
deleted file mode 100644 (file)
index 1c9f750..0000000
+++ /dev/null
@@ -1,300 +0,0 @@
-/*
- * Copyright 2007 Andi Kleen, SUSE Labs.
- * Subject to the GPL, v.2
- *
- * This contains most of the x86 vDSO kernel-side code.
- */
-#include <linux/mm.h>
-#include <linux/err.h>
-#include <linux/sched.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/random.h>
-#include <linux/elf.h>
-#include <linux/cpu.h>
-#include <asm/vgtod.h>
-#include <asm/proto.h>
-#include <asm/vdso.h>
-#include <asm/vvar.h>
-#include <asm/page.h>
-#include <asm/hpet.h>
-#include <asm/desc.h>
-
-#if defined(CONFIG_X86_64)
-unsigned int __read_mostly vdso64_enabled = 1;
-#endif
-
-void __init init_vdso_image(const struct vdso_image *image)
-{
-       int i;
-       int npages = (image->size) / PAGE_SIZE;
-
-       BUG_ON(image->size % PAGE_SIZE != 0);
-       for (i = 0; i < npages; i++)
-               image->text_mapping.pages[i] =
-                       virt_to_page(image->data + i*PAGE_SIZE);
-
-       apply_alternatives((struct alt_instr *)(image->data + image->alt),
-                          (struct alt_instr *)(image->data + image->alt +
-                                               image->alt_len));
-}
-
-struct linux_binprm;
-
-/*
- * Put the vdso above the (randomized) stack with another randomized
- * offset.  This way there is no hole in the middle of address space.
- * To save memory make sure it is still in the same PTE as the stack
- * top.  This doesn't give that many random bits.
- *
- * Note that this algorithm is imperfect: the distribution of the vdso
- * start address within a PMD is biased toward the end.
- *
- * Only used for the 64-bit and x32 vdsos.
- */
-static unsigned long vdso_addr(unsigned long start, unsigned len)
-{
-#ifdef CONFIG_X86_32
-       return 0;
-#else
-       unsigned long addr, end;
-       unsigned offset;
-
-       /*
-        * Round up the start address.  It can start out unaligned as a result
-        * of stack start randomization.
-        */
-       start = PAGE_ALIGN(start);
-
-       /* Round the lowest possible end address up to a PMD boundary. */
-       end = (start + len + PMD_SIZE - 1) & PMD_MASK;
-       if (end >= TASK_SIZE_MAX)
-               end = TASK_SIZE_MAX;
-       end -= len;
-
-       if (end > start) {
-               offset = get_random_int() % (((end - start) >> PAGE_SHIFT) + 1);
-               addr = start + (offset << PAGE_SHIFT);
-       } else {
-               addr = start;
-       }
-
-       /*
-        * Forcibly align the final address in case we have a hardware
-        * issue that requires alignment for performance reasons.
-        */
-       addr = align_vdso_addr(addr);
-
-       return addr;
-#endif
-}
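A self-contained restatement of the placement math in vdso_addr() above, assuming 4 KiB pages and 2 MiB PMDs, with random() standing in for get_random_int():

#include <stdint.h>
#include <stdlib.h>

#define PAGE_SHIFT 12
#define PAGE_SIZE  (1UL << PAGE_SHIFT)
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))
#define PMD_SIZE   (1UL << 21)
#define PMD_MASK   (~(PMD_SIZE - 1))

/* Pick a page-aligned address for a len-byte vdso above `start`,
 * keeping the candidate window within one PMD past the stack top. */
static uintptr_t sketch_vdso_addr(uintptr_t start, size_t len,
                                  uintptr_t task_size_max)
{
        uintptr_t end, offset;

        start = PAGE_ALIGN(start);
        end = (start + len + PMD_SIZE - 1) & PMD_MASK;
        if (end >= task_size_max)
                end = task_size_max;
        end -= len;

        if (end <= start)
                return start;

        offset = (uintptr_t)random() % (((end - start) >> PAGE_SHIFT) + 1);
        return start + (offset << PAGE_SHIFT);
}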
-
-static int map_vdso(const struct vdso_image *image, bool calculate_addr)
-{
-       struct mm_struct *mm = current->mm;
-       struct vm_area_struct *vma;
-       unsigned long addr, text_start;
-       int ret = 0;
-       static struct page *no_pages[] = {NULL};
-       static struct vm_special_mapping vvar_mapping = {
-               .name = "[vvar]",
-               .pages = no_pages,
-       };
-
-       if (calculate_addr) {
-               addr = vdso_addr(current->mm->start_stack,
-                                image->size - image->sym_vvar_start);
-       } else {
-               addr = 0;
-       }
-
-       down_write(&mm->mmap_sem);
-
-       addr = get_unmapped_area(NULL, addr,
-                                image->size - image->sym_vvar_start, 0, 0);
-       if (IS_ERR_VALUE(addr)) {
-               ret = addr;
-               goto up_fail;
-       }
-
-       text_start = addr - image->sym_vvar_start;
-       current->mm->context.vdso = (void __user *)text_start;
-
-       /*
-        * MAYWRITE to allow gdb to COW and set breakpoints
-        */
-       vma = _install_special_mapping(mm,
-                                      text_start,
-                                      image->size,
-                                      VM_READ|VM_EXEC|
-                                      VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
-                                      &image->text_mapping);
-
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto up_fail;
-       }
-
-       vma = _install_special_mapping(mm,
-                                      addr,
-                                      -image->sym_vvar_start,
-                                      VM_READ|VM_MAYREAD,
-                                      &vvar_mapping);
-
-       if (IS_ERR(vma)) {
-               ret = PTR_ERR(vma);
-               goto up_fail;
-       }
-
-       if (image->sym_vvar_page)
-               ret = remap_pfn_range(vma,
-                                     text_start + image->sym_vvar_page,
-                                     __pa_symbol(&__vvar_page) >> PAGE_SHIFT,
-                                     PAGE_SIZE,
-                                     PAGE_READONLY);
-
-       if (ret)
-               goto up_fail;
-
-#ifdef CONFIG_HPET_TIMER
-       if (hpet_address && image->sym_hpet_page) {
-               ret = io_remap_pfn_range(vma,
-                       text_start + image->sym_hpet_page,
-                       hpet_address >> PAGE_SHIFT,
-                       PAGE_SIZE,
-                       pgprot_noncached(PAGE_READONLY));
-
-               if (ret)
-                       goto up_fail;
-       }
-#endif
-
-up_fail:
-       if (ret)
-               current->mm->context.vdso = NULL;
-
-       up_write(&mm->mmap_sem);
-       return ret;
-}
-
-#if defined(CONFIG_X86_32) || defined(CONFIG_COMPAT)
-static int load_vdso32(void)
-{
-       int ret;
-
-       if (vdso32_enabled != 1)  /* Other values all mean "disabled" */
-               return 0;
-
-       ret = map_vdso(selected_vdso32, false);
-       if (ret)
-               return ret;
-
-       if (selected_vdso32->sym_VDSO32_SYSENTER_RETURN)
-               current_thread_info()->sysenter_return =
-                       current->mm->context.vdso +
-                       selected_vdso32->sym_VDSO32_SYSENTER_RETURN;
-
-       return 0;
-}
-#endif
-
-#ifdef CONFIG_X86_64
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
-{
-       if (!vdso64_enabled)
-               return 0;
-
-       return map_vdso(&vdso_image_64, true);
-}
-
-#ifdef CONFIG_COMPAT
-int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
-                                      int uses_interp)
-{
-#ifdef CONFIG_X86_X32_ABI
-       if (test_thread_flag(TIF_X32)) {
-               if (!vdso64_enabled)
-                       return 0;
-
-               return map_vdso(&vdso_image_x32, true);
-       }
-#endif
-
-       return load_vdso32();
-}
-#endif
-#else
-int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
-{
-       return load_vdso32();
-}
-#endif
-
-#ifdef CONFIG_X86_64
-static __init int vdso_setup(char *s)
-{
-       vdso64_enabled = simple_strtoul(s, NULL, 0);
-       return 0;
-}
-__setup("vdso=", vdso_setup);
-#endif
-
-#ifdef CONFIG_X86_64
-static void vgetcpu_cpu_init(void *arg)
-{
-       int cpu = smp_processor_id();
-       struct desc_struct d = { };
-       unsigned long node = 0;
-#ifdef CONFIG_NUMA
-       node = cpu_to_node(cpu);
-#endif
-       if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
-               write_rdtscp_aux((node << 12) | cpu);
-
-       /*
-        * Store cpu number in limit so that it can be loaded
-        * quickly in user space in vgetcpu. (12 bits for the CPU
-        * and 8 bits for the node)
-        */
-       d.limit0 = cpu | ((node & 0xf) << 12);
-       d.limit = node >> 4;
-       d.type = 5;             /* RO data, expand down, accessed */
-       d.dpl = 3;              /* Visible to user code */
-       d.s = 1;                /* Not a system segment */
-       d.p = 1;                /* Present */
-       d.d = 1;                /* 32-bit */
-
-       write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
-}
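Although vma.c is removed by this diff, the descriptor written above is worth unpacking: the 20-bit segment limit carries cpu | (node's low 4 bits << 12) in limit0 and the node's remaining bits in the 4-bit upper limit field, so a single LSL recovers both in user space. A sketch with illustrative values:

#include <stdio.h>

int main(void)
{
        unsigned cpu = 7, node = 0x2a;          /* example values only */
        unsigned limit0 = (cpu | ((node & 0xf) << 12)) & 0xffff;
        unsigned limit  = (node >> 4) & 0xf;    /* top 4 bits of the 20-bit limit */
        unsigned packed = (limit << 16) | limit0;

        printf("cpu=%u node=%u\n", packed & 0xfff, packed >> 12);
        return 0;
}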
-
-static int
-vgetcpu_cpu_notifier(struct notifier_block *n, unsigned long action, void *arg)
-{
-       long cpu = (long)arg;
-
-       if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-               smp_call_function_single(cpu, vgetcpu_cpu_init, NULL, 1);
-
-       return NOTIFY_DONE;
-}
-
-static int __init init_vdso(void)
-{
-       init_vdso_image(&vdso_image_64);
-
-#ifdef CONFIG_X86_X32_ABI
-       init_vdso_image(&vdso_image_x32);
-#endif
-
-       cpu_notifier_register_begin();
-
-       on_each_cpu(vgetcpu_cpu_init, NULL, 1);
-       /* notifier priority > KVM */
-       __hotcpu_notifier(vgetcpu_cpu_notifier, 30);
-
-       cpu_notifier_register_done();
-
-       return 0;
-}
-subsys_initcall(init_vdso);
-#endif /* CONFIG_X86_64 */
index 46957ead3060eecb5e76b6f6daf3b498a6b6a5e9..0b95c9b8283fe2afe885d9a8ae98393c14ecc498 100644 (file)
@@ -1181,10 +1181,11 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
        .read_tscp = native_read_tscp,
 
        .iret = xen_iret,
-       .irq_enable_sysexit = xen_sysexit,
 #ifdef CONFIG_X86_64
        .usergs_sysret32 = xen_sysret32,
        .usergs_sysret64 = xen_sysret64,
+#else
+       .irq_enable_sysexit = xen_sysexit,
 #endif
 
        .load_tr_desc = paravirt_nop,
@@ -1423,7 +1424,7 @@ static void xen_pvh_set_cr_flags(int cpu)
                return;
        /*
         * For BSP, PSE PGE are set in probe_page_size_mask(), for APs
-        * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu_init.
+        * set them here. For all, OSFXSR OSXMMEXCPT are set in fpu__init_cpu().
        */
        if (cpu_has_pse)
                cr4_set_bits_and_update_boot(X86_CR4_PSE);
@@ -1467,6 +1468,7 @@ asmlinkage __visible void __init xen_start_kernel(void)
 {
        struct physdev_set_iopl set_iopl;
        unsigned long initrd_start = 0;
+       u64 pat;
        int rc;
 
        if (!xen_start_info)
@@ -1574,8 +1576,8 @@ asmlinkage __visible void __init xen_start_kernel(void)
         * Modify the cache mode translation tables to match Xen's PAT
         * configuration.
         */
-
-       pat_init_cache_modes();
+       rdmsrl(MSR_IA32_CR_PAT, pat);
+       pat_init_cache_modes(pat);
 
        /* keep using Xen gdt for now; no urgent need to change it */
 
index b47124d4cd67e29199fae1c48f9a94ecd93b00f7..8b7f18e200aa4a453d8ae60d02b85b4141abc677 100644 (file)
@@ -67,6 +67,7 @@
 #include <linux/seq_file.h>
 #include <linux/bootmem.h>
 #include <linux/slab.h>
+#include <linux/vmalloc.h>
 
 #include <asm/cache.h>
 #include <asm/setup.h>
index 956374c1edbc31e4c1eb50c3fb29cb8828ad44b5..9e2ba5c6e1dd7be4a0b10a70b315cf5f0f20c081 100644 (file)
 #include "xen-ops.h"
 #include "debugfs.h"
 
+static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
+static DEFINE_PER_CPU(char *, irq_name);
+static bool xen_pvspin = true;
+
+#ifdef CONFIG_QUEUED_SPINLOCKS
+
+#include <asm/qspinlock.h>
+
+static void xen_qlock_kick(int cpu)
+{
+       xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR);
+}
+
+/*
+ * Halt the current CPU & release it back to the host
+ */
+static void xen_qlock_wait(u8 *byte, u8 val)
+{
+       int irq = __this_cpu_read(lock_kicker_irq);
+
+       /* If kicker interrupts are not initialized yet, just spin */
+       if (irq == -1)
+               return;
+
+       /* clear pending */
+       xen_clear_irq_pending(irq);
+       barrier();
+
+       /*
+        * We check the byte value after clearing pending IRQ to make sure
+        * that we won't miss a wakeup event because of the clearing.
+        *
+        * The sync_clear_bit() call in xen_clear_irq_pending() is atomic.
+        * So it is effectively a memory barrier for x86.
+        */
+       if (READ_ONCE(*byte) != val)
+               return;
+
+       /*
+        * If an interrupt happens here, it will leave the wakeup irq
+        * pending, which will cause xen_poll_irq() to return
+        * immediately.
+        */
+
+       /* Block until irq becomes pending (or perhaps a spurious wakeup) */
+       xen_poll_irq(irq);
+}
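The check-after-clear ordering in xen_qlock_wait() is the load-bearing part: pending state is cleared before the lock byte is re-read, so a kick arriving after the re-read leaves the event pending and xen_poll_irq() falls through immediately. A user-space analogue, assuming C11 atomics stand in for the Xen event channel:

#include <stdatomic.h>
#include <stdbool.h>

static _Atomic bool pending;

static void qlock_wait_sketch(_Atomic unsigned char *byte, unsigned char val)
{
        atomic_store(&pending, false);          /* xen_clear_irq_pending() */
        if (atomic_load(byte) != val)           /* READ_ONCE(*byte) != val */
                return;                         /* lock already handed over */
        while (!atomic_load(&pending))          /* xen_poll_irq() */
                ;                               /* exits once a kick lands */
}

static void qlock_kick_sketch(void)
{
        atomic_store(&pending, true);           /* xen_send_IPI_one() */
}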
+
+#else /* CONFIG_QUEUED_SPINLOCKS */
+
 enum xen_contention_stat {
        TAKEN_SLOW,
        TAKEN_SLOW_PICKUP,
@@ -100,12 +150,9 @@ struct xen_lock_waiting {
        __ticket_t want;
 };
 
-static DEFINE_PER_CPU(int, lock_kicker_irq) = -1;
-static DEFINE_PER_CPU(char *, irq_name);
 static DEFINE_PER_CPU(struct xen_lock_waiting, lock_waiting);
 static cpumask_t waiting_cpus;
 
-static bool xen_pvspin = true;
 __visible void xen_lock_spinning(struct arch_spinlock *lock, __ticket_t want)
 {
        int irq = __this_cpu_read(lock_kicker_irq);
@@ -217,6 +264,7 @@ static void xen_unlock_kick(struct arch_spinlock *lock, __ticket_t next)
                }
        }
 }
+#endif /* CONFIG_QUEUED_SPINLOCKS */
 
 static irqreturn_t dummy_handler(int irq, void *dev_id)
 {
@@ -280,8 +328,16 @@ void __init xen_init_spinlocks(void)
                return;
        }
        printk(KERN_DEBUG "xen: PV spinlocks enabled\n");
+#ifdef CONFIG_QUEUED_SPINLOCKS
+       __pv_init_lock_hash();
+       pv_lock_ops.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath;
+       pv_lock_ops.queued_spin_unlock = PV_CALLEE_SAVE(__pv_queued_spin_unlock);
+       pv_lock_ops.wait = xen_qlock_wait;
+       pv_lock_ops.kick = xen_qlock_kick;
+#else
        pv_lock_ops.lock_spinning = PV_CALLEE_SAVE(xen_lock_spinning);
        pv_lock_ops.unlock_kick = xen_unlock_kick;
+#endif
 }
 
 /*
@@ -310,7 +366,7 @@ static __init int xen_parse_nopvspin(char *arg)
 }
 early_param("xen_nopvspin", xen_parse_nopvspin);
 
-#ifdef CONFIG_XEN_DEBUG_FS
+#if defined(CONFIG_XEN_DEBUG_FS) && !defined(CONFIG_QUEUED_SPINLOCKS)
 
 static struct dentry *d_spin_debug;
 
index 985fc3ee0973c85f916c67cd9a40fc9c2c73d340..f22667abf7b9d54d475edd08ffcdac8ba79b4cf3 100644 (file)
@@ -15,6 +15,8 @@
 #include <asm/percpu.h>
 #include <asm/processor-flags.h>
 #include <asm/segment.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
 
 #include <xen/interface/xen.h>
 
@@ -47,29 +49,13 @@ ENTRY(xen_iret)
 ENDPATCH(xen_iret)
 RELOC(xen_iret, 1b+1)
 
-/*
- * sysexit is not used for 64-bit processes, so it's only ever used to
- * return to 32-bit compat userspace.
- */
-ENTRY(xen_sysexit)
-       pushq $__USER32_DS
-       pushq %rcx
-       pushq $X86_EFLAGS_IF
-       pushq $__USER32_CS
-       pushq %rdx
-
-       pushq $0
-1:     jmp hypercall_iret
-ENDPATCH(xen_sysexit)
-RELOC(xen_sysexit, 1b+1)
-
 ENTRY(xen_sysret64)
        /*
         * We're already on the usermode stack at this point, but
         * still with the kernel gs, so we can easily switch back
         */
        movq %rsp, PER_CPU_VAR(rsp_scratch)
-       movq PER_CPU_VAR(kernel_stack), %rsp
+       movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        pushq $__USER_DS
        pushq PER_CPU_VAR(rsp_scratch)
@@ -88,7 +74,7 @@ ENTRY(xen_sysret32)
         * still with the kernel gs, so we can easily switch back
         */
        movq %rsp, PER_CPU_VAR(rsp_scratch)
-       movq PER_CPU_VAR(kernel_stack), %rsp
+       movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
 
        pushq $__USER32_DS
        pushq PER_CPU_VAR(rsp_scratch)
@@ -128,7 +114,7 @@ RELOC(xen_sysret32, 1b+1)
 /* Normal 64-bit system call target */
 ENTRY(xen_syscall_target)
        undo_xen_syscall
-       jmp system_call_after_swapgs
+       jmp entry_SYSCALL_64_after_swapgs
 ENDPROC(xen_syscall_target)
 
 #ifdef CONFIG_IA32_EMULATION
@@ -136,13 +122,13 @@ ENDPROC(xen_syscall_target)
 /* 32-bit compat syscall target */
 ENTRY(xen_syscall32_target)
        undo_xen_syscall
-       jmp ia32_cstar_target
+       jmp entry_SYSCALL_compat
 ENDPROC(xen_syscall32_target)
 
 /* 32-bit compat sysenter target */
 ENTRY(xen_sysenter_target)
        undo_xen_syscall
-       jmp ia32_sysenter_target
+       jmp entry_SYSENTER_compat
 ENDPROC(xen_sysenter_target)
 
 #else /* !CONFIG_IA32_EMULATION */
index 9e195c683549dc138d77e98687c34f62b40eda76..c20fe29e65f48b4706789e0ad59bb33b1a3acc18 100644 (file)
@@ -134,7 +134,9 @@ DECL_ASM(void, xen_restore_fl_direct, unsigned long);
 
 /* These are not functions, and cannot be called normally */
 __visible void xen_iret(void);
+#ifdef CONFIG_X86_32
 __visible void xen_sysexit(void);
+#endif
 __visible void xen_sysret32(void);
 __visible void xen_sysret64(void);
 __visible void xen_adjust_exception_frame(void);
index 172a02a6ad146fea24ab966cf46a3612434a03da..ba78ccf651e7764e9db92cfca37927a8d68e3892 100644 (file)
@@ -185,4 +185,17 @@ static inline int dma_get_sgtable(struct device *dev, struct sg_table *sgt,
        return -EINVAL;
 }
 
+static inline void *dma_alloc_attrs(struct device *dev, size_t size,
+                                   dma_addr_t *dma_handle, gfp_t flag,
+                                   struct dma_attrs *attrs)
+{
+       return NULL;
+}
+
+static inline void dma_free_attrs(struct device *dev, size_t size,
+                                 void *vaddr, dma_addr_t dma_handle,
+                                 struct dma_attrs *attrs)
+{
+}
+
 #endif /* _XTENSA_DMA_MAPPING_H */
index fe1600a094384e8c18a988e91862b531031984f3..c39bb6e61911e3577274bc4794a08084d4035aaa 100644 (file)
@@ -59,6 +59,7 @@ static inline void __iomem *ioremap_cache(unsigned long offset,
 }
 
 #define ioremap_wc ioremap_nocache
+#define ioremap_wt ioremap_nocache
 
 static inline void __iomem *ioremap(unsigned long offset, unsigned long size)
 {
index 9e3571a6535c3b1bbc8535195ee40405fe9c42c0..83a44a33cfa11221f99ba5f8d836e02a0c4c9f92 100644 (file)
 #include <linux/mm.h>
 #include <linux/module.h>
 #include <linux/hardirq.h>
+#include <linux/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/cacheflush.h>
 #include <asm/hardirq.h>
-#include <asm/uaccess.h>
 #include <asm/pgalloc.h>
 
 DEFINE_PER_CPU(unsigned long, asid_cache) = ASID_USER_FIRST;
@@ -57,7 +57,7 @@ void do_page_fault(struct pt_regs *regs)
        /* If we're in an interrupt or have no user
         * context, we must not take the fault..
         */
-       if (in_atomic() || !mm) {
+       if (faulthandler_disabled() || !mm) {
                bad_page_fault(regs, address, SIGSEGV);
                return;
        }
index 8cfb71ec0937369a8adf79a3d6e754f589cbba5f..184ceadccc1a3aca946b3c9e609a2efb91046807 100644 (file)
@@ -42,6 +42,7 @@ void *kmap_atomic(struct page *page)
        enum fixed_addresses idx;
        unsigned long vaddr;
 
+       preempt_disable();
        pagefault_disable();
        if (!PageHighMem(page))
                return page_address(page);
@@ -79,6 +80,7 @@ void __kunmap_atomic(void *kvaddr)
        }
 
        pagefault_enable();
+       preempt_enable();
 }
 EXPORT_SYMBOL(__kunmap_atomic);
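With the hunks above, a kmap_atomic() section is pinned to its CPU (preemption off) as well as pagefault-free, since fixmap slots are handed out per CPU in stack order. An illustrative caller; the helper name is made up, the APIs are the ones touched here:

#include <linux/highmem.h>
#include <linux/string.h>

static void copy_page_sketch(struct page *dst, struct page *src)
{
        void *vto = kmap_atomic(dst);   /* disables preemption, then pagefaults */
        void *vfrom = kmap_atomic(src);

        memcpy(vto, vfrom, PAGE_SIZE);
        kunmap_atomic(vfrom);           /* LIFO: unmap in reverse order */
        kunmap_atomic(vto);             /* re-enables pagefaults, then preemption */
}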
 
index fd154b94447a25788f48d5e8cc04bc803d1efdb8..03b5f8d77f37b4cbad3a12f3a98f9c3ea63a50e7 100644 (file)
@@ -552,6 +552,8 @@ void blk_cleanup_queue(struct request_queue *q)
                q->queue_lock = &q->__queue_lock;
        spin_unlock_irq(lock);
 
+       bdi_destroy(&q->backing_dev_info);
+
        /* @q is and will stay empty, shutdown and put */
        blk_put_queue(q);
 }
@@ -732,6 +734,8 @@ blk_init_queue_node(request_fn_proc *rfn, spinlock_t *lock, int node_id)
 }
 EXPORT_SYMBOL(blk_init_queue_node);
 
+static void blk_queue_bio(struct request_queue *q, struct bio *bio);
+
 struct request_queue *
 blk_init_allocated_queue(struct request_queue *q, request_fn_proc *rfn,
                         spinlock_t *lock)
@@ -1576,7 +1580,7 @@ void init_request_from_bio(struct request *req, struct bio *bio)
        blk_rq_bio_prep(req->q, req, bio);
 }
 
-void blk_queue_bio(struct request_queue *q, struct bio *bio)
+static void blk_queue_bio(struct request_queue *q, struct bio *bio)
 {
        const bool sync = !!(bio->bi_rw & REQ_SYNC);
        struct blk_plug *plug;
@@ -1684,7 +1688,6 @@ out_unlock:
                spin_unlock_irq(q->queue_lock);
        }
 }
-EXPORT_SYMBOL_GPL(blk_queue_bio);      /* for device mapper only */
 
 /*
  * If bio->bi_dev is a partition, remap the location
index 5f13f4d0bcceda747589a300170537eefb4810eb..1e28ddb656b891b92d7c135fa65914939b1451aa 100644 (file)
@@ -24,7 +24,7 @@ static int get_first_sibling(unsigned int cpu)
 {
        unsigned int ret;
 
-       ret = cpumask_first(topology_thread_cpumask(cpu));
+       ret = cpumask_first(topology_sibling_cpumask(cpu));
        if (ret < nr_cpu_ids)
                return ret;
 
index ade8a2d1b0aa8600ad31413b59db37392628bffc..594eea04266e6d05f7256255552a1c4c72c664f3 100644 (file)
@@ -677,8 +677,11 @@ static void blk_mq_rq_timer(unsigned long priv)
                data.next = blk_rq_timeout(round_jiffies_up(data.next));
                mod_timer(&q->timeout, data.next);
        } else {
-               queue_for_each_hw_ctx(q, hctx, i)
-                       blk_mq_tag_idle(hctx);
+               queue_for_each_hw_ctx(q, hctx, i) {
+                       /* the hctx may be unmapped, so check it here */
+                       if (blk_mq_hw_queue_mapped(hctx))
+                               blk_mq_tag_idle(hctx);
+               }
        }
 }
 
@@ -855,6 +858,16 @@ static void __blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx)
                spin_lock(&hctx->lock);
                list_splice(&rq_list, &hctx->dispatch);
                spin_unlock(&hctx->lock);
+               /*
+                * the queue is expected to be stopped with BLK_MQ_RQ_QUEUE_BUSY,
+                * but it's possible the queue is stopped and restarted again
+                * before this. Queue restart will dispatch requests. And since
+                * requests in rq_list aren't added into hctx->dispatch yet,
+                * the requests in rq_list might get lost.
+                *
+                * blk_mq_run_hw_queue() already checks the STOPPED bit.
+                */
+               blk_mq_run_hw_queue(hctx, true);
        }
 }
 
@@ -1571,22 +1584,6 @@ static int blk_mq_hctx_cpu_offline(struct blk_mq_hw_ctx *hctx, int cpu)
        return NOTIFY_OK;
 }
 
-static int blk_mq_hctx_cpu_online(struct blk_mq_hw_ctx *hctx, int cpu)
-{
-       struct request_queue *q = hctx->queue;
-       struct blk_mq_tag_set *set = q->tag_set;
-
-       if (set->tags[hctx->queue_num])
-               return NOTIFY_OK;
-
-       set->tags[hctx->queue_num] = blk_mq_init_rq_map(set, hctx->queue_num);
-       if (!set->tags[hctx->queue_num])
-               return NOTIFY_STOP;
-
-       hctx->tags = set->tags[hctx->queue_num];
-       return NOTIFY_OK;
-}
-
 static int blk_mq_hctx_notify(void *data, unsigned long action,
                              unsigned int cpu)
 {
@@ -1594,12 +1591,16 @@ static int blk_mq_hctx_notify(void *data, unsigned long action,
 
        if (action == CPU_DEAD || action == CPU_DEAD_FROZEN)
                return blk_mq_hctx_cpu_offline(hctx, cpu);
-       else if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN)
-               return blk_mq_hctx_cpu_online(hctx, cpu);
+
+       /*
+        * In case of CPU online, tags may be reallocated
+        * in blk_mq_map_swqueue() after mapping is updated.
+        */
 
        return NOTIFY_OK;
 }
 
+/* hctx->ctxs will be freed in queue's release handler */
 static void blk_mq_exit_hctx(struct request_queue *q,
                struct blk_mq_tag_set *set,
                struct blk_mq_hw_ctx *hctx, unsigned int hctx_idx)
@@ -1618,7 +1619,6 @@ static void blk_mq_exit_hctx(struct request_queue *q,
 
        blk_mq_unregister_cpu_notifier(&hctx->cpu_notifier);
        blk_free_flush_queue(hctx->fq);
-       kfree(hctx->ctxs);
        blk_mq_free_bitmap(&hctx->ctx_map);
 }
 
@@ -1775,6 +1775,7 @@ static void blk_mq_map_swqueue(struct request_queue *q)
        unsigned int i;
        struct blk_mq_hw_ctx *hctx;
        struct blk_mq_ctx *ctx;
+       struct blk_mq_tag_set *set = q->tag_set;
 
        queue_for_each_hw_ctx(q, hctx, i) {
                cpumask_clear(hctx->cpumask);
@@ -1803,16 +1804,20 @@ static void blk_mq_map_swqueue(struct request_queue *q)
                 * disable it and free the request entries.
                 */
                if (!hctx->nr_ctx) {
-                       struct blk_mq_tag_set *set = q->tag_set;
-
                        if (set->tags[i]) {
                                blk_mq_free_rq_map(set, set->tags[i], i);
                                set->tags[i] = NULL;
-                               hctx->tags = NULL;
                        }
+                       hctx->tags = NULL;
                        continue;
                }
 
+               /* unmapped hw queue can be remapped after CPU topo changed */
+               if (!set->tags[i])
+                       set->tags[i] = blk_mq_init_rq_map(set, i);
+               hctx->tags = set->tags[i];
+               WARN_ON(!hctx->tags);
+
                /*
                 * Set the map size to the number of mapped software queues.
                 * This is more accurate and more efficient than looping
@@ -1886,8 +1891,12 @@ void blk_mq_release(struct request_queue *q)
        unsigned int i;
 
        /* hctx kobj stays in hctx */
-       queue_for_each_hw_ctx(q, hctx, i)
+       queue_for_each_hw_ctx(q, hctx, i) {
+               if (!hctx)
+                       continue;
+               kfree(hctx->ctxs);
                kfree(hctx);
+       }
 
        kfree(q->queue_hw_ctx);
 
@@ -2090,9 +2099,16 @@ static int blk_mq_queue_reinit_notify(struct notifier_block *nb,
         */
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_freeze_queue_start(q);
-       list_for_each_entry(q, &all_q_list, all_q_node)
+       list_for_each_entry(q, &all_q_list, all_q_node) {
                blk_mq_freeze_queue_wait(q);
 
+               /*
+                * timeout handler can't touch hw queue during the
+                * reinitialization
+                */
+               del_timer_sync(&q->timeout);
+       }
+
        list_for_each_entry(q, &all_q_list, all_q_node)
                blk_mq_queue_reinit(q);
 
index faaf36ade7ebdc2fdd363f174978bfb5683a4f9a..2b8fd302f677a967d87994f8a7532aab8dfe6569 100644 (file)
@@ -522,8 +522,6 @@ static void blk_release_queue(struct kobject *kobj)
 
        blk_trace_shutdown(q);
 
-       bdi_destroy(&q->backing_dev_info);
-
        ida_simple_remove(&blk_queue_ida, q->id);
        call_rcu(&q->rcu_head, blk_free_queue_rcu);
 }
index ab21ba203d5c7744f4da2afbba85ed45dde86b98..ed9dd80671204bdebc4005544097fb05b6c90c62 100644 (file)
@@ -221,8 +221,8 @@ bounce:
                if (page_to_pfn(page) <= queue_bounce_pfn(q) && !force)
                        continue;
 
-               inc_zone_page_state(to->bv_page, NR_BOUNCE);
                to->bv_page = mempool_alloc(pool, q->bounce_gfp);
+               inc_zone_page_state(to->bv_page, NR_BOUNCE);
 
                if (rw == WRITE) {
                        char *vto, *vfrom;
index 59794d0d38e34604a24b6e7a63bf309570b2f8fb..8985038f398ce503261dc4a29390a63c9f7b5b44 100644 (file)
@@ -157,7 +157,7 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
 
        eq = kzalloc_node(sizeof(*eq), GFP_KERNEL, q->node);
        if (unlikely(!eq))
-               goto err;
+               return NULL;
 
        eq->type = e;
        kobject_init(&eq->kobj, &elv_ktype);
@@ -165,10 +165,6 @@ struct elevator_queue *elevator_alloc(struct request_queue *q,
        hash_init(eq->hash);
 
        return eq;
-err:
-       kfree(eq);
-       elevator_put(e);
-       return NULL;
 }
 EXPORT_SYMBOL(elevator_alloc);
 
index 0a536dc05f3b559d6d04c1e819d65290f96f7c35..ea982eadaf6380b974d6b1d39a7197085217ac91 100644 (file)
@@ -422,9 +422,9 @@ int blk_alloc_devt(struct hd_struct *part, dev_t *devt)
        /* allocate ext devt */
        idr_preload(GFP_KERNEL);
 
-       spin_lock(&ext_devt_lock);
+       spin_lock_bh(&ext_devt_lock);
        idx = idr_alloc(&ext_devt_idr, part, 0, NR_EXT_DEVT, GFP_NOWAIT);
-       spin_unlock(&ext_devt_lock);
+       spin_unlock_bh(&ext_devt_lock);
 
        idr_preload_end();
        if (idx < 0)
@@ -449,9 +449,9 @@ void blk_free_devt(dev_t devt)
                return;
 
        if (MAJOR(devt) == BLOCK_EXT_MAJOR) {
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                idr_remove(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
 }
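The _bh conversions above apply the usual rule: once a lock can be taken in softirq context (here, assumed to be device-number teardown running from an RCU callback), every process-context acquisition must disable bottom halves too, or a softirq firing while the lock is held deadlocks on the same CPU. The pattern in isolation:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(example_lock);

static void process_context_path(void)
{
        spin_lock_bh(&example_lock);    /* masks local softirqs while held */
        /* ... idr_alloc()/idr_remove() style work ... */
        spin_unlock_bh(&example_lock);
}

static void softirq_context_path(void) /* e.g. run from an RCU callback */
{
        spin_lock(&example_lock);       /* BHs are already disabled here */
        spin_unlock(&example_lock);
}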
 
@@ -653,7 +653,6 @@ void del_gendisk(struct gendisk *disk)
        disk->flags &= ~GENHD_FL_UP;
 
        sysfs_remove_link(&disk_to_dev(disk)->kobj, "bdi");
-       bdi_unregister(&disk->queue->backing_dev_info);
        blk_unregister_queue(disk);
        blk_unregister_region(disk_devt(disk), disk->minors);
 
@@ -691,13 +690,13 @@ struct gendisk *get_gendisk(dev_t devt, int *partno)
        } else {
                struct hd_struct *part;
 
-               spin_lock(&ext_devt_lock);
+               spin_lock_bh(&ext_devt_lock);
                part = idr_find(&ext_devt_idr, blk_mangle_minor(MINOR(devt)));
                if (part && get_disk(part_to_disk(part))) {
                        *partno = part->partno;
                        disk = part_to_disk(part);
                }
-               spin_unlock(&ext_devt_lock);
+               spin_unlock_bh(&ext_devt_lock);
        }
 
        return disk;
index 8aaf298a80e165f3fb5f83b1e00b8636cf9d08a3..362905e7c841ff55b204891e9e93748f2cfea96e 100644 (file)
@@ -1512,15 +1512,6 @@ config CRYPTO_USER_API_RNG
           This option enables the user-space interface for random
          number generator algorithms.
 
-config CRYPTO_USER_API_AEAD
-       tristate "User-space interface for AEAD cipher algorithms"
-       depends on NET
-       select CRYPTO_AEAD
-       select CRYPTO_USER_API
-       help
-         This option enables the user-spaces interface for AEAD
-         cipher algorithms.
-
 config CRYPTO_HASH_INFO
        bool
 
index 00a6fe166fed52863e5b6249858adeee5690a394..69abada22373f54b5dd434ba514e9c6c80c180e5 100644 (file)
@@ -33,7 +33,7 @@ struct aead_ctx {
        /*
         * RSGL_MAX_ENTRIES is an artificial limit where user space at maximum
         * can cause the kernel to allocate RSGL_MAX_ENTRIES * ALG_MAX_PAGES
-        * bytes
+        * pages
         */
 #define RSGL_MAX_ENTRIES ALG_MAX_PAGES
        struct af_alg_sgl rsgl[RSGL_MAX_ENTRIES];
@@ -435,11 +435,10 @@ static int aead_recvmsg(struct socket *sock, struct msghdr *msg, size_t ignored,
                if (err < 0)
                        goto unlock;
                usedpages += err;
-               /* chain the new scatterlist with initial list */
+               /* chain the new scatterlist with previous one */
                if (cnt)
-                       scatterwalk_crypto_chain(ctx->rsgl[0].sg,
-                                       ctx->rsgl[cnt].sg, 1,
-                                       sg_nents(ctx->rsgl[cnt-1].sg));
+                       af_alg_link_sg(&ctx->rsgl[cnt-1], &ctx->rsgl[cnt]);
+
                /* we do not need more iovecs as we have sufficient memory */
                if (outlen <= usedpages)
                        break;
index 6bc9cbc01ad6a3f20c27740ebbf1a919e7c74d0d..00b39802d7ecf5826b8c0b5dc9036b9f2f452ba5 100644 (file)
@@ -105,7 +105,7 @@ static void round_robin_cpu(unsigned int tsk_index)
        mutex_lock(&round_robin_lock);
        cpumask_clear(tmp);
        for_each_cpu(cpu, pad_busy_cpus)
-               cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
+               cpumask_or(tmp, tmp, topology_sibling_cpumask(cpu));
        cpumask_andnot(tmp, cpu_online_mask, tmp);
        /* avoid HT siblings if possible */
        if (cpumask_empty(tmp))
index b193f842599902445015a219687310cdf3bfe9c9..ff6d8adc9cda69c4d0d1f9a22ff3481846020cbd 100644 (file)
@@ -304,6 +304,8 @@ static const struct acpi_device_id acpi_pnp_device_ids[] = {
        {"PNPb006"},
        /* cs423x-pnpbios */
        {"CSC0100"},
+       {"CSC0103"},
+       {"CSC0110"},
        {"CSC0000"},
        {"GIM0100"},            /* Guillemot Turtlebeach something appears to be cs4232 compatible */
        /* es18xx-pnpbios */
index a72685c1e819660768933e84abe9d1076c953bff..5e8df9177da44781ac07942d146e47b97e954cda 100644 (file)
@@ -102,19 +102,12 @@ const struct acpi_predefined_names acpi_gbl_pre_defined_names[] = {
        {"_SB_", ACPI_TYPE_DEVICE, NULL},
        {"_SI_", ACPI_TYPE_LOCAL_SCOPE, NULL},
        {"_TZ_", ACPI_TYPE_DEVICE, NULL},
-       /*
-        * March, 2015:
-        * The _REV object is in the process of being deprecated, because
-        * other ACPI implementations permanently return 2. Thus, it
-        * has little or no value. Return 2 for compatibility with
-        * other ACPI implementations.
-        */
-       {"_REV", ACPI_TYPE_INTEGER, ACPI_CAST_PTR(char, 2)},
+       {"_REV", ACPI_TYPE_INTEGER, (char *)ACPI_CA_SUPPORT_LEVEL},
        {"_OS_", ACPI_TYPE_STRING, ACPI_OS_NAME},
-       {"_GL_", ACPI_TYPE_MUTEX, ACPI_CAST_PTR(char, 1)},
+       {"_GL_", ACPI_TYPE_MUTEX, (char *)1},
 
 #if !defined (ACPI_NO_METHOD_EXECUTION) || defined (ACPI_CONSTANT_EVAL_ONLY)
-       {"_OSI", ACPI_TYPE_METHOD, ACPI_CAST_PTR(char, 1)},
+       {"_OSI", ACPI_TYPE_METHOD, (char *)1},
 #endif
 
        /* Table terminator */
index ed65e9c4b5b0415c1dcc77690a4add725a49a0bd..3670bbab57a34e3c24e674f2e0e21112403d0490 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/nmi.h>
 #include <linux/hardirq.h>
 #include <linux/pstore.h>
+#include <linux/vmalloc.h>
 #include <acpi/apei.h>
 
 #include "apei-internal.h"
index 39748bb3a5430111b8cf4723462eb837137b0213..7ccba395c9ddbeb7a6725b336d69d01abd4b82b1 100644 (file)
@@ -182,7 +182,7 @@ static void __init acpi_request_region (struct acpi_generic_address *gas,
                request_mem_region(addr, length, desc);
 }
 
-static int __init acpi_reserve_resources(void)
+static void __init acpi_reserve_resources(void)
 {
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");
@@ -211,10 +211,7 @@ static int __init acpi_reserve_resources(void)
        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");
-
-       return 0;
 }
-device_initcall(acpi_reserve_resources);
 
 void acpi_os_printf(const char *fmt, ...)
 {
@@ -1845,6 +1842,7 @@ acpi_status __init acpi_os_initialize(void)
 
 acpi_status __init acpi_os_initialize1(void)
 {
+       acpi_reserve_resources();
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
index 5589a6e2a02346e3b2ce48656b3facea1abfc621..8244f013f21095a9508e80ef01621e0ffbaab106 100644 (file)
@@ -573,7 +573,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_get_resources);
  * @ares: Input ACPI resource object.
  * @types: Valid resource types of IORESOURCE_XXX
  *
- * This is a hepler function to support acpi_dev_get_resources(), which filters
+ * This is a helper function to support acpi_dev_get_resources(), which filters
  * ACPI resource objects according to resource types.
  */
 int acpi_dev_filter_resource_type(struct acpi_resource *ares,
index 26e5b50605230e2e34a46d8118486fb1ba2eb939..bf034f8b7c1acde77f90ded7f39f70dbd636b7db 100644 (file)
@@ -14,6 +14,7 @@
 #include <linux/delay.h>
 #include <linux/module.h>
 #include <linux/interrupt.h>
+#include <linux/dmi.h>
 #include "sbshc.h"
 
 #define PREFIX "ACPI: "
@@ -87,6 +88,8 @@ enum acpi_smb_offset {
        ACPI_SMB_ALARM_DATA = 0x26,     /* 2 bytes alarm data */
 };
 
+static bool macbook;
+
 static inline int smb_hc_read(struct acpi_smb_hc *hc, u8 address, u8 *data)
 {
        return ec_read(hc->offset + address, data);
@@ -132,6 +135,8 @@ static int acpi_smbus_transaction(struct acpi_smb_hc *hc, u8 protocol,
        }
 
        mutex_lock(&hc->lock);
+       if (macbook)
+               udelay(5);
        if (smb_hc_read(hc, ACPI_SMB_PROTOCOL, &temp))
                goto end;
        if (temp) {
@@ -257,12 +262,29 @@ extern int acpi_ec_add_query_handler(struct acpi_ec *ec, u8 query_bit,
                              acpi_handle handle, acpi_ec_query_func func,
                              void *data);
 
+static int macbook_dmi_match(const struct dmi_system_id *d)
+{
+       pr_debug("Detected MacBook, enabling workaround\n");
+       macbook = true;
+       return 0;
+}
+
+static struct dmi_system_id acpi_smbus_dmi_table[] = {
+       { macbook_dmi_match, "Apple MacBook", {
+         DMI_MATCH(DMI_BOARD_VENDOR, "Apple"),
+         DMI_MATCH(DMI_PRODUCT_NAME, "MacBook") },
+       },
+       { },
+};
+
 static int acpi_smbus_hc_add(struct acpi_device *device)
 {
        int status;
        unsigned long long val;
        struct acpi_smb_hc *hc;
 
+       dmi_check_system(acpi_smbus_dmi_table);
+
        if (!device)
                return -EINVAL;
 
index 5f601553b9b043fff9ac80552ab55dbc4173c714..9dca4b995be0792b6c4f1920b9786cb6517a2b5f 100644 (file)
@@ -270,6 +270,7 @@ config ATA_PIIX
 config SATA_DWC
        tristate "DesignWare Cores SATA support"
        depends on 460EX
+       select DW_DMAC
        help
          This option enables support for the on-chip SATA controller of the
          AppliedMicro processor 460EX.
@@ -729,15 +730,6 @@ config PATA_SC1200
 
          If unsure, say N.
 
-config PATA_SCC
-       tristate "Toshiba's Cell Reference Set IDE support"
-       depends on PCI && PPC_CELLEB
-       help
-         This option enables support for the built-in IDE controller on
-         Toshiba Cell Reference Board.
-
-         If unsure, say N.
-
 config PATA_SCH
        tristate "Intel SCH PATA support"
        depends on PCI
index b67e995179a947bcdda7e5bf6f5d95275841ad49..40f7865f20a1dbf62123da6998fcef81fa9a2690 100644 (file)
@@ -75,7 +75,6 @@ obj-$(CONFIG_PATA_PDC_OLD)    += pata_pdc202xx_old.o
 obj-$(CONFIG_PATA_RADISYS)     += pata_radisys.o
 obj-$(CONFIG_PATA_RDC)         += pata_rdc.o
 obj-$(CONFIG_PATA_SC1200)      += pata_sc1200.o
-obj-$(CONFIG_PATA_SCC)         += pata_scc.o
 obj-$(CONFIG_PATA_SCH)         += pata_sch.o
 obj-$(CONFIG_PATA_SERVERWORKS) += pata_serverworks.o
 obj-$(CONFIG_PATA_SIL680)      += pata_sil680.o
index c7a92a743ed035e9af81ac779180fc65456a8390..65ee94454bbd2c92f1879386b19a346a5632794f 100644 (file)
@@ -66,6 +66,7 @@ enum board_ids {
        board_ahci_yes_fbs,
 
        /* board IDs for specific chipsets in alphabetical order */
+       board_ahci_avn,
        board_ahci_mcp65,
        board_ahci_mcp77,
        board_ahci_mcp89,
@@ -84,6 +85,8 @@ enum board_ids {
 static int ahci_init_one(struct pci_dev *pdev, const struct pci_device_id *ent);
 static int ahci_vt8251_hardreset(struct ata_link *link, unsigned int *class,
                                 unsigned long deadline);
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+                             unsigned long deadline);
 static void ahci_mcp89_apple_enable(struct pci_dev *pdev);
 static bool is_mcp89_apple(struct pci_dev *pdev);
 static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
@@ -107,6 +110,11 @@ static struct ata_port_operations ahci_p5wdh_ops = {
        .hardreset              = ahci_p5wdh_hardreset,
 };
 
+static struct ata_port_operations ahci_avn_ops = {
+       .inherits               = &ahci_ops,
+       .hardreset              = ahci_avn_hardreset,
+};
+
 static const struct ata_port_info ahci_port_info[] = {
        /* by features */
        [board_ahci] = {
@@ -151,6 +159,12 @@ static const struct ata_port_info ahci_port_info[] = {
                .port_ops       = &ahci_ops,
        },
        /* by chipsets */
+       [board_ahci_avn] = {
+               .flags          = AHCI_FLAG_COMMON,
+               .pio_mask       = ATA_PIO4,
+               .udma_mask      = ATA_UDMA6,
+               .port_ops       = &ahci_avn_ops,
+       },
        [board_ahci_mcp65] = {
                AHCI_HFLAGS     (AHCI_HFLAG_NO_FPDMA_AA | AHCI_HFLAG_NO_PMP |
                                 AHCI_HFLAG_YES_NCQ),
@@ -290,14 +304,14 @@ static const struct pci_device_id ahci_pci_tbl[] = {
        { PCI_VDEVICE(INTEL, 0x1f27), board_ahci }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x1f2e), board_ahci }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x1f2f), board_ahci }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0x1f32), board_ahci }, /* Avoton AHCI */
-       { PCI_VDEVICE(INTEL, 0x1f33), board_ahci }, /* Avoton AHCI */
-       { PCI_VDEVICE(INTEL, 0x1f34), board_ahci }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0x1f35), board_ahci }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0x1f36), board_ahci }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0x1f37), board_ahci }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci }, /* Avoton RAID */
-       { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x1f32), board_ahci_avn }, /* Avoton AHCI */
+       { PCI_VDEVICE(INTEL, 0x1f33), board_ahci_avn }, /* Avoton AHCI */
+       { PCI_VDEVICE(INTEL, 0x1f34), board_ahci_avn }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x1f35), board_ahci_avn }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x1f36), board_ahci_avn }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x1f37), board_ahci_avn }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x1f3e), board_ahci_avn }, /* Avoton RAID */
+       { PCI_VDEVICE(INTEL, 0x1f3f), board_ahci_avn }, /* Avoton RAID */
        { PCI_VDEVICE(INTEL, 0x2823), board_ahci }, /* Wellsburg RAID */
        { PCI_VDEVICE(INTEL, 0x2827), board_ahci }, /* Wellsburg RAID */
        { PCI_VDEVICE(INTEL, 0x8d02), board_ahci }, /* Wellsburg AHCI */
@@ -670,6 +684,79 @@ static int ahci_p5wdh_hardreset(struct ata_link *link, unsigned int *class,
        return rc;
 }
 
+/*
+ * ahci_avn_hardreset - attempt more aggressive recovery of Avoton ports.
+ *
+ * It has been observed with some SSDs that the timing of events in the
+ * link synchronization phase can leave the port in a state that can not
+ * be recovered by a SATA-hard-reset alone.  The failing signature is
+ * SStatus.DET stuck at 1 ("Device presence detected but Phy
+ * communication not established").  It was found that unloading and
+ * reloading the driver when this problem occurs allows the drive
+ * connection to be recovered (DET advanced to 0x3).  The critical
+ * component of reloading the driver is that the port state machines are
+ * reset by bouncing "port enable" in the AHCI PCS configuration
+ * register.  So, reproduce that effect by bouncing a port whenever we
+ * see DET==1 after a reset.
+ */
+static int ahci_avn_hardreset(struct ata_link *link, unsigned int *class,
+                             unsigned long deadline)
+{
+       const unsigned long *timing = sata_ehc_deb_timing(&link->eh_context);
+       struct ata_port *ap = link->ap;
+       struct ahci_port_priv *pp = ap->private_data;
+       struct ahci_host_priv *hpriv = ap->host->private_data;
+       u8 *d2h_fis = pp->rx_fis + RX_FIS_D2H_REG;
+       unsigned long tmo = deadline - jiffies;
+       struct ata_taskfile tf;
+       bool online;
+       int rc, i;
+
+       DPRINTK("ENTER\n");
+
+       ahci_stop_engine(ap);
+
+       for (i = 0; i < 2; i++) {
+               u16 val;
+               u32 sstatus;
+               int port = ap->port_no;
+               struct ata_host *host = ap->host;
+               struct pci_dev *pdev = to_pci_dev(host->dev);
+
+               /* clear D2H reception area to properly wait for D2H FIS */
+               ata_tf_init(link->device, &tf);
+               tf.command = ATA_BUSY;
+               ata_tf_to_fis(&tf, 0, 0, d2h_fis);
+
+               rc = sata_link_hardreset(link, timing, deadline, &online,
+                               ahci_check_ready);
+
+               if (sata_scr_read(link, SCR_STATUS, &sstatus) != 0 ||
+                               (sstatus & 0xf) != 1)
+                       break;
+
+               ata_link_printk(link, KERN_INFO, "avn bounce port%d\n",
+                               port);
+
+               pci_read_config_word(pdev, 0x92, &val);
+               val &= ~(1 << port);
+               pci_write_config_word(pdev, 0x92, val);
+               ata_msleep(ap, 1000);
+               val |= 1 << port;
+               pci_write_config_word(pdev, 0x92, val);
+               deadline += tmo;
+       }
+
+       hpriv->start_engine(ap);
+
+       if (online)
+               *class = ahci_dev_classify(ap);
+
+       DPRINTK("EXIT, rc=%d, class=%u\n", rc, *class);
+       return rc;
+}
+
+
 #ifdef CONFIG_PM
 static int ahci_pci_device_suspend(struct pci_dev *pdev, pm_message_t mesg)
 {
index 23716dd8a7ec3f569f82db531e1ed71bc330c7d6..5928d0746a270e7b6b2ee12a022b19ed731f03fe 100644 (file)
@@ -45,7 +45,7 @@ static void ahci_mvebu_mbus_config(struct ahci_host_priv *hpriv,
                writel((cs->mbus_attr << 8) |
                       (dram->mbus_dram_target_id << 4) | 1,
                       hpriv->mmio + AHCI_WINDOW_CTRL(i));
-               writel(cs->base, hpriv->mmio + AHCI_WINDOW_BASE(i));
+               writel(cs->base >> 16, hpriv->mmio + AHCI_WINDOW_BASE(i));
                writel(((cs->size - 1) & 0xffff0000),
                       hpriv->mmio + AHCI_WINDOW_SIZE(i));
        }
index ea0ff005b86ce702e4ccd0224c5b8f14c0fcfc89..8ff428fe8e0fa00659218f58f041cf948ae8327c 100644 (file)
@@ -37,7 +37,6 @@ struct st_ahci_drv_data {
        struct reset_control *pwr;
        struct reset_control *sw_rst;
        struct reset_control *pwr_rst;
-       struct ahci_host_priv *hpriv;
 };
 
 static void st_ahci_configure_oob(void __iomem *mmio)
@@ -55,9 +54,10 @@ static void st_ahci_configure_oob(void __iomem *mmio)
        writel(new_val, mmio + ST_AHCI_OOBR);
 }
 
-static int st_ahci_deassert_resets(struct device *dev)
+static int st_ahci_deassert_resets(struct ahci_host_priv *hpriv,
+                               struct device *dev)
 {
-       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
+       struct st_ahci_drv_data *drv_data = hpriv->plat_data;
        int err;
 
        if (drv_data->pwr) {
@@ -90,8 +90,8 @@ static int st_ahci_deassert_resets(struct device *dev)
 static void st_ahci_host_stop(struct ata_host *host)
 {
        struct ahci_host_priv *hpriv = host->private_data;
+       struct st_ahci_drv_data *drv_data = hpriv->plat_data;
        struct device *dev = host->dev;
-       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
        int err;
 
        if (drv_data->pwr) {
@@ -103,29 +103,30 @@ static void st_ahci_host_stop(struct ata_host *host)
        ahci_platform_disable_resources(hpriv);
 }
 
-static int st_ahci_probe_resets(struct platform_device *pdev)
+static int st_ahci_probe_resets(struct ahci_host_priv *hpriv,
+                               struct device *dev)
 {
-       struct st_ahci_drv_data *drv_data = platform_get_drvdata(pdev);
+       struct st_ahci_drv_data *drv_data = hpriv->plat_data;
 
-       drv_data->pwr = devm_reset_control_get(&pdev->dev, "pwr-dwn");
+       drv_data->pwr = devm_reset_control_get(dev, "pwr-dwn");
        if (IS_ERR(drv_data->pwr)) {
-               dev_info(&pdev->dev, "power reset control not defined\n");
+               dev_info(dev, "power reset control not defined\n");
                drv_data->pwr = NULL;
        }
 
-       drv_data->sw_rst = devm_reset_control_get(&pdev->dev, "sw-rst");
+       drv_data->sw_rst = devm_reset_control_get(dev, "sw-rst");
        if (IS_ERR(drv_data->sw_rst)) {
-               dev_info(&pdev->dev, "soft reset control not defined\n");
+               dev_info(dev, "soft reset control not defined\n");
                drv_data->sw_rst = NULL;
        }
 
-       drv_data->pwr_rst = devm_reset_control_get(&pdev->dev, "pwr-rst");
+       drv_data->pwr_rst = devm_reset_control_get(dev, "pwr-rst");
        if (IS_ERR(drv_data->pwr_rst)) {
-               dev_dbg(&pdev->dev, "power soft reset control not defined\n");
+               dev_dbg(dev, "power soft reset control not defined\n");
                drv_data->pwr_rst = NULL;
        }
 
-       return st_ahci_deassert_resets(&pdev->dev);
+       return st_ahci_deassert_resets(hpriv, dev);
 }
 
 static struct ata_port_operations st_ahci_port_ops = {
@@ -154,15 +155,12 @@ static int st_ahci_probe(struct platform_device *pdev)
        if (!drv_data)
                return -ENOMEM;
 
-       platform_set_drvdata(pdev, drv_data);
-
        hpriv = ahci_platform_get_resources(pdev);
        if (IS_ERR(hpriv))
                return PTR_ERR(hpriv);
+       hpriv->plat_data = drv_data;
 
-       drv_data->hpriv = hpriv;
-
-       err = st_ahci_probe_resets(pdev);
+       err = st_ahci_probe_resets(hpriv, &pdev->dev);
        if (err)
                return err;
 
@@ -170,7 +168,7 @@ static int st_ahci_probe(struct platform_device *pdev)
        if (err)
                return err;
 
-       st_ahci_configure_oob(drv_data->hpriv->mmio);
+       st_ahci_configure_oob(hpriv->mmio);
 
        err = ahci_platform_init_host(pdev, hpriv, &st_ahci_port_info,
                                      &ahci_platform_sht);
@@ -185,8 +183,9 @@ static int st_ahci_probe(struct platform_device *pdev)
 #ifdef CONFIG_PM_SLEEP
 static int st_ahci_suspend(struct device *dev)
 {
-       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
-       struct ahci_host_priv *hpriv = drv_data->hpriv;
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
+       struct st_ahci_drv_data *drv_data = hpriv->plat_data;
        int err;
 
        err = ahci_platform_suspend_host(dev);
@@ -208,21 +207,21 @@ static int st_ahci_suspend(struct device *dev)
 
 static int st_ahci_resume(struct device *dev)
 {
-       struct st_ahci_drv_data *drv_data = dev_get_drvdata(dev);
-       struct ahci_host_priv *hpriv = drv_data->hpriv;
+       struct ata_host *host = dev_get_drvdata(dev);
+       struct ahci_host_priv *hpriv = host->private_data;
        int err;
 
        err = ahci_platform_enable_resources(hpriv);
        if (err)
                return err;
 
-       err = st_ahci_deassert_resets(dev);
+       err = st_ahci_deassert_resets(hpriv, dev);
        if (err) {
                ahci_platform_disable_resources(hpriv);
                return err;
        }
 
-       st_ahci_configure_oob(drv_data->hpriv->mmio);
+       st_ahci_configure_oob(hpriv->mmio);
 
        return ahci_platform_resume_host(dev);
 }
index 61a9c07e0dff5b277dba35cfa135bac449f9ce84..287c4ba0219f7ced8c76af999dd1eeb3e5ed2639 100644 (file)
@@ -1707,8 +1707,7 @@ static void ahci_handle_port_interrupt(struct ata_port *ap,
        if (unlikely(resetting))
                status &= ~PORT_IRQ_BAD_PMP;
 
-       /* if LPM is enabled, PHYRDY doesn't mean anything */
-       if (ap->link.lpm_policy > ATA_LPM_MAX_POWER) {
+       if (sata_lpm_ignore_phy_events(&ap->link)) {
                status &= ~PORT_IRQ_PHYRDY;
                ahci_scr_write(&ap->link, SCR_ERROR, SERR_PHYRDY_CHG);
        }
index f6cb1f1b30b7466278d47dae09bc959db24dfef1..577849c6611ac5efa2c948c7b274dc894a19890d 100644 (file)
@@ -4235,7 +4235,7 @@ static const struct ata_blacklist_entry ata_device_blacklist [] = {
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
        { "Crucial_CT*MX100*",          "MU01", ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
-       { "Samsung SSD 850 PRO*",       NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
+       { "Samsung SSD 8*",             NULL,   ATA_HORKAGE_NO_NCQ_TRIM |
                                                ATA_HORKAGE_ZERO_AFTER_TRIM, },
 
        /*
@@ -6752,6 +6752,38 @@ u32 ata_wait_register(struct ata_port *ap, void __iomem *reg, u32 mask, u32 val,
        return tmp;
 }
 
+/**
+ *     sata_lpm_ignore_phy_events - test if PHY event should be ignored
+ *     @link: Link receiving the event
+ *
+ *     Test whether the received PHY event has to be ignored or not.
+ *
+ *     LOCKING:
+ *     None.
+ *
+ *     RETURNS:
+ *     True if the event has to be ignored.
+ */
+bool sata_lpm_ignore_phy_events(struct ata_link *link)
+{
+       unsigned long lpm_timeout = link->last_lpm_change +
+                                   msecs_to_jiffies(ATA_TMOUT_SPURIOUS_PHY);
+
+       /* if LPM is enabled, PHYRDY doesn't mean anything */
+       if (link->lpm_policy > ATA_LPM_MAX_POWER)
+               return true;
+
+       /* ignore the first PHY event after the LPM policy changed
+        * as it might be spurious
+        */
+       if ((link->flags & ATA_LFLAG_CHANGED) &&
+           time_before(jiffies, lpm_timeout))
+               return true;
+
+       return false;
+}
+EXPORT_SYMBOL_GPL(sata_lpm_ignore_phy_events);
+
 /*
  * Dummy port_ops
  */
index 07f41be38fbe556ffff9bd5efcacf8ed4538034e..cf0022ec07f2420c37fb8a52dc904530f0df2e38 100644 (file)
@@ -3597,6 +3597,9 @@ static int ata_eh_set_lpm(struct ata_link *link, enum ata_lpm_policy policy,
                }
        }
 
+       link->last_lpm_change = jiffies;
+       link->flags |= ATA_LFLAG_CHANGED;
+
        return 0;
 
 fail:
index 80a80548ad0a80acf28c3407e6a6048825f92af3..27245957eee3cd906f546d67853d2ebd6ce54d30 100644 (file)
@@ -1053,7 +1053,7 @@ static struct of_device_id octeon_cf_match[] = {
        },
        {},
 };
-MODULE_DEVICE_TABLE(of, octeon_i2c_match);
+MODULE_DEVICE_TABLE(of, octeon_cf_match);
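The one-liner above fixes a copy-paste slip: MODULE_DEVICE_TABLE() must name the table the driver actually matches, otherwise the modular build references an undefined symbol and the autoload aliases describe the wrong device list. The correct pairing, with a purely illustrative compatible string:

#include <linux/module.h>
#include <linux/mod_devicetable.h>

static const struct of_device_id example_match[] = {
        { .compatible = "vendor,example-device" },      /* illustrative */
        { /* sentinel */ },
};
MODULE_DEVICE_TABLE(of, example_match);                 /* same symbol */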
 
 static struct platform_driver octeon_cf_driver = {
        .probe          = octeon_cf_probe,
diff --git a/drivers/ata/pata_scc.c b/drivers/ata/pata_scc.c
deleted file mode 100644 (file)
index 5cd60d6..0000000
+++ /dev/null
@@ -1,1110 +0,0 @@
-/*
- * Support for IDE interfaces on Celleb platform
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on drivers/ata/ata_piix.c:
- *  Copyright 2003-2005 Red Hat Inc
- *  Copyright 2003-2005 Jeff Garzik
- *  Copyright (C) 1998-1999 Andrzej Krzysztofowicz, Author and Maintainer
- *  Copyright (C) 1998-2000 Andre Hedrick <andre@linux-ide.org>
- *  Copyright (C) 2003 Red Hat Inc
- *
- * and drivers/ata/ahci.c:
- *  Copyright 2004-2005 Red Hat, Inc.
- *
- * and drivers/ata/libata-core.c:
- *  Copyright 2003-2004 Red Hat, Inc.  All rights reserved.
- *  Copyright 2003-2004 Jeff Garzik
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/blkdev.h>
-#include <linux/delay.h>
-#include <linux/device.h>
-#include <scsi/scsi_host.h>
-#include <linux/libata.h>
-
-#define DRV_NAME               "pata_scc"
-#define DRV_VERSION            "0.3"
-
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA          0x01b4
-
-/* PCI BARs */
-#define SCC_CTRL_BAR           0
-#define SCC_BMID_BAR           1
-
-/* offset of CTRL registers */
-#define SCC_CTL_PIOSHT         0x000
-#define SCC_CTL_PIOCT          0x004
-#define SCC_CTL_MDMACT         0x008
-#define SCC_CTL_MCRCST         0x00C
-#define SCC_CTL_SDMACT         0x010
-#define SCC_CTL_SCRCST         0x014
-#define SCC_CTL_UDENVT         0x018
-#define SCC_CTL_TDVHSEL        0x020
-#define SCC_CTL_MODEREG        0x024
-#define SCC_CTL_ECMODE         0xF00
-#define SCC_CTL_MAEA0          0xF50
-#define SCC_CTL_MAEC0          0xF54
-#define SCC_CTL_CCKCTRL        0xFF0
-
-/* offset of BMID registers */
-#define SCC_DMA_CMD            0x000
-#define SCC_DMA_STATUS         0x004
-#define SCC_DMA_TABLE_OFS      0x008
-#define SCC_DMA_INTMASK        0x010
-#define SCC_DMA_INTST          0x014
-#define SCC_DMA_PTERADD        0x018
-#define SCC_REG_CMD_ADDR       0x020
-#define SCC_REG_DATA           0x000
-#define SCC_REG_ERR            0x004
-#define SCC_REG_FEATURE        0x004
-#define SCC_REG_NSECT          0x008
-#define SCC_REG_LBAL           0x00C
-#define SCC_REG_LBAM           0x010
-#define SCC_REG_LBAH           0x014
-#define SCC_REG_DEVICE         0x018
-#define SCC_REG_STATUS         0x01C
-#define SCC_REG_CMD            0x01C
-#define SCC_REG_ALTSTATUS      0x020
-
-/* register value */
-#define TDVHSEL_MASTER         0x00000001
-#define TDVHSEL_SLAVE          0x00000004
-
-#define MODE_JCUSFEN           0x00000080
-
-#define ECMODE_VALUE           0x01
-
-#define CCKCTRL_ATARESET       0x00040000
-#define CCKCTRL_BUFCNT         0x00020000
-#define CCKCTRL_CRST           0x00010000
-#define CCKCTRL_OCLKEN         0x00000100
-#define CCKCTRL_ATACLKOEN      0x00000002
-#define CCKCTRL_LCLKEN         0x00000001
-
-#define QCHCD_IOS_SS           0x00000001
-
-#define QCHSD_STPDIAG          0x00020000
-
-#define INTMASK_MSK            0xD1000012
-#define INTSTS_SERROR          0x80000000
-#define INTSTS_PRERR           0x40000000
-#define INTSTS_RERR            0x10000000
-#define INTSTS_ICERR           0x01000000
-#define INTSTS_BMSINT          0x00000010
-#define INTSTS_BMHE            0x00000008
-#define INTSTS_IOIRQS          0x00000004
-#define INTSTS_INTRQ           0x00000002
-#define INTSTS_ACTEINT         0x00000001
-
-
-/* PIO transfer mode table */
-/* JCHST */
-static const unsigned long JCHSTtbl[2][7] = {
-       {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},     /* 100MHz */
-       {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}      /* 133MHz */
-};
-
-/* JCHHT */
-static const unsigned long JCHHTtbl[2][7] = {
-       {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},     /* 100MHz */
-       {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}      /* 133MHz */
-};
-
-/* JCHCT */
-static const unsigned long JCHCTtbl[2][7] = {
-       {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},     /* 100MHz */
-       {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}      /* 133MHz */
-};
-
-/* DMA transfer mode  table */
-/* JCHDCTM/JCHDCTS */
-static const unsigned long JCHDCTxtbl[2][7] = {
-       {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},     /* 100MHz */
-       {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}      /* 133MHz */
-};
-
-/* JCSTWTM/JCSTWTS  */
-static const unsigned long JCSTWTxtbl[2][7] = {
-       {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},     /* 100MHz */
-       {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}      /* 133MHz */
-};
-
-/* JCTSS */
-static const unsigned long JCTSStbl[2][7] = {
-       {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},     /* 100MHz */
-       {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}      /* 133MHz */
-};
-
-/* JCENVT */
-static const unsigned long JCENVTtbl[2][7] = {
-       {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},     /* 100MHz */
-       {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}      /* 133MHz */
-};
-
-/* JCACTSELS/JCACTSELM */
-static const unsigned long JCACTSELtbl[2][7] = {
-       {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},     /* 100MHz */
-       {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}      /* 133MHz */
-};
-
-static const struct pci_device_id scc_pci_tbl[] = {
-       { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0},
-       { }     /* terminate list */
-};
-
-/**
- *     scc_set_piomode - Initialize host controller PATA PIO timings
- *     @ap: Port whose timings we are configuring
- *     @adev: um
- *
- *     Set PIO mode for device.
- *
- *     LOCKING:
- *     None (inherited from caller).
- */
-
-static void scc_set_piomode (struct ata_port *ap, struct ata_device *adev)
-{
-       unsigned int pio = adev->pio_mode - XFER_PIO_0;
-       void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
-       void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
-       void __iomem *piosht_port = ctrl_base + SCC_CTL_PIOSHT;
-       void __iomem *pioct_port = ctrl_base + SCC_CTL_PIOCT;
-       unsigned long reg;
-       int offset;
-
-       reg = in_be32(cckctrl_port);
-       if (reg & CCKCTRL_ATACLKOEN)
-               offset = 1;     /* 133MHz */
-       else
-               offset = 0;     /* 100MHz */
-
-       reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
-       out_be32(piosht_port, reg);
-       reg = JCHCTtbl[offset][pio];
-       out_be32(pioct_port, reg);
-}
-
-/**
- *     scc_set_dmamode - Initialize host controller PATA DMA timings
- *     @ap: Port whose timings we are configuring
- *     @adev: um
- *
- *     Set UDMA mode for device.
- *
- *     LOCKING:
- *     None (inherited from caller).
- */
-
-static void scc_set_dmamode (struct ata_port *ap, struct ata_device *adev)
-{
-       unsigned int udma = adev->dma_mode;
-       unsigned int is_slave = (adev->devno != 0);
-       u8 speed = udma;
-       void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
-       void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
-       void __iomem *mdmact_port = ctrl_base + SCC_CTL_MDMACT;
-       void __iomem *mcrcst_port = ctrl_base + SCC_CTL_MCRCST;
-       void __iomem *sdmact_port = ctrl_base + SCC_CTL_SDMACT;
-       void __iomem *scrcst_port = ctrl_base + SCC_CTL_SCRCST;
-       void __iomem *udenvt_port = ctrl_base + SCC_CTL_UDENVT;
-       void __iomem *tdvhsel_port = ctrl_base + SCC_CTL_TDVHSEL;
-       int offset, idx;
-
-       if (in_be32(cckctrl_port) & CCKCTRL_ATACLKOEN)
-               offset = 1;     /* 133MHz */
-       else
-               offset = 0;     /* 100MHz */
-
-       if (speed >= XFER_UDMA_0)
-               idx = speed - XFER_UDMA_0;
-       else
-               return;
-
-       if (is_slave) {
-               out_be32(sdmact_port, JCHDCTxtbl[offset][idx]);
-               out_be32(scrcst_port, JCSTWTxtbl[offset][idx]);
-               out_be32(tdvhsel_port,
-                        (in_be32(tdvhsel_port) & ~TDVHSEL_SLAVE) | (JCACTSELtbl[offset][idx] << 2));
-       } else {
-               out_be32(mdmact_port, JCHDCTxtbl[offset][idx]);
-               out_be32(mcrcst_port, JCSTWTxtbl[offset][idx]);
-               out_be32(tdvhsel_port,
-                        (in_be32(tdvhsel_port) & ~TDVHSEL_MASTER) | JCACTSELtbl[offset][idx]);
-       }
-       out_be32(udenvt_port,
-                JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx]);
-}
-
-unsigned long scc_mode_filter(struct ata_device *adev, unsigned long mask)
-{
-       /* errata A308 workaround: limit ATAPI UDMA mode to UDMA4 */
-       if (adev->class == ATA_DEV_ATAPI &&
-           (mask & (0xE0 << ATA_SHIFT_UDMA))) {
-               printk(KERN_INFO "%s: limit ATAPI UDMA to UDMA4\n", DRV_NAME);
-               mask &= ~(0xE0 << ATA_SHIFT_UDMA);
-       }
-       return mask;
-}
-
-/**
- *     scc_tf_load - send taskfile registers to host controller
- *     @ap: Port to which output is sent
- *     @tf: ATA taskfile register set
- *
- *     Note: Original code is ata_sff_tf_load().
- */
-
-static void scc_tf_load (struct ata_port *ap, const struct ata_taskfile *tf)
-{
-       struct ata_ioports *ioaddr = &ap->ioaddr;
-       unsigned int is_addr = tf->flags & ATA_TFLAG_ISADDR;
-
-       if (tf->ctl != ap->last_ctl) {
-               out_be32(ioaddr->ctl_addr, tf->ctl);
-               ap->last_ctl = tf->ctl;
-               ata_wait_idle(ap);
-       }
-
-       if (is_addr && (tf->flags & ATA_TFLAG_LBA48)) {
-               out_be32(ioaddr->feature_addr, tf->hob_feature);
-               out_be32(ioaddr->nsect_addr, tf->hob_nsect);
-               out_be32(ioaddr->lbal_addr, tf->hob_lbal);
-               out_be32(ioaddr->lbam_addr, tf->hob_lbam);
-               out_be32(ioaddr->lbah_addr, tf->hob_lbah);
-               VPRINTK("hob: feat 0x%X nsect 0x%X, lba 0x%X 0x%X 0x%X\n",
-                       tf->hob_feature,
-                       tf->hob_nsect,
-                       tf->hob_lbal,
-                       tf->hob_lbam,
-                       tf->hob_lbah);
-       }
-
-       if (is_addr) {
-               out_be32(ioaddr->feature_addr, tf->feature);
-               out_be32(ioaddr->nsect_addr, tf->nsect);
-               out_be32(ioaddr->lbal_addr, tf->lbal);
-               out_be32(ioaddr->lbam_addr, tf->lbam);
-               out_be32(ioaddr->lbah_addr, tf->lbah);
-               VPRINTK("feat 0x%X nsect 0x%X lba 0x%X 0x%X 0x%X\n",
-                       tf->feature,
-                       tf->nsect,
-                       tf->lbal,
-                       tf->lbam,
-                       tf->lbah);
-       }
-
-       if (tf->flags & ATA_TFLAG_DEVICE) {
-               out_be32(ioaddr->device_addr, tf->device);
-               VPRINTK("device 0x%X\n", tf->device);
-       }
-
-       ata_wait_idle(ap);
-}
-
-/**
- *     scc_check_status - Read device status reg & clear interrupt
- *     @ap: port where the device is
- *
- *     Note: Original code is ata_check_status().
- */
-
-static u8 scc_check_status (struct ata_port *ap)
-{
-       return in_be32(ap->ioaddr.status_addr);
-}
-
-/**
- *     scc_tf_read - input device's ATA taskfile shadow registers
- *     @ap: Port from which input is read
- *     @tf: ATA taskfile register set for storing input
- *
- *     Note: Original code is ata_sff_tf_read().
- */
-
-static void scc_tf_read (struct ata_port *ap, struct ata_taskfile *tf)
-{
-       struct ata_ioports *ioaddr = &ap->ioaddr;
-
-       tf->command = scc_check_status(ap);
-       tf->feature = in_be32(ioaddr->error_addr);
-       tf->nsect = in_be32(ioaddr->nsect_addr);
-       tf->lbal = in_be32(ioaddr->lbal_addr);
-       tf->lbam = in_be32(ioaddr->lbam_addr);
-       tf->lbah = in_be32(ioaddr->lbah_addr);
-       tf->device = in_be32(ioaddr->device_addr);
-
-       if (tf->flags & ATA_TFLAG_LBA48) {
-               out_be32(ioaddr->ctl_addr, tf->ctl | ATA_HOB);
-               tf->hob_feature = in_be32(ioaddr->error_addr);
-               tf->hob_nsect = in_be32(ioaddr->nsect_addr);
-               tf->hob_lbal = in_be32(ioaddr->lbal_addr);
-               tf->hob_lbam = in_be32(ioaddr->lbam_addr);
-               tf->hob_lbah = in_be32(ioaddr->lbah_addr);
-               out_be32(ioaddr->ctl_addr, tf->ctl);
-               ap->last_ctl = tf->ctl;
-       }
-}
-
-/**
- *     scc_exec_command - issue ATA command to host controller
- *     @ap: port to which command is being issued
- *     @tf: ATA taskfile register set
- *
- *     Note: Original code is ata_sff_exec_command().
- */
-
-static void scc_exec_command (struct ata_port *ap,
-                             const struct ata_taskfile *tf)
-{
-       DPRINTK("ata%u: cmd 0x%X\n", ap->print_id, tf->command);
-
-       out_be32(ap->ioaddr.command_addr, tf->command);
-       ata_sff_pause(ap);
-}
-
-/**
- *     scc_check_altstatus - Read device alternate status reg
- *     @ap: port where the device is
- */
-
-static u8 scc_check_altstatus (struct ata_port *ap)
-{
-       return in_be32(ap->ioaddr.altstatus_addr);
-}
-
-/**
- *     scc_dev_select - Select device 0/1 on ATA bus
- *     @ap: ATA channel to manipulate
- *     @device: ATA device (numbered from zero) to select
- *
- *     Note: Original code is ata_sff_dev_select().
- */
-
-static void scc_dev_select (struct ata_port *ap, unsigned int device)
-{
-       u8 tmp;
-
-       if (device == 0)
-               tmp = ATA_DEVICE_OBS;
-       else
-               tmp = ATA_DEVICE_OBS | ATA_DEV1;
-
-       out_be32(ap->ioaddr.device_addr, tmp);
-       ata_sff_pause(ap);
-}
-
-/**
- *     scc_set_devctl - Write device control reg
- *     @ap: port where the device is
- *     @ctl: value to write
- */
-
-static void scc_set_devctl(struct ata_port *ap, u8 ctl)
-{
-       out_be32(ap->ioaddr.ctl_addr, ctl);
-}
-
-/**
- *     scc_bmdma_setup - Set up PCI IDE BMDMA transaction
- *     @qc: Info associated with this ATA transaction.
- *
- *     Note: Original code is ata_bmdma_setup().
- */
-
-static void scc_bmdma_setup (struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       unsigned int rw = (qc->tf.flags & ATA_TFLAG_WRITE);
-       u8 dmactl;
-       void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-       /* load PRD table addr */
-       out_be32(mmio + SCC_DMA_TABLE_OFS, ap->bmdma_prd_dma);
-
-       /* specify data direction, triple-check start bit is clear */
-       dmactl = in_be32(mmio + SCC_DMA_CMD);
-       dmactl &= ~(ATA_DMA_WR | ATA_DMA_START);
-       if (!rw)
-               dmactl |= ATA_DMA_WR;
-       out_be32(mmio + SCC_DMA_CMD, dmactl);
-
-       /* issue r/w command */
-       ap->ops->sff_exec_command(ap, &qc->tf);
-}
-
-/**
- *     scc_bmdma_start - Start a PCI IDE BMDMA transaction
- *     @qc: Info associated with this ATA transaction.
- *
- *     Note: Original code is ata_bmdma_start().
- */
-
-static void scc_bmdma_start (struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       u8 dmactl;
-       void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-       /* start host DMA transaction */
-       dmactl = in_be32(mmio + SCC_DMA_CMD);
-       out_be32(mmio + SCC_DMA_CMD, dmactl | ATA_DMA_START);
-}
-
-/**
- *     scc_devchk - PATA device presence detection
- *     @ap: ATA channel to examine
- *     @device: Device to examine (starting at zero)
- *
- *     Note: Original code is ata_devchk().
- */
-
-static unsigned int scc_devchk (struct ata_port *ap,
-                               unsigned int device)
-{
-       struct ata_ioports *ioaddr = &ap->ioaddr;
-       u8 nsect, lbal;
-
-       ap->ops->sff_dev_select(ap, device);
-
-       out_be32(ioaddr->nsect_addr, 0x55);
-       out_be32(ioaddr->lbal_addr, 0xaa);
-
-       out_be32(ioaddr->nsect_addr, 0xaa);
-       out_be32(ioaddr->lbal_addr, 0x55);
-
-       out_be32(ioaddr->nsect_addr, 0x55);
-       out_be32(ioaddr->lbal_addr, 0xaa);
-
-       nsect = in_be32(ioaddr->nsect_addr);
-       lbal = in_be32(ioaddr->lbal_addr);
-
-       if ((nsect == 0x55) && (lbal == 0xaa))
-               return 1;       /* we found a device */
-
-       return 0;               /* nothing found */
-}
-
-/**
- *     scc_wait_after_reset - wait for devices to become ready after reset
- *
- *     Note: Original code is ata_sff_wait_after_reset
- */
-
-static int scc_wait_after_reset(struct ata_link *link, unsigned int devmask,
-                               unsigned long deadline)
-{
-       struct ata_port *ap = link->ap;
-       struct ata_ioports *ioaddr = &ap->ioaddr;
-       unsigned int dev0 = devmask & (1 << 0);
-       unsigned int dev1 = devmask & (1 << 1);
-       int rc, ret = 0;
-
-       /* Spec mandates ">= 2ms" before checking status.  We wait
-        * 150ms, because that was the magic delay used for ATAPI
-        * devices in Hale Landis's ATADRVR, for the period of time
-        * between when the ATA command register is written, and then
-        * status is checked.  Because waiting for "a while" before
-        * checking status is fine, post SRST, we perform this magic
-        * delay here as well.
-        *
-        * Old drivers/ide uses the 2mS rule and then waits for ready.
-        */
-       ata_msleep(ap, 150);
-
-       /* always check readiness of the master device */
-       rc = ata_sff_wait_ready(link, deadline);
-       /* -ENODEV means the odd clown forgot the D7 pulldown resistor
-        * and TF status is 0xff, bail out on it too.
-        */
-       if (rc)
-               return rc;
-
-       /* if device 1 was found in ata_devchk, wait for register
-        * access briefly, then wait for BSY to clear.
-        */
-       if (dev1) {
-               int i;
-
-               ap->ops->sff_dev_select(ap, 1);
-
-               /* Wait for register access.  Some ATAPI devices fail
-                * to set nsect/lbal after reset, so don't waste too
-                * much time on it.  We're gonna wait for !BSY anyway.
-                */
-               for (i = 0; i < 2; i++) {
-                       u8 nsect, lbal;
-
-                       nsect = in_be32(ioaddr->nsect_addr);
-                       lbal = in_be32(ioaddr->lbal_addr);
-                       if ((nsect == 1) && (lbal == 1))
-                               break;
-                       ata_msleep(ap, 50);     /* give drive a breather */
-               }
-
-               rc = ata_sff_wait_ready(link, deadline);
-               if (rc) {
-                       if (rc != -ENODEV)
-                               return rc;
-                       ret = rc;
-               }
-       }
-
-       /* is all this really necessary? */
-       ap->ops->sff_dev_select(ap, 0);
-       if (dev1)
-               ap->ops->sff_dev_select(ap, 1);
-       if (dev0)
-               ap->ops->sff_dev_select(ap, 0);
-
-       return ret;
-}
-
-/**
- *     scc_bus_softreset - PATA device software reset
- *
- *     Note: Original code is ata_bus_softreset().
- */
-
-static int scc_bus_softreset(struct ata_port *ap, unsigned int devmask,
-                                      unsigned long deadline)
-{
-       struct ata_ioports *ioaddr = &ap->ioaddr;
-
-       DPRINTK("ata%u: bus reset via SRST\n", ap->print_id);
-
-       /* software reset.  causes dev0 to be selected */
-       out_be32(ioaddr->ctl_addr, ap->ctl);
-       udelay(20);
-       out_be32(ioaddr->ctl_addr, ap->ctl | ATA_SRST);
-       udelay(20);
-       out_be32(ioaddr->ctl_addr, ap->ctl);
-
-       return scc_wait_after_reset(&ap->link, devmask, deadline);
-}
-
-/**
- *     scc_softreset - reset host port via ATA SRST
- *     @ap: port to reset
- *     @classes: resulting classes of attached devices
- *     @deadline: deadline jiffies for the operation
- *
- *     Note: Original code is ata_sff_softreset().
- */
-
-static int scc_softreset(struct ata_link *link, unsigned int *classes,
-                        unsigned long deadline)
-{
-       struct ata_port *ap = link->ap;
-       unsigned int slave_possible = ap->flags & ATA_FLAG_SLAVE_POSS;
-       unsigned int devmask = 0;
-       int rc;
-       u8 err;
-
-       DPRINTK("ENTER\n");
-
-       /* determine if device 0/1 are present */
-       if (scc_devchk(ap, 0))
-               devmask |= (1 << 0);
-       if (slave_possible && scc_devchk(ap, 1))
-               devmask |= (1 << 1);
-
-       /* select device 0 again */
-       ap->ops->sff_dev_select(ap, 0);
-
-       /* issue bus reset */
-       DPRINTK("about to softreset, devmask=%x\n", devmask);
-       rc = scc_bus_softreset(ap, devmask, deadline);
-       if (rc) {
-               ata_port_err(ap, "SRST failed (err_mask=0x%x)\n", rc);
-               return -EIO;
-       }
-
-       /* determine by signature whether we have ATA or ATAPI devices */
-       classes[0] = ata_sff_dev_classify(&ap->link.device[0],
-                                         devmask & (1 << 0), &err);
-       if (slave_possible && err != 0x81)
-               classes[1] = ata_sff_dev_classify(&ap->link.device[1],
-                                                 devmask & (1 << 1), &err);
-
-       DPRINTK("EXIT, classes[0]=%u [1]=%u\n", classes[0], classes[1]);
-       return 0;
-}
-
-/**
- *     scc_bmdma_stop - Stop PCI IDE BMDMA transfer
- *     @qc: Command we are ending DMA for
- */
-
-static void scc_bmdma_stop (struct ata_queued_cmd *qc)
-{
-       struct ata_port *ap = qc->ap;
-       void __iomem *ctrl_base = ap->host->iomap[SCC_CTRL_BAR];
-       void __iomem *bmid_base = ap->host->iomap[SCC_BMID_BAR];
-       u32 reg;
-
-       while (1) {
-               reg = in_be32(bmid_base + SCC_DMA_INTST);
-
-               if (reg & INTSTS_SERROR) {
-                       printk(KERN_WARNING "%s: SERROR\n", DRV_NAME);
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_SERROR|INTSTS_BMSINT);
-                       out_be32(bmid_base + SCC_DMA_CMD,
-                                in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
-                       continue;
-               }
-
-               if (reg & INTSTS_PRERR) {
-                       u32 maea0, maec0;
-                       maea0 = in_be32(ctrl_base + SCC_CTL_MAEA0);
-                       maec0 = in_be32(ctrl_base + SCC_CTL_MAEC0);
-                       printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", DRV_NAME, maea0, maec0);
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_PRERR|INTSTS_BMSINT);
-                       out_be32(bmid_base + SCC_DMA_CMD,
-                                in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
-                       continue;
-               }
-
-               if (reg & INTSTS_RERR) {
-                       printk(KERN_WARNING "%s: Response Error\n", DRV_NAME);
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_RERR|INTSTS_BMSINT);
-                       out_be32(bmid_base + SCC_DMA_CMD,
-                                in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
-                       continue;
-               }
-
-               if (reg & INTSTS_ICERR) {
-                       out_be32(bmid_base + SCC_DMA_CMD,
-                                in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
-                       printk(KERN_WARNING "%s: Illegal Configuration\n", DRV_NAME);
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ICERR|INTSTS_BMSINT);
-                       continue;
-               }
-
-               if (reg & INTSTS_BMSINT) {
-                       unsigned int classes;
-                       unsigned long deadline = ata_deadline(jiffies, ATA_TMOUT_BOOT);
-                       printk(KERN_WARNING "%s: Internal Bus Error\n", DRV_NAME);
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMSINT);
-                       /* TBD: SW reset */
-                       scc_softreset(&ap->link, &classes, deadline);
-                       continue;
-               }
-
-               if (reg & INTSTS_BMHE) {
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_BMHE);
-                       continue;
-               }
-
-               if (reg & INTSTS_ACTEINT) {
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_ACTEINT);
-                       continue;
-               }
-
-               if (reg & INTSTS_IOIRQS) {
-                       out_be32(bmid_base + SCC_DMA_INTST, INTSTS_IOIRQS);
-                       continue;
-               }
-               break;
-       }
-
-       /* clear start/stop bit */
-       out_be32(bmid_base + SCC_DMA_CMD,
-                in_be32(bmid_base + SCC_DMA_CMD) & ~ATA_DMA_START);
-
-       /* one-PIO-cycle guaranteed wait, per spec, for HDMA1:0 transition */
-       ata_sff_dma_pause(ap);  /* dummy read */
-}
-
-/**
- *     scc_bmdma_status - Read PCI IDE BMDMA status
- *     @ap: Port associated with this ATA transaction.
- */
-
-static u8 scc_bmdma_status (struct ata_port *ap)
-{
-       void __iomem *mmio = ap->ioaddr.bmdma_addr;
-       u8 host_stat = in_be32(mmio + SCC_DMA_STATUS);
-       u32 int_status = in_be32(mmio + SCC_DMA_INTST);
-       struct ata_queued_cmd *qc = ata_qc_from_tag(ap, ap->link.active_tag);
-       static int retry = 0;
-
-       /* return if IOS_SS is cleared */
-       if (!(in_be32(mmio + SCC_DMA_CMD) & ATA_DMA_START))
-               return host_stat;
-
-       /* errata A252,A308 workaround: Step4 */
-       if ((scc_check_altstatus(ap) & ATA_ERR)
-                                       && (int_status & INTSTS_INTRQ))
-               return (host_stat | ATA_DMA_INTR);
-
-       /* errata A308 workaround Step5 */
-       if (int_status & INTSTS_IOIRQS) {
-               host_stat |= ATA_DMA_INTR;
-
-               /* We don't check ATAPI DMA because it is limited to UDMA4 */
-               if ((qc->tf.protocol == ATA_PROT_DMA &&
-                    qc->dev->xfer_mode > XFER_UDMA_4)) {
-                       if (!(int_status & INTSTS_ACTEINT)) {
-                               printk(KERN_WARNING "ata%u: operation failed (transfer data loss)\n",
-                                      ap->print_id);
-                               host_stat |= ATA_DMA_ERR;
-                               if (retry++)
-                                       ap->udma_mask &= ~(1 << qc->dev->xfer_mode);
-                       } else
-                               retry = 0;
-               }
-       }
-
-       return host_stat;
-}
-
-/**
- *     scc_data_xfer - Transfer data by PIO
- *     @dev: device for this I/O
- *     @buf: data buffer
- *     @buflen: buffer length
- *     @rw: read/write
- *
- *     Note: Original code is ata_sff_data_xfer().
- */
-
-static unsigned int scc_data_xfer (struct ata_device *dev, unsigned char *buf,
-                                  unsigned int buflen, int rw)
-{
-       struct ata_port *ap = dev->link->ap;
-       unsigned int words = buflen >> 1;
-       unsigned int i;
-       __le16 *buf16 = (__le16 *) buf;
-       void __iomem *mmio = ap->ioaddr.data_addr;
-
-       /* Transfer multiple of 2 bytes */
-       if (rw == READ)
-               for (i = 0; i < words; i++)
-                       buf16[i] = cpu_to_le16(in_be32(mmio));
-       else
-               for (i = 0; i < words; i++)
-                       out_be32(mmio, le16_to_cpu(buf16[i]));
-
-       /* Transfer trailing 1 byte, if any. */
-       if (unlikely(buflen & 0x01)) {
-               __le16 align_buf[1] = { 0 };
-               unsigned char *trailing_buf = buf + buflen - 1;
-
-               if (rw == READ) {
-                       align_buf[0] = cpu_to_le16(in_be32(mmio));
-                       memcpy(trailing_buf, align_buf, 1);
-               } else {
-                       memcpy(align_buf, trailing_buf, 1);
-                       out_be32(mmio, le16_to_cpu(align_buf[0]));
-               }
-               words++;
-       }
-
-       return words << 1;
-}
-
-/**
- *     scc_postreset - standard postreset callback
- *     @ap: the target ata_port
- *     @classes: classes of attached devices
- *
- *     Note: Original code is ata_sff_postreset().
- */
-
-static void scc_postreset(struct ata_link *link, unsigned int *classes)
-{
-       struct ata_port *ap = link->ap;
-
-       DPRINTK("ENTER\n");
-
-       /* is double-select really necessary? */
-       if (classes[0] != ATA_DEV_NONE)
-               ap->ops->sff_dev_select(ap, 1);
-       if (classes[1] != ATA_DEV_NONE)
-               ap->ops->sff_dev_select(ap, 0);
-
-       /* bail out if no device is present */
-       if (classes[0] == ATA_DEV_NONE && classes[1] == ATA_DEV_NONE) {
-               DPRINTK("EXIT, no device\n");
-               return;
-       }
-
-       /* set up device control */
-       out_be32(ap->ioaddr.ctl_addr, ap->ctl);
-
-       DPRINTK("EXIT\n");
-}
-
-/**
- *     scc_irq_clear - Clear PCI IDE BMDMA interrupt.
- *     @ap: Port associated with this ATA transaction.
- *
- *     Note: Original code is ata_bmdma_irq_clear().
- */
-
-static void scc_irq_clear (struct ata_port *ap)
-{
-       void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-       if (!mmio)
-               return;
-
-       out_be32(mmio + SCC_DMA_STATUS, in_be32(mmio + SCC_DMA_STATUS));
-}
-
-/**
- *     scc_port_start - Set port up for dma.
- *     @ap: Port to initialize
- *
- *     Allocate space for PRD table using ata_bmdma_port_start().
- *     Set PRD table address for PTERADD. (PRD Transfer End Read)
- */
-
-static int scc_port_start (struct ata_port *ap)
-{
-       void __iomem *mmio = ap->ioaddr.bmdma_addr;
-       int rc;
-
-       rc = ata_bmdma_port_start(ap);
-       if (rc)
-               return rc;
-
-       out_be32(mmio + SCC_DMA_PTERADD, ap->bmdma_prd_dma);
-       return 0;
-}
-
-/**
- *     scc_port_stop - Undo scc_port_start()
- *     @ap: Port to shut down
- *
- *     Reset PTERADD.
- */
-
-static void scc_port_stop (struct ata_port *ap)
-{
-       void __iomem *mmio = ap->ioaddr.bmdma_addr;
-
-       out_be32(mmio + SCC_DMA_PTERADD, 0);
-}
-
-static struct scsi_host_template scc_sht = {
-       ATA_BMDMA_SHT(DRV_NAME),
-};
-
-static struct ata_port_operations scc_pata_ops = {
-       .inherits               = &ata_bmdma_port_ops,
-
-       .set_piomode            = scc_set_piomode,
-       .set_dmamode            = scc_set_dmamode,
-       .mode_filter            = scc_mode_filter,
-
-       .sff_tf_load            = scc_tf_load,
-       .sff_tf_read            = scc_tf_read,
-       .sff_exec_command       = scc_exec_command,
-       .sff_check_status       = scc_check_status,
-       .sff_check_altstatus    = scc_check_altstatus,
-       .sff_dev_select         = scc_dev_select,
-       .sff_set_devctl         = scc_set_devctl,
-
-       .bmdma_setup            = scc_bmdma_setup,
-       .bmdma_start            = scc_bmdma_start,
-       .bmdma_stop             = scc_bmdma_stop,
-       .bmdma_status           = scc_bmdma_status,
-       .sff_data_xfer          = scc_data_xfer,
-
-       .cable_detect           = ata_cable_80wire,
-       .softreset              = scc_softreset,
-       .postreset              = scc_postreset,
-
-       .sff_irq_clear          = scc_irq_clear,
-
-       .port_start             = scc_port_start,
-       .port_stop              = scc_port_stop,
-};
-
-static struct ata_port_info scc_port_info[] = {
-       {
-               .flags          = ATA_FLAG_SLAVE_POSS,
-               .pio_mask       = ATA_PIO4,
-               /* No MWDMA */
-               .udma_mask      = ATA_UDMA6,
-               .port_ops       = &scc_pata_ops,
-       },
-};
-
-/**
- *     scc_reset_controller - initialize SCC PATA controller.
- */
-
-static int scc_reset_controller(struct ata_host *host)
-{
-       void __iomem *ctrl_base = host->iomap[SCC_CTRL_BAR];
-       void __iomem *bmid_base = host->iomap[SCC_BMID_BAR];
-       void __iomem *cckctrl_port = ctrl_base + SCC_CTL_CCKCTRL;
-       void __iomem *mode_port = ctrl_base + SCC_CTL_MODEREG;
-       void __iomem *ecmode_port = ctrl_base + SCC_CTL_ECMODE;
-       void __iomem *intmask_port = bmid_base + SCC_DMA_INTMASK;
-       void __iomem *dmastatus_port = bmid_base + SCC_DMA_STATUS;
-       u32 reg = 0;
-
-       out_be32(cckctrl_port, reg);
-       reg |= CCKCTRL_ATACLKOEN;
-       out_be32(cckctrl_port, reg);
-       reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
-       out_be32(cckctrl_port, reg);
-       reg |= CCKCTRL_CRST;
-       out_be32(cckctrl_port, reg);
-
-       for (;;) {
-               reg = in_be32(cckctrl_port);
-               if (reg & CCKCTRL_CRST)
-                       break;
-               udelay(5000);
-       }
-
-       reg |= CCKCTRL_ATARESET;
-       out_be32(cckctrl_port, reg);
-       out_be32(ecmode_port, ECMODE_VALUE);
-       out_be32(mode_port, MODE_JCUSFEN);
-       out_be32(intmask_port, INTMASK_MSK);
-
-       if (in_be32(dmastatus_port) & QCHSD_STPDIAG) {
-               printk(KERN_WARNING "%s: failed to detect 80c cable. (PDIAG# is high)\n", DRV_NAME);
-               return -EIO;
-       }
-
-       return 0;
-}
-
-/**
- *     scc_setup_ports - initialize ioaddr with SCC PATA port offsets.
- *     @ioaddr: IO address structure to be initialized
- *     @base: base address of BMID region
- */
-
-static void scc_setup_ports (struct ata_ioports *ioaddr, void __iomem *base)
-{
-       ioaddr->cmd_addr = base + SCC_REG_CMD_ADDR;
-       ioaddr->altstatus_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
-       ioaddr->ctl_addr = ioaddr->cmd_addr + SCC_REG_ALTSTATUS;
-       ioaddr->bmdma_addr = base;
-       ioaddr->data_addr = ioaddr->cmd_addr + SCC_REG_DATA;
-       ioaddr->error_addr = ioaddr->cmd_addr + SCC_REG_ERR;
-       ioaddr->feature_addr = ioaddr->cmd_addr + SCC_REG_FEATURE;
-       ioaddr->nsect_addr = ioaddr->cmd_addr + SCC_REG_NSECT;
-       ioaddr->lbal_addr = ioaddr->cmd_addr + SCC_REG_LBAL;
-       ioaddr->lbam_addr = ioaddr->cmd_addr + SCC_REG_LBAM;
-       ioaddr->lbah_addr = ioaddr->cmd_addr + SCC_REG_LBAH;
-       ioaddr->device_addr = ioaddr->cmd_addr + SCC_REG_DEVICE;
-       ioaddr->status_addr = ioaddr->cmd_addr + SCC_REG_STATUS;
-       ioaddr->command_addr = ioaddr->cmd_addr + SCC_REG_CMD;
-}
-
-static int scc_host_init(struct ata_host *host)
-{
-       struct pci_dev *pdev = to_pci_dev(host->dev);
-       int rc;
-
-       rc = scc_reset_controller(host);
-       if (rc)
-               return rc;
-
-       rc = dma_set_mask(&pdev->dev, ATA_DMA_MASK);
-       if (rc)
-               return rc;
-       rc = dma_set_coherent_mask(&pdev->dev, ATA_DMA_MASK);
-       if (rc)
-               return rc;
-
-       scc_setup_ports(&host->ports[0]->ioaddr, host->iomap[SCC_BMID_BAR]);
-
-       pci_set_master(pdev);
-
-       return 0;
-}
-
-/**
- *     scc_init_one - Register SCC PATA device with kernel services
- *     @pdev: PCI device to register
- *     @ent: Entry in scc_pci_tbl matching with @pdev
- *
- *     LOCKING:
- *     Inherited from PCI layer (may sleep).
- *
- *     RETURNS:
- *     Zero on success, or -ERRNO value.
- */
-
-static int scc_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
-{
-       unsigned int board_idx = (unsigned int) ent->driver_data;
-       const struct ata_port_info *ppi[] = { &scc_port_info[board_idx], NULL };
-       struct ata_host *host;
-       int rc;
-
-       ata_print_version_once(&pdev->dev, DRV_VERSION);
-
-       host = ata_host_alloc_pinfo(&pdev->dev, ppi, 1);
-       if (!host)
-               return -ENOMEM;
-
-       rc = pcim_enable_device(pdev);
-       if (rc)
-               return rc;
-
-       rc = pcim_iomap_regions(pdev, (1 << SCC_CTRL_BAR) | (1 << SCC_BMID_BAR), DRV_NAME);
-       if (rc == -EBUSY)
-               pcim_pin_device(pdev);
-       if (rc)
-               return rc;
-       host->iomap = pcim_iomap_table(pdev);
-
-       ata_port_pbar_desc(host->ports[0], SCC_CTRL_BAR, -1, "ctrl");
-       ata_port_pbar_desc(host->ports[0], SCC_BMID_BAR, -1, "bmid");
-
-       rc = scc_host_init(host);
-       if (rc)
-               return rc;
-
-       return ata_host_activate(host, pdev->irq, ata_bmdma_interrupt,
-                                IRQF_SHARED, &scc_sht);
-}
-
-static struct pci_driver scc_pci_driver = {
-       .name                   = DRV_NAME,
-       .id_table               = scc_pci_tbl,
-       .probe                  = scc_init_one,
-       .remove                 = ata_pci_remove_one,
-#ifdef CONFIG_PM_SLEEP
-       .suspend                = ata_pci_device_suspend,
-       .resume                 = ata_pci_device_resume,
-#endif
-};
-
-module_pci_driver(scc_pci_driver);
-
-MODULE_AUTHOR("Toshiba corp");
-MODULE_DESCRIPTION("SCSI low-level driver for Toshiba SCC PATA controller");
-MODULE_LICENSE("GPL");
-MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
-MODULE_VERSION(DRV_VERSION);
index 9c2ba1c97c4257016503a8ed4d2166ac19dea9c0..df0c66cb7ad3719016436dd7eb16ab1d3234568d 100644
@@ -179,7 +179,7 @@ static int detect_cache_attributes(unsigned int cpu)
 {
        int ret;
 
-       if (init_cache_level(cpu))
+       if (init_cache_level(cpu) || !cache_leaves(cpu))
                return -ENOENT;
 
        per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
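
The added !cache_leaves(cpu) test closes a subtle hole: kcalloc() with a zero count does not fail, it returns the non-NULL ZERO_SIZE_PTR, so a CPU reporting zero cache leaves would sail past the allocation check and trip over the empty array later. A condensed sketch of the guarded flow, with the error labels elided:

	if (init_cache_level(cpu) || !cache_leaves(cpu))
		return -ENOENT;		/* no usable cache description */

	/* cache_leaves(cpu) is known non-zero here */
	per_cpu_cacheinfo(cpu) = kcalloc(cache_leaves(cpu),
					 sizeof(struct cacheinfo), GFP_KERNEL);
	if (!per_cpu_cacheinfo(cpu))
		return -ENOMEM;
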
index da033d3bab3c69d14e55d63c4286632905120ae2..48c0e220acc0a1b8192ca6b523ad35ab7073eba7 100644
@@ -8,6 +8,7 @@
 #include <linux/device.h>
 #include <linux/init.h>
 #include <linux/memory.h>
+#include <linux/of.h>
 
 #include "base.h"
 
@@ -34,4 +35,5 @@ void __init driver_init(void)
        cpu_dev_init();
        memory_dev_init();
        container_dev_init();
+       of_core_init();
 }
index 6491f45200a78681ce7ec95e65339292f8d194ab..8b7d7f8e58518448a53eeba640886d490fb75bee 100644
@@ -61,7 +61,7 @@ static DEVICE_ATTR_RO(physical_package_id);
 define_id_show_func(core_id);
 static DEVICE_ATTR_RO(core_id);
 
-define_siblings_show_func(thread_siblings, thread_cpumask);
+define_siblings_show_func(thread_siblings, sibling_cpumask);
 static DEVICE_ATTR_RO(thread_siblings);
 static DEVICE_ATTR_RO(thread_siblings_list);
 
index eb1fed5bd516ffac33c850eed47fad402250c686..3ccef9eba6f9dc53cecb785c23582cbdeb3b8618 100644
@@ -406,6 +406,7 @@ config BLK_DEV_RAM_DAX
 
 config BLK_DEV_PMEM
        tristate "Persistent memory block device support"
+       depends on HAS_IOMEM
        help
          Saying Y here will allow you to use a contiguous range of reserved
          memory as one or more persistent block devices.
index ae3fcb4199e9b7d85d2475d40ab4f209258a1cc5..d7173cb1ea76c206f1fcedbc96994e45901aa322 100644
@@ -1620,8 +1620,8 @@ out:
 
 static void loop_remove(struct loop_device *lo)
 {
-       del_gendisk(lo->lo_disk);
        blk_cleanup_queue(lo->lo_queue);
+       del_gendisk(lo->lo_disk);
        blk_mq_free_tag_set(&lo->tag_set);
        put_disk(lo->lo_disk);
        kfree(lo);
index 85b8036deaa3b7daaba5317ed746936a1f5183db..683dff272562b16d325df65495ad6a868cf45b14 100644
@@ -1750,6 +1750,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        struct nvme_iod *iod;
        dma_addr_t meta_dma = 0;
        void *meta = NULL;
+       void __user *metadata;
 
        if (copy_from_user(&io, uio, sizeof(io)))
                return -EFAULT;
@@ -1763,6 +1764,8 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
                meta_len = 0;
        }
 
+       metadata = (void __user *)(unsigned long)io.metadata;
+
        write = io.opcode & 1;
 
        switch (io.opcode) {
@@ -1786,13 +1789,13 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        if (meta_len) {
                meta = dma_alloc_coherent(&dev->pci_dev->dev, meta_len,
                                                &meta_dma, GFP_KERNEL);
+
                if (!meta) {
                        status = -ENOMEM;
                        goto unmap;
                }
                if (write) {
-                       if (copy_from_user(meta, (void __user *)io.metadata,
-                                                               meta_len)) {
+                       if (copy_from_user(meta, metadata, meta_len)) {
                                status = -EFAULT;
                                goto unmap;
                        }
@@ -1819,8 +1822,7 @@ static int nvme_submit_io(struct nvme_ns *ns, struct nvme_user_io __user *uio)
        nvme_free_iod(dev, iod);
        if (meta) {
                if (status == NVME_SC_SUCCESS && !write) {
-                       if (copy_to_user((void __user *)io.metadata, meta,
-                                                               meta_len))
+                       if (copy_to_user(metadata, meta, meta_len))
                                status = -EFAULT;
                }
                dma_free_coherent(&dev->pci_dev->dev, meta_len, meta, meta_dma);
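
The refactor above converts the userspace-supplied 64-bit io.metadata value into a void __user pointer once, instead of repeating the cast at every copy_from_user()/copy_to_user() call site. A minimal sketch of the pattern, with illustrative struct and field names:

	#include <linux/types.h>
	#include <linux/uaccess.h>

	struct example_io {
		__u64 metadata;		/* userspace pointer passed as a u64 */
		__u32 metadata_len;
	};

	static int example_copy_in(struct example_io *io, void *kbuf)
	{
		/* u64 -> unsigned long -> __user pointer, done exactly once */
		void __user *umeta = (void __user *)(unsigned long)io->metadata;

		if (copy_from_user(kbuf, umeta, io->metadata_len))
			return -EFAULT;
		return 0;
	}
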
index 6b736b00f63ebbbf01db7eb037695cbd77bca8c8..44f2514fb7755d0bdf9f4524ebbe5364a84af5a3 100644
@@ -944,7 +944,8 @@ static int nvme_trans_ext_inq_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
 static int nvme_trans_bdev_limits_page(struct nvme_ns *ns, struct sg_io_hdr *hdr,
                                        u8 *inq_response, int alloc_len)
 {
-       __be32 max_sectors = cpu_to_be32(queue_max_hw_sectors(ns->queue));
+       __be32 max_sectors = cpu_to_be32(
+               nvme_block_nr(ns, queue_max_hw_sectors(ns->queue)));
        __be32 max_discard = cpu_to_be32(ns->queue->limits.max_discard_sectors);
        __be32 discard_desc_count = cpu_to_be32(0x100);
 
@@ -2256,7 +2257,8 @@ static int nvme_trans_inquiry(struct nvme_ns *ns, struct sg_io_hdr *hdr,
        page_code = GET_INQ_PAGE_CODE(cmd);
        alloc_len = GET_INQ_ALLOC_LENGTH(cmd);
 
-       inq_response = kmalloc(alloc_len, GFP_KERNEL);
+       inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
+                               GFP_KERNEL);
        if (inq_response == NULL) {
                res = -ENOMEM;
                goto out_mem;
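
Allocating max(alloc_len, STANDARD_INQUIRY_LENGTH) fixes a sizing bug: the translation layer builds a full-size standard INQUIRY response in this buffer, so when the CDB requests fewer bytes than that, an alloc_len-sized buffer would be overrun. A condensed sketch of the sizing rule:

	/* size for the largest response we build, not for what was requested */
	inq_response = kmalloc(max(alloc_len, STANDARD_INQUIRY_LENGTH),
				GFP_KERNEL);
	if (!inq_response)
		return -ENOMEM;
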
index eabf4a8d00855ef06c2c2e9cd4178954903fe47c..095dfaadcaa5f3ceefd42acd8c044a8103d71c44 100644
@@ -139,11 +139,11 @@ static struct pmem_device *pmem_alloc(struct device *dev, struct resource *res)
        }
 
        /*
-        * Map the memory as non-cachable, as we can't write back the contents
+        * Map the memory as write-through, as we can't write back the contents
         * of the CPU caches in case of a crash.
         */
        err = -ENOMEM;
-       pmem->virt_addr = ioremap_nocache(pmem->phys_addr, pmem->size);
+       pmem->virt_addr = ioremap_wt(pmem->phys_addr, pmem->size);
        if (!pmem->virt_addr)
                goto out_release_region;
 
index 8dcbced0eafd5f8dc0a53dc8d8e9d4b37bad9bab..6e134f4759c0c9e98b93f221e7687004d4418342 100644
@@ -805,7 +805,9 @@ static void zram_reset_device(struct zram *zram)
        memset(&zram->stats, 0, sizeof(zram->stats));
        zram->disksize = 0;
        zram->max_comp_streams = 1;
+
        set_capacity(zram->disk, 0);
+       part_stat_set_all(&zram->disk->part0, 0);
 
        up_write(&zram->init_lock);
        /* I/O operation under all of CPU are done so let's free */
index 288547a3c566753d146e28924c7d3b023e8c0d3b..8c81af6dbe06365462b3c1d56481385a2db60881 100644
@@ -88,6 +88,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x04CA, 0x3007) },
        { USB_DEVICE(0x04CA, 0x3008) },
        { USB_DEVICE(0x04CA, 0x300b) },
+       { USB_DEVICE(0x04CA, 0x300f) },
        { USB_DEVICE(0x04CA, 0x3010) },
        { USB_DEVICE(0x0930, 0x0219) },
        { USB_DEVICE(0x0930, 0x0220) },
@@ -104,6 +105,7 @@ static const struct usb_device_id ath3k_table[] = {
        { USB_DEVICE(0x0cf3, 0xe003) },
        { USB_DEVICE(0x0CF3, 0xE004) },
        { USB_DEVICE(0x0CF3, 0xE005) },
+       { USB_DEVICE(0x0CF3, 0xE006) },
        { USB_DEVICE(0x13d3, 0x3362) },
        { USB_DEVICE(0x13d3, 0x3375) },
        { USB_DEVICE(0x13d3, 0x3393) },
@@ -143,6 +145,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -158,6 +161,7 @@ static const struct usb_device_id ath3k_blist_tbl[] = {
        { USB_DEVICE(0x0CF3, 0x817a), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
index 4f7e8d400bc01a178bcbfcf499340b40545fb54f..6de97b3871b0f8cffe4d2c9b2b78eb5e87c48247 100644
@@ -227,7 +227,6 @@ static void bt3c_receive(struct bt3c_info *info)
        iobase = info->p_dev->resource[0]->start;
 
        avail = bt3c_read(iobase, 0x7006);
-       //printk("bt3c_cs: receiving %d bytes\n", avail);
 
        bt3c_address(iobase, 0x7480);
        while (size < avail) {
@@ -250,7 +249,6 @@ static void bt3c_receive(struct bt3c_info *info)
 
                        bt_cb(info->rx_skb)->pkt_type = inb(iobase + DATA_L);
                        inb(iobase + DATA_H);
-                       //printk("bt3c: PACKET_TYPE=%02x\n", bt_cb(info->rx_skb)->pkt_type);
 
                        switch (bt_cb(info->rx_skb)->pkt_type) {
 
@@ -364,7 +362,6 @@ static irqreturn_t bt3c_interrupt(int irq, void *dev_inst)
                        if (stat & 0x0001)
                                bt3c_receive(info);
                        if (stat & 0x0002) {
-                               //BT_ERR("Ack (stat=0x%04x)", stat);
                                clear_bit(XMIT_SENDING, &(info->tx_state));
                                bt3c_write_wakeup(info);
                        }
index d0741f3ed7ec36b49f1c50ff119fabc7368d3a9e..4bba86677adc64553fe8415d9b6812bfb3e1449d 100644
@@ -95,6 +95,78 @@ int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
 }
 EXPORT_SYMBOL_GPL(btbcm_set_bdaddr);
 
+int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
+{
+       const struct hci_command_hdr *cmd;
+       const struct firmware *fw;
+       const u8 *fw_ptr;
+       size_t fw_size;
+       struct sk_buff *skb;
+       u16 opcode;
+       int err;
+
+       err = request_firmware(&fw, firmware, &hdev->dev);
+       if (err < 0) {
+               BT_INFO("%s: BCM: Patch %s not found", hdev->name, firmware);
+               return err;
+       }
+
+       /* Start Download */
+       skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
+                      hdev->name, err);
+               goto done;
+       }
+       kfree_skb(skb);
+
+       /* 50 msec delay after Download Minidrv completes */
+       msleep(50);
+
+       fw_ptr = fw->data;
+       fw_size = fw->size;
+
+       while (fw_size >= sizeof(*cmd)) {
+               const u8 *cmd_param;
+
+               cmd = (struct hci_command_hdr *)fw_ptr;
+               fw_ptr += sizeof(*cmd);
+               fw_size -= sizeof(*cmd);
+
+               if (fw_size < cmd->plen) {
+                       BT_ERR("%s: BCM: Patch %s is corrupted", hdev->name,
+                              firmware);
+                       err = -EINVAL;
+                       goto done;
+               }
+
+               cmd_param = fw_ptr;
+               fw_ptr += cmd->plen;
+               fw_size -= cmd->plen;
+
+               opcode = le16_to_cpu(cmd->opcode);
+
+               skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
+                                    HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb)) {
+                       err = PTR_ERR(skb);
+                       BT_ERR("%s: BCM: Patch command %04x failed (%d)",
+                              hdev->name, opcode, err);
+                       goto done;
+               }
+               kfree_skb(skb);
+       }
+
+       /* 250 msec delay after Launch Ram completes */
+       msleep(250);
+
+done:
+       release_firmware(fw);
+       return err;
+}
+EXPORT_SYMBOL(btbcm_patchram);
+
 static int btbcm_reset(struct hci_dev *hdev)
 {
        struct sk_buff *skb;
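
Factoring the download sequence into btbcm_patchram() lets any Broadcom setup path request a patch by firmware name. A hypothetical caller, mirroring the -ENOENT handling that btbcm_setup_patchram() adopts in the next hunk (the filename is illustrative, not taken from this diff):

	int err;

	err = btbcm_patchram(hdev, "brcm/BCM20702A0.hcd");	/* example name */
	if (err == -ENOENT)
		return 0;	/* a missing patch file is not fatal */
	if (err)
		return err;
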
@@ -198,12 +270,8 @@ static const struct {
 
 int btbcm_setup_patchram(struct hci_dev *hdev)
 {
-       const struct hci_command_hdr *cmd;
-       const struct firmware *fw;
-       const u8 *fw_ptr;
-       size_t fw_size;
        char fw_name[64];
-       u16 opcode, subver, rev, pid, vid;
+       u16 subver, rev, pid, vid;
        const char *hw_name = NULL;
        struct sk_buff *skb;
        struct hci_rp_read_local_version *ver;
@@ -273,74 +341,19 @@ int btbcm_setup_patchram(struct hci_dev *hdev)
                hw_name ? : "BCM", (subver & 0x7000) >> 13,
                (subver & 0x1f00) >> 8, (subver & 0x00ff), rev & 0x0fff);
 
-       err = request_firmware(&fw, fw_name, &hdev->dev);
-       if (err < 0) {
-               BT_INFO("%s: BCM: patch %s not found", hdev->name, fw_name);
+       err = btbcm_patchram(hdev, fw_name);
+       if (err == -ENOENT)
                return 0;
-       }
-
-       /* Start Download */
-       skb = __hci_cmd_sync(hdev, 0xfc2e, 0, NULL, HCI_INIT_TIMEOUT);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
-               BT_ERR("%s: BCM: Download Minidrv command failed (%d)",
-                      hdev->name, err);
-               goto reset;
-       }
-       kfree_skb(skb);
-
-       /* 50 msec delay after Download Minidrv completes */
-       msleep(50);
-
-       fw_ptr = fw->data;
-       fw_size = fw->size;
-
-       while (fw_size >= sizeof(*cmd)) {
-               const u8 *cmd_param;
-
-               cmd = (struct hci_command_hdr *)fw_ptr;
-               fw_ptr += sizeof(*cmd);
-               fw_size -= sizeof(*cmd);
-
-               if (fw_size < cmd->plen) {
-                       BT_ERR("%s: BCM: patch %s is corrupted", hdev->name,
-                              fw_name);
-                       err = -EINVAL;
-                       goto reset;
-               }
 
-               cmd_param = fw_ptr;
-               fw_ptr += cmd->plen;
-               fw_size -= cmd->plen;
-
-               opcode = le16_to_cpu(cmd->opcode);
-
-               skb = __hci_cmd_sync(hdev, opcode, cmd->plen, cmd_param,
-                                    HCI_INIT_TIMEOUT);
-               if (IS_ERR(skb)) {
-                       err = PTR_ERR(skb);
-                       BT_ERR("%s: BCM: patch command %04x failed (%d)",
-                              hdev->name, opcode, err);
-                       goto reset;
-               }
-               kfree_skb(skb);
-       }
-
-       /* 250 msec delay after Launch Ram completes */
-       msleep(250);
-
-reset:
        /* Reset */
        err = btbcm_reset(hdev);
        if (err)
-               goto done;
+               return err;
 
        /* Read Local Version Info */
        skb = btbcm_read_local_version(hdev);
-       if (IS_ERR(skb)) {
-               err = PTR_ERR(skb);
-               goto done;
-       }
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
 
        ver = (struct hci_rp_read_local_version *)skb->data;
        rev = le16_to_cpu(ver->hci_rev);
@@ -355,10 +368,7 @@ reset:
 
        set_bit(HCI_QUIRK_STRICT_DUPLICATE_FILTER, &hdev->quirks);
 
-done:
-       release_firmware(fw);
-
-       return err;
+       return 0;
 }
 EXPORT_SYMBOL_GPL(btbcm_setup_patchram);
 
index 34268ae3eb4607e16f5c9e017b23cb904a80b459..eb6ab5f9483d3b510ab6a2decfb1d694af8facb7 100644
@@ -25,6 +25,7 @@
 
 int btbcm_check_bdaddr(struct hci_dev *hdev);
 int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr);
+int btbcm_patchram(struct hci_dev *hdev, const char *firmware);
 
 int btbcm_setup_patchram(struct hci_dev *hdev);
 int btbcm_setup_apple(struct hci_dev *hdev);
@@ -41,6 +42,11 @@ static inline int btbcm_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
        return -EOPNOTSUPP;
 }
 
+static inline int btbcm_patchram(struct hci_dev *hdev, const char *firmware)
+{
+       return -EOPNOTSUPP;
+}
+
 static inline int btbcm_setup_patchram(struct hci_dev *hdev)
 {
        return 0;
index de7b236eeae7777f71389ec42c233525d309aef2..3c10d4dfe9a790e6e34f12022b1bcc2321ac648c 100644
@@ -24,6 +24,7 @@
 #include <linux/module.h>
 #include <linux/usb.h>
 #include <linux/firmware.h>
+#include <asm/unaligned.h>
 
 #include <net/bluetooth/bluetooth.h>
 #include <net/bluetooth/hci_core.h>
@@ -57,6 +58,7 @@ static struct usb_driver btusb_driver;
 #define BTUSB_AMP              0x4000
 #define BTUSB_QCA_ROME         0x8000
 #define BTUSB_BCM_APPLE                0x10000
+#define BTUSB_REALTEK          0x20000
 
 static const struct usb_device_id btusb_table[] = {
        /* Generic Bluetooth USB device */
@@ -184,6 +186,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x04ca, 0x3007), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3008), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x300b), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x04ca, 0x300f), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x04ca, 0x3010), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0219), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0930, 0x0220), .driver_info = BTUSB_ATH3012 },
@@ -200,6 +203,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0cf3, 0xe003), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe004), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x0cf3, 0xe005), .driver_info = BTUSB_ATH3012 },
+       { USB_DEVICE(0x0cf3, 0xe006), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3362), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3375), .driver_info = BTUSB_ATH3012 },
        { USB_DEVICE(0x13d3, 0x3393), .driver_info = BTUSB_ATH3012 },
@@ -216,6 +220,7 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_DEVICE(0x0489, 0xe03c), .driver_info = BTUSB_ATH3012 },
 
        /* QCA ROME chipset */
+       { USB_DEVICE(0x0cf3, 0xe007), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe300), .driver_info = BTUSB_QCA_ROME },
        { USB_DEVICE(0x0cf3, 0xe360), .driver_info = BTUSB_QCA_ROME },
 
@@ -288,6 +293,28 @@ static const struct usb_device_id blacklist_table[] = {
        { USB_VENDOR_AND_INTERFACE_INFO(0x8087, 0xe0, 0x01, 0x01),
          .driver_info = BTUSB_IGNORE },
 
+       /* Realtek Bluetooth devices */
+       { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01),
+         .driver_info = BTUSB_REALTEK },
+
+       /* Additional Realtek 8723AE Bluetooth devices */
+       { USB_DEVICE(0x0930, 0x021d), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3394), .driver_info = BTUSB_REALTEK },
+
+       /* Additional Realtek 8723BE Bluetooth devices */
+       { USB_DEVICE(0x0489, 0xe085), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x0489, 0xe08b), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3410), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3416), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3459), .driver_info = BTUSB_REALTEK },
+
+       /* Additional Realtek 8821AE Bluetooth devices */
+       { USB_DEVICE(0x0b05, 0x17dc), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3414), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3458), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3461), .driver_info = BTUSB_REALTEK },
+       { USB_DEVICE(0x13d3, 0x3462), .driver_info = BTUSB_REALTEK },
+
        { }     /* Terminating entry */
 };
 
@@ -892,7 +919,7 @@ static int btusb_open(struct hci_dev *hdev)
         */
        if (data->setup_on_usb) {
                err = data->setup_on_usb(hdev);
-               if (err <0)
+               if (err < 0)
                        return err;
        }
 
@@ -1345,6 +1372,378 @@ static int btusb_setup_csr(struct hci_dev *hdev)
        return ret;
 }
 
+#define RTL_FRAG_LEN 252
+
+struct rtl_download_cmd {
+       __u8 index;
+       __u8 data[RTL_FRAG_LEN];
+} __packed;
+
+struct rtl_download_response {
+       __u8 status;
+       __u8 index;
+} __packed;
+
+struct rtl_rom_version_evt {
+       __u8 status;
+       __u8 version;
+} __packed;
+
+struct rtl_epatch_header {
+       __u8 signature[8];
+       __le32 fw_version;
+       __le16 num_patches;
+} __packed;
+
+#define RTL_EPATCH_SIGNATURE   "Realtech"
+#define RTL_ROM_LMP_3499       0x3499
+#define RTL_ROM_LMP_8723A      0x1200
+#define RTL_ROM_LMP_8723B      0x8723
+#define RTL_ROM_LMP_8821A      0x8821
+#define RTL_ROM_LMP_8761A      0x8761
+
+static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version)
+{
+       struct rtl_rom_version_evt *rom_version;
+       struct sk_buff *skb;
+       int ret;
+
+       /* Read RTL ROM version command */
+       skb = __hci_cmd_sync(hdev, 0xfc6d, 0, NULL, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               BT_ERR("%s: Read ROM version failed (%ld)",
+                      hdev->name, PTR_ERR(skb));
+               return PTR_ERR(skb);
+       }
+
+       if (skb->len != sizeof(*rom_version)) {
+               BT_ERR("%s: RTL version event length mismatch", hdev->name);
+               kfree_skb(skb);
+               return -EIO;
+       }
+
+       rom_version = (struct rtl_rom_version_evt *)skb->data;
+       BT_INFO("%s: rom_version status=%x version=%x",
+               hdev->name, rom_version->status, rom_version->version);
+
+       ret = rom_version->status;
+       if (ret == 0)
+               *version = rom_version->version;
+
+       kfree_skb(skb);
+       return ret;
+}
+
+static int rtl8723b_parse_firmware(struct hci_dev *hdev, u16 lmp_subver,
+                                  const struct firmware *fw,
+                                  unsigned char **_buf)
+{
+       const u8 extension_sig[] = { 0x51, 0x04, 0xfd, 0x77 };
+       struct rtl_epatch_header *epatch_info;
+       unsigned char *buf;
+       int i, ret, len;
+       size_t min_size;
+       u8 opcode, length, data, rom_version = 0;
+       int project_id = -1;
+       const unsigned char *fwptr, *chip_id_base;
+       const unsigned char *patch_length_base, *patch_offset_base;
+       u32 patch_offset = 0;
+       u16 patch_length, num_patches;
+       const u16 project_id_to_lmp_subver[] = {
+               RTL_ROM_LMP_8723A,
+               RTL_ROM_LMP_8723B,
+               RTL_ROM_LMP_8821A,
+               RTL_ROM_LMP_8761A
+       };
+
+       ret = rtl_read_rom_version(hdev, &rom_version);
+       if (ret)
+               return -bt_to_errno(ret);
+
+       min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       fwptr = fw->data + fw->size - sizeof(extension_sig);
+       if (memcmp(fwptr, extension_sig, sizeof(extension_sig)) != 0) {
+               BT_ERR("%s: extension section signature mismatch", hdev->name);
+               return -EINVAL;
+       }
+
+       /* Loop from the end of the firmware parsing instructions, until
+        * we find an instruction that identifies the "project ID" for the
+        * hardware supported by this firmware file.
+        * Once we have that, we double-check that the project_id is suitable
+        * for the hardware we are working with.
+        */
+       while (fwptr >= fw->data + (sizeof(struct rtl_epatch_header) + 3)) {
+               opcode = *--fwptr;
+               length = *--fwptr;
+               data = *--fwptr;
+
+               BT_DBG("check op=%x len=%x data=%x", opcode, length, data);
+
+               if (opcode == 0xff) /* EOF */
+                       break;
+
+               if (length == 0) {
+                       BT_ERR("%s: found instruction with length 0",
+                              hdev->name);
+                       return -EINVAL;
+               }
+
+               if (opcode == 0 && length == 1) {
+                       project_id = data;
+                       break;
+               }
+
+               fwptr -= length;
+       }
+
+       if (project_id < 0) {
+               BT_ERR("%s: failed to find version instruction", hdev->name);
+               return -EINVAL;
+       }
+
+       if (project_id >= ARRAY_SIZE(project_id_to_lmp_subver)) {
+               BT_ERR("%s: unknown project id %d", hdev->name, project_id);
+               return -EINVAL;
+       }
+
+       if (lmp_subver != project_id_to_lmp_subver[project_id]) {
+               BT_ERR("%s: firmware is for %x but this is a %x", hdev->name,
+                      project_id_to_lmp_subver[project_id], lmp_subver);
+               return -EINVAL;
+       }
+
+       epatch_info = (struct rtl_epatch_header *)fw->data;
+       if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) {
+               BT_ERR("%s: bad EPATCH signature", hdev->name);
+               return -EINVAL;
+       }
+
+       num_patches = le16_to_cpu(epatch_info->num_patches);
+       BT_DBG("fw_version=%x, num_patches=%d",
+              le32_to_cpu(epatch_info->fw_version), num_patches);
+
+       /* After the rtl_epatch_header there is a funky patch metadata section.
+        * Assuming 2 patches, the layout is:
+        * ChipID1 ChipID2 PatchLength1 PatchLength2 PatchOffset1 PatchOffset2
+        *
+        * Find the right patch for this chip.
+        */
+       min_size += 8 * num_patches;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       chip_id_base = fw->data + sizeof(struct rtl_epatch_header);
+       patch_length_base = chip_id_base + (sizeof(u16) * num_patches);
+       patch_offset_base = patch_length_base + (sizeof(u16) * num_patches);
+       for (i = 0; i < num_patches; i++) {
+               u16 chip_id = get_unaligned_le16(chip_id_base +
+                                                (i * sizeof(u16)));
+               if (chip_id == rom_version + 1) {
+                       patch_length = get_unaligned_le16(patch_length_base +
+                                                         (i * sizeof(u16)));
+                       patch_offset = get_unaligned_le32(patch_offset_base +
+                                                         (i * sizeof(u32)));
+                       break;
+               }
+       }
+
+       if (!patch_offset) {
+               BT_ERR("%s: didn't find patch for chip id %d",
+                      hdev->name, rom_version);
+               return -EINVAL;
+       }
+
+       BT_DBG("length=%x offset=%x index %d", patch_length, patch_offset, i);
+       min_size = patch_offset + patch_length;
+       if (fw->size < min_size)
+               return -EINVAL;
+
+       /* Copy the firmware into a new buffer and write the version at
+        * the end.
+        */
+       len = patch_length;
+       buf = kmemdup(fw->data + patch_offset, patch_length, GFP_KERNEL);
+       if (!buf)
+               return -ENOMEM;
+
+       memcpy(buf + patch_length - 4, &epatch_info->fw_version, 4);
+
+       *_buf = buf;
+       return len;
+}
+
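+For orientation, a sketch of the firmware container that rtl8723b_parse_firmware()
+walks; the concrete offsets assume num_patches = 2 and are illustrative, not part
+of the patch itself:
+
+	/* 0x00  signature[8]       "Realtech"
+	 * 0x08  le32 fw_version
+	 * 0x0c  le16 num_patches   = 2
+	 * 0x0e  le16 chip_id[2]        (matched against rom_version + 1)
+	 * 0x12  le16 patch_length[2]
+	 * 0x16  le32 patch_offset[2]
+	 * ...   patch bodies
+	 * tail: records laid out as <payload[len]> <len> <opcode>, walked
+	 *       back-to-front; opcode 0xff terminates the walk and an
+	 *       opcode 0x00 record of length 1 carries the project id
+	 * last: extension signature 51 04 fd 77
+	 *
+	 * The selected patch is copied out and its final 4 bytes are
+	 * overwritten with the header's fw_version before download.
+	 */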
+static int rtl_download_firmware(struct hci_dev *hdev,
+                                const unsigned char *data, int fw_len)
+{
+       struct rtl_download_cmd *dl_cmd;
+       int frag_num = fw_len / RTL_FRAG_LEN + 1;
+       int frag_len = RTL_FRAG_LEN;
+       int ret = 0;
+       int i;
+
+       dl_cmd = kmalloc(sizeof(struct rtl_download_cmd), GFP_KERNEL);
+       if (!dl_cmd)
+               return -ENOMEM;
+
+       for (i = 0; i < frag_num; i++) {
+               struct rtl_download_response *dl_resp;
+               struct sk_buff *skb;
+
+               BT_DBG("download fw (%d/%d)", i, frag_num);
+
+               dl_cmd->index = i;
+               if (i == (frag_num - 1)) {
+                       dl_cmd->index |= 0x80; /* data end */
+                       frag_len = fw_len % RTL_FRAG_LEN;
+               }
+               memcpy(dl_cmd->data, data, frag_len);
+
+               /* Send download command */
+               skb = __hci_cmd_sync(hdev, 0xfc20, frag_len + 1, dl_cmd,
+                                    HCI_INIT_TIMEOUT);
+               if (IS_ERR(skb)) {
+                       BT_ERR("%s: download fw command failed (%ld)",
+                              hdev->name, PTR_ERR(skb));
+                       ret = PTR_ERR(skb);
+                       goto out;
+               }
+
+               if (skb->len != sizeof(*dl_resp)) {
+                       BT_ERR("%s: download fw event length mismatch",
+                              hdev->name);
+                       kfree_skb(skb);
+                       ret = -EIO;
+                       goto out;
+               }
+
+               dl_resp = (struct rtl_download_response *)skb->data;
+               if (dl_resp->status != 0) {
+                       kfree_skb(skb);
+                       ret = bt_to_errno(dl_resp->status);
+                       goto out;
+               }
+
+               kfree_skb(skb);
+               data += RTL_FRAG_LEN;
+       }
+
+out:
+       kfree(dl_cmd);
+       return ret;
+}
+
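+A worked example of the fragmentation arithmetic above (invented values, not part
+of the patch):
+
+	/* fw_len = 1000, RTL_FRAG_LEN = 252:
+	 *   frag_num = 1000 / 252 + 1 = 4
+	 *   fragments 0..2 carry 252 bytes each (index 0, 1, 2),
+	 *   fragment 3 carries 1000 % 252 = 244 bytes, index 3 | 0x80 = 0x83.
+	 * Note this assumes fw_len is not an exact multiple of RTL_FRAG_LEN;
+	 * an exact multiple would make the final fragment zero-length.
+	 */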
+static int btusb_setup_rtl8723a(struct hci_dev *hdev)
+{
+       struct btusb_data *data = dev_get_drvdata(&hdev->dev);
+       struct usb_device *udev = interface_to_usbdev(data->intf);
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: rtl: loading rtl_bt/rtl8723a_fw.bin", hdev->name);
+       ret = request_firmware(&fw, "rtl_bt/rtl8723a_fw.bin", &udev->dev);
+       if (ret < 0) {
+               BT_ERR("%s: Failed to load rtl_bt/rtl8723a_fw.bin", hdev->name);
+               return ret;
+       }
+
+       if (fw->size < 8) {
+               ret = -EINVAL;
+               goto out;
+       }
+
+       /* Check that the firmware doesn't have the epatch signature
+        * (which is only for RTL8723B and newer).
+        */
+       if (!memcmp(fw->data, RTL_EPATCH_SIGNATURE, 8)) {
+               BT_ERR("%s: unexpected EPATCH signature!", hdev->name);
+               ret = -EINVAL;
+               goto out;
+       }
+
+       ret = rtl_download_firmware(hdev, fw->data, fw->size);
+
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static int btusb_setup_rtl8723b(struct hci_dev *hdev, u16 lmp_subver,
+                               const char *fw_name)
+{
+       struct btusb_data *data = dev_get_drvdata(&hdev->dev);
+       struct usb_device *udev = interface_to_usbdev(data->intf);
+       unsigned char *fw_data = NULL;
+       const struct firmware *fw;
+       int ret;
+
+       BT_INFO("%s: rtl: loading %s", hdev->name, fw_name);
+       ret = request_firmware(&fw, fw_name, &udev->dev);
+       if (ret < 0) {
+               BT_ERR("%s: Failed to load %s", hdev->name, fw_name);
+               return ret;
+       }
+
+       ret = rtl8723b_parse_firmware(hdev, lmp_subver, fw, &fw_data);
+       if (ret < 0)
+               goto out;
+
+       ret = rtl_download_firmware(hdev, fw_data, ret);
+       kfree(fw_data);
+
+out:
+       release_firmware(fw);
+       return ret;
+}
+
+static int btusb_setup_realtek(struct hci_dev *hdev)
+{
+       struct sk_buff *skb;
+       struct hci_rp_read_local_version *resp;
+       u16 lmp_subver;
+
+       skb = btusb_read_local_version(hdev);
+       if (IS_ERR(skb))
+               return PTR_ERR(skb);
+
+       resp = (struct hci_rp_read_local_version *)skb->data;
+       BT_INFO("%s: rtl: examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x "
+               "lmp_subver=%04x", hdev->name, resp->hci_ver, resp->hci_rev,
+               resp->lmp_ver, resp->lmp_subver);
+
+       lmp_subver = le16_to_cpu(resp->lmp_subver);
+       kfree_skb(skb);
+
+       /* Match a set of subver values that correspond to stock firmware,
+        * which is not compatible with standard btusb.
+        * If matched, upload an alternative firmware that does conform to
+        * standard btusb. Once that firmware is uploaded, the subver changes
+        * to a different value.
+        */
+       switch (lmp_subver) {
+       case RTL_ROM_LMP_8723A:
+       case RTL_ROM_LMP_3499:
+               return btusb_setup_rtl8723a(hdev);
+       case RTL_ROM_LMP_8723B:
+               return btusb_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8723b_fw.bin");
+       case RTL_ROM_LMP_8821A:
+               return btusb_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8821a_fw.bin");
+       case RTL_ROM_LMP_8761A:
+               return btusb_setup_rtl8723b(hdev, lmp_subver,
+                                           "rtl_bt/rtl8761a_fw.bin");
+       default:
+               BT_INFO("rtl: assuming no firmware upload needed.");
+               return 0;
+       }
+}
+
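+The switch above amounts to the following mapping (summary derived from the code):
+
+	/* lmp_subver 0x1200 or 0x3499 -> btusb_setup_rtl8723a()
+	 *                                (raw blob; must not carry an
+	 *                                EPATCH header)
+	 * lmp_subver 0x8723           -> rtl_bt/rtl8723b_fw.bin
+	 * lmp_subver 0x8821           -> rtl_bt/rtl8821a_fw.bin
+	 * lmp_subver 0x8761           -> rtl_bt/rtl8761a_fw.bin
+	 * anything else               -> no upload; patched firmware is
+	 *                                assumed to be running already
+	 */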
 static const struct firmware *btusb_setup_intel_get_fw(struct hci_dev *hdev,
                                                       struct intel_version *ver)
 {
@@ -2577,7 +2976,7 @@ static int btusb_setup_qca(struct hci_dev *hdev)
        int i, err;
 
        err = btusb_qca_send_vendor_req(hdev, QCA_GET_TARGET_VERSION, &ver,
-                                       sizeof(ver));
+                                       sizeof(ver));
        if (err < 0)
                return err;
 
@@ -2776,6 +3175,9 @@ static int btusb_probe(struct usb_interface *intf,
                hdev->set_bdaddr = btusb_set_bdaddr_ath3012;
        }
 
+       if (id->driver_info & BTUSB_REALTEK)
+               hdev->setup = btusb_setup_realtek;
+
        if (id->driver_info & BTUSB_AMP) {
                /* AMP controllers do not support SCO packets */
                data->isoc = NULL;
index 1b3f8647ea2fd446e1a6f14979a27e399fb0dbf5..ec8fa0e0f03630c9646a60b831277477a249b010 100644 (file)
@@ -95,7 +95,6 @@ static void ath_hci_uart_work(struct work_struct *work)
        hci_uart_tx_wakeup(hu);
 }
 
-/* Initialize protocol */
 static int ath_open(struct hci_uart *hu)
 {
        struct ath_struct *ath;
@@ -116,8 +115,7 @@ static int ath_open(struct hci_uart *hu)
        return 0;
 }
 
-/* Flush protocol data */
-static int ath_flush(struct hci_uart *hu)
+static int ath_close(struct hci_uart *hu)
 {
        struct ath_struct *ath = hu->priv;
 
@@ -125,11 +123,17 @@ static int ath_flush(struct hci_uart *hu)
 
        skb_queue_purge(&ath->txq);
 
+       kfree_skb(ath->rx_skb);
+
+       cancel_work_sync(&ath->ctxtsw);
+
+       hu->priv = NULL;
+       kfree(ath);
+
        return 0;
 }
 
-/* Close protocol */
-static int ath_close(struct hci_uart *hu)
+static int ath_flush(struct hci_uart *hu)
 {
        struct ath_struct *ath = hu->priv;
 
@@ -137,19 +141,65 @@ static int ath_close(struct hci_uart *hu)
 
        skb_queue_purge(&ath->txq);
 
-       kfree_skb(ath->rx_skb);
+       return 0;
+}
 
-       cancel_work_sync(&ath->ctxtsw);
+static int ath_set_bdaddr(struct hci_dev *hdev, const bdaddr_t *bdaddr)
+{
+       struct sk_buff *skb;
+       u8 buf[10];
+       int err;
+
+       buf[0] = 0x01;
+       buf[1] = 0x01;
+       buf[2] = 0x00;
+       buf[3] = sizeof(bdaddr_t);
+       memcpy(buf + 4, bdaddr, sizeof(bdaddr_t));
+
+       skb = __hci_cmd_sync(hdev, 0xfc0b, sizeof(buf), buf, HCI_INIT_TIMEOUT);
+       if (IS_ERR(skb)) {
+               err = PTR_ERR(skb);
+               BT_ERR("%s: Change address command failed (%d)",
+                      hdev->name, err);
+               return err;
+       }
+       kfree_skb(skb);
 
-       hu->priv = NULL;
-       kfree(ath);
+       return 0;
+}
+
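+A sketch of the 10-byte vendor payload built above; the field interpretations are
+an assumption (the patch only shows the raw bytes):
+
+	/* opcode 0xfc0b:
+	 *   buf[0] = 0x01        presumably an entry index
+	 *   buf[1] = 0x01        presumably an entry count
+	 *   buf[2] = 0x00        reserved
+	 *   buf[3] = 6           sizeof(bdaddr_t)
+	 *   buf[4..9]            the new BD_ADDR
+	 */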
+static int ath_setup(struct hci_uart *hu)
+{
+       BT_DBG("hu %p", hu);
+
+       hu->hdev->set_bdaddr = ath_set_bdaddr;
 
        return 0;
 }
 
+static const struct h4_recv_pkt ath_recv_pkts[] = {
+       { H4_RECV_ACL,   .recv = hci_recv_frame },
+       { H4_RECV_SCO,   .recv = hci_recv_frame },
+       { H4_RECV_EVENT, .recv = hci_recv_frame },
+};
+
+static int ath_recv(struct hci_uart *hu, const void *data, int count)
+{
+       struct ath_struct *ath = hu->priv;
+
+       ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
+                                 ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
+       if (IS_ERR(ath->rx_skb)) {
+               int err = PTR_ERR(ath->rx_skb);
+               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
+               return err;
+       }
+
+       return count;
+}
+
 #define HCI_OP_ATH_SLEEP 0xFC04
 
-/* Enqueue frame for transmittion */
 static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
 {
        struct ath_struct *ath = hu->priv;
@@ -159,8 +209,7 @@ static int ath_enqueue(struct hci_uart *hu, struct sk_buff *skb)
                return 0;
        }
 
-       /*
-        * Update power management enable flag with parameters of
+       /* Update power management enable flag with parameters of
         * HCI sleep enable vendor specific HCI command.
         */
        if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT) {
@@ -190,37 +239,16 @@ static struct sk_buff *ath_dequeue(struct hci_uart *hu)
        return skb_dequeue(&ath->txq);
 }
 
-static const struct h4_recv_pkt ath_recv_pkts[] = {
-       { H4_RECV_ACL,   .recv = hci_recv_frame },
-       { H4_RECV_SCO,   .recv = hci_recv_frame },
-       { H4_RECV_EVENT, .recv = hci_recv_frame },
-};
-
-/* Recv data */
-static int ath_recv(struct hci_uart *hu, const void *data, int count)
-{
-       struct ath_struct *ath = hu->priv;
-
-       ath->rx_skb = h4_recv_buf(hu->hdev, ath->rx_skb, data, count,
-                                 ath_recv_pkts, ARRAY_SIZE(ath_recv_pkts));
-       if (IS_ERR(ath->rx_skb)) {
-               int err = PTR_ERR(ath->rx_skb);
-               BT_ERR("%s: Frame reassembly failed (%d)", hu->hdev->name, err);
-               return err;
-       }
-
-       return count;
-}
-
 static const struct hci_uart_proto athp = {
        .id             = HCI_UART_ATH3K,
        .name           = "ATH3K",
        .open           = ath_open,
        .close          = ath_close,
+       .flush          = ath_flush,
+       .setup          = ath_setup,
        .recv           = ath_recv,
        .enqueue        = ath_enqueue,
        .dequeue        = ath_dequeue,
-       .flush          = ath_flush,
 };
 
 int __init ath_init(void)
index b854125e48311aa1e101c8ea9ab6ff0ec909753c..5340604b23a4e1b24b0c01bf3798dbc35fcda40a 100644 (file)
@@ -660,7 +660,7 @@ validate_group(struct perf_event *event)
                 * Initialise the fake PMU. We only need to populate the
                 * used_mask for the purposes of validation.
                 */
-               .used_mask = CPU_BITS_NONE,
+               .used_mask = { 0 },
        };
 
        if (!validate_event(event->pmu, &fake_pmu, leader))
index 5bd792c68f9b897ddbafe50a39c3bdb9b49306d6..ab3bde16ecb4443a2e63d8e327fa1db730294dff 100644 (file)
@@ -453,7 +453,7 @@ void __iomem *mips_cdmm_early_probe(unsigned int dev_type)
 
        /* Look for a specific device type */
        for (; drb < bus->drbs; drb += size + 1) {
-               acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+               acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
                type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
                if (type == dev_type)
                        return cdmm + drb * CDMM_DRB_SIZE;
@@ -500,7 +500,7 @@ static void mips_cdmm_bus_discover(struct mips_cdmm_bus *bus)
        bus->discovered = true;
        pr_info("cdmm%u discovery (%u blocks)\n", cpu, bus->drbs);
        for (; drb < bus->drbs; drb += size + 1) {
-               acsr = readl(cdmm + drb * CDMM_DRB_SIZE);
+               acsr = __raw_readl(cdmm + drb * CDMM_DRB_SIZE);
                type = (acsr & CDMM_ACSR_DEVTYPE) >> CDMM_ACSR_DEVTYPE_SHIFT;
                size = (acsr & CDMM_ACSR_DEVSIZE) >> CDMM_ACSR_DEVSIZE_SHIFT;
                rev  = (acsr & CDMM_ACSR_DEVREV)  >> CDMM_ACSR_DEVREV_SHIFT;
index fb9ec6221730a2d594f66d15e54471aea75cc750..6f047dcb94c22b3ea67349bf1c4b95be6b91037a 100644 (file)
@@ -58,7 +58,6 @@
 #include <linux/debugfs.h>
 #include <linux/log2.h>
 #include <linux/syscore_ops.h>
-#include <linux/memblock.h>
 
 /*
  * DDR target is the same on all platforms.
@@ -70,6 +69,7 @@
  */
 #define WIN_CTRL_OFF           0x0000
 #define   WIN_CTRL_ENABLE       BIT(0)
+/* Only on HW I/O coherency capable platforms */
 #define   WIN_CTRL_SYNCBARRIER  BIT(1)
 #define   WIN_CTRL_TGT_MASK     0xf0
 #define   WIN_CTRL_TGT_SHIFT    4
 
 /* Relative to mbusbridge_base */
 #define MBUS_BRIDGE_CTRL_OFF   0x0
-#define  MBUS_BRIDGE_SIZE_MASK  0xffff0000
 #define MBUS_BRIDGE_BASE_OFF   0x4
-#define  MBUS_BRIDGE_BASE_MASK  0xffff0000
 
 /* Maximum number of windows, for all known platforms */
 #define MBUS_WINS_MAX           20
@@ -323,8 +321,9 @@ static int mvebu_mbus_setup_window(struct mvebu_mbus_state *mbus,
        ctrl = ((size - 1) & WIN_CTRL_SIZE_MASK) |
                (attr << WIN_CTRL_ATTR_SHIFT)    |
                (target << WIN_CTRL_TGT_SHIFT)   |
-               WIN_CTRL_SYNCBARRIER             |
                WIN_CTRL_ENABLE;
+       if (mbus->hw_io_coherency)
+               ctrl |= WIN_CTRL_SYNCBARRIER;
 
        writel(base & WIN_BASE_LOW, addr + WIN_BASE_OFF);
        writel(ctrl, addr + WIN_CTRL_OFF);
@@ -577,106 +576,36 @@ static unsigned int armada_xp_mbus_win_remap_offset(int win)
                return MVEBU_MBUS_NO_REMAP;
 }
 
-/*
- * Use the memblock information to find the MBus bridge hole in the
- * physical address space.
- */
-static void __init
-mvebu_mbus_find_bridge_hole(uint64_t *start, uint64_t *end)
-{
-       struct memblock_region *r;
-       uint64_t s = 0;
-
-       for_each_memblock(memory, r) {
-               /*
-                * This part of the memory is above 4 GB, so we don't
-                * care for the MBus bridge hole.
-                */
-               if (r->base >= 0x100000000)
-                       continue;
-
-               /*
-                * The MBus bridge hole is at the end of the RAM under
-                * the 4 GB limit.
-                */
-               if (r->base + r->size > s)
-                       s = r->base + r->size;
-       }
-
-       *start = s;
-       *end = 0x100000000;
-}
-
 static void __init
 mvebu_mbus_default_setup_cpu_target(struct mvebu_mbus_state *mbus)
 {
        int i;
        int cs;
-       uint64_t mbus_bridge_base, mbus_bridge_end;
 
        mvebu_mbus_dram_info.mbus_dram_target_id = TARGET_DDR;
 
-       mvebu_mbus_find_bridge_hole(&mbus_bridge_base, &mbus_bridge_end);
-
        for (i = 0, cs = 0; i < 4; i++) {
-               u64 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
-               u64 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
-               u64 end;
-               struct mbus_dram_window *w;
-
-               /* Ignore entries that are not enabled */
-               if (!(size & DDR_SIZE_ENABLED))
-                       continue;
-
-               /*
-                * Ignore entries whose base address is above 2^32,
-                * since devices cannot DMA to such high addresses
-                */
-               if (base & DDR_BASE_CS_HIGH_MASK)
-                       continue;
-
-               base = base & DDR_BASE_CS_LOW_MASK;
-               size = (size | ~DDR_SIZE_MASK) + 1;
-               end = base + size;
-
-               /*
-                * Adjust base/size of the current CS to make sure it
-                * doesn't overlap with the MBus bridge hole. This is
-                * particularly important for devices that do DMA from
-                * DRAM to a SRAM mapped in a MBus window, such as the
-                * CESA cryptographic engine.
-                */
+               u32 base = readl(mbus->sdramwins_base + DDR_BASE_CS_OFF(i));
+               u32 size = readl(mbus->sdramwins_base + DDR_SIZE_CS_OFF(i));
 
                /*
-                * The CS is fully enclosed inside the MBus bridge
-                * area, so ignore it.
+                * We only take care of entries for which the chip
+                * select is enabled, and that don't have high base
+                * address bits set (devices can only access the first
+                * 32 bits of the memory).
                 */
-               if (base >= mbus_bridge_base && end <= mbus_bridge_end)
-                       continue;
+               if ((size & DDR_SIZE_ENABLED) &&
+                   !(base & DDR_BASE_CS_HIGH_MASK)) {
+                       struct mbus_dram_window *w;
 
-               /*
-                * Beginning of CS overlaps with end of MBus, raise CS
-                * base address, and shrink its size.
-                */
-               if (base >= mbus_bridge_base && end > mbus_bridge_end) {
-                       size -= mbus_bridge_end - base;
-                       base = mbus_bridge_end;
+                       w = &mvebu_mbus_dram_info.cs[cs++];
+                       w->cs_index = i;
+                       w->mbus_attr = 0xf & ~(1 << i);
+                       if (mbus->hw_io_coherency)
+                               w->mbus_attr |= ATTR_HW_COHERENCY;
+                       w->base = base & DDR_BASE_CS_LOW_MASK;
+                       w->size = (size | ~DDR_SIZE_MASK) + 1;
                }
-
-               /*
-                * End of CS overlaps with beginning of MBus, shrink
-                * CS size.
-                */
-               if (base < mbus_bridge_base && end > mbus_bridge_base)
-                       size -= end - mbus_bridge_base;
-
-               w = &mvebu_mbus_dram_info.cs[cs++];
-               w->cs_index = i;
-               w->mbus_attr = 0xf & ~(1 << i);
-               if (mbus->hw_io_coherency)
-                       w->mbus_attr |= ATTR_HW_COHERENCY;
-               w->base = base;
-               w->size = size;
        }
        mvebu_mbus_dram_info.num_cs = cs;
 }
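A worked decode of the size-register math above, assuming for illustration
DDR_SIZE_MASK = 0xff000000 (the mask is defined elsewhere in this file):

	/* size register = 0x0f000001:
	 *   bit 0 set -> DDR_SIZE_ENABLED, so the entry is used;
	 *   (0x0f000001 | ~0xff000000) + 1 = 0x0fffffff + 1 = 0x10000000,
	 *   i.e. a 256 MiB window.
	 * For chip select i = 0, w->mbus_attr = 0xf & ~(1 << 0) = 0xe.
	 */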
index 11f7982cbdb321ba26020b8c8a1495973ebb62e1..ebee57d715d2314df6b3ab0bff60cde656691a93 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * OMAP L3 Interconnect error handling driver
  *
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
  *     Santosh Shilimkar <santosh.shilimkar@ti.com>
  *     Sricharan <r.sricharan@ti.com>
  *
@@ -233,7 +233,8 @@ static irqreturn_t l3_interrupt_handler(int irq, void *_l3)
 }
 
 static const struct of_device_id l3_noc_match[] = {
-       {.compatible = "ti,omap4-l3-noc", .data = &omap_l3_data},
+       {.compatible = "ti,omap4-l3-noc", .data = &omap4_l3_data},
+       {.compatible = "ti,omap5-l3-noc", .data = &omap5_l3_data},
        {.compatible = "ti,dra7-l3-noc", .data = &dra_l3_data},
        {.compatible = "ti,am4372-l3-noc", .data = &am4372_l3_data},
        {},
index 95254585db86aca1adc29091a28486fe821e7e53..73431f81da28c0036fba33b52a17641eee5e385a 100644 (file)
@@ -1,7 +1,7 @@
 /*
  * OMAP L3 Interconnect  error handling driver header
  *
- * Copyright (C) 2011-2014 Texas Instruments Incorporated - http://www.ti.com/
+ * Copyright (C) 2011-2015 Texas Instruments Incorporated - http://www.ti.com/
  *     Santosh Shilimkar <santosh.shilimkar@ti.com>
  *     sricharan <r.sricharan@ti.com>
  *
@@ -175,16 +175,14 @@ static struct l3_flagmux_data omap_l3_flagmux_clk2 = {
 };
 
 
-static struct l3_target_data omap_l3_target_data_clk3[] = {
-       {0x0100, "EMUSS",},
-       {0x0300, "DEBUG SOURCE",},
-       {0x0,   "HOST CLK3",},
+static struct l3_target_data omap4_l3_target_data_clk3[] = {
+       {0x0100, "DEBUGSS",},
 };
 
-static struct l3_flagmux_data omap_l3_flagmux_clk3 = {
+static struct l3_flagmux_data omap4_l3_flagmux_clk3 = {
        .offset = 0x0200,
-       .l3_targ = omap_l3_target_data_clk3,
-       .num_targ_data = ARRAY_SIZE(omap_l3_target_data_clk3),
+       .l3_targ = omap4_l3_target_data_clk3,
+       .num_targ_data = ARRAY_SIZE(omap4_l3_target_data_clk3),
 };
 
 static struct l3_masters_data omap_l3_masters[] = {
@@ -215,21 +213,49 @@ static struct l3_masters_data omap_l3_masters[] = {
        { 0x32, "USBHOSTFS"}
 };
 
-static struct l3_flagmux_data *omap_l3_flagmux[] = {
+static struct l3_flagmux_data *omap4_l3_flagmux[] = {
        &omap_l3_flagmux_clk1,
        &omap_l3_flagmux_clk2,
-       &omap_l3_flagmux_clk3,
+       &omap4_l3_flagmux_clk3,
 };
 
-static const struct omap_l3 omap_l3_data = {
-       .l3_flagmux = omap_l3_flagmux,
-       .num_modules = ARRAY_SIZE(omap_l3_flagmux),
+static const struct omap_l3 omap4_l3_data = {
+       .l3_flagmux = omap4_l3_flagmux,
+       .num_modules = ARRAY_SIZE(omap4_l3_flagmux),
        .l3_masters = omap_l3_masters,
        .num_masters = ARRAY_SIZE(omap_l3_masters),
        /* The 6 MSBs of register field used to distinguish initiator */
        .mst_addr_mask = 0xFC,
 };
 
+/* OMAP5 data */
+static struct l3_target_data omap5_l3_target_data_clk3[] = {
+       {0x0100, "L3INSTR",},
+       {0x0300, "DEBUGSS",},
+       {0x0,    "HOSTCLK3",},
+};
+
+static struct l3_flagmux_data omap5_l3_flagmux_clk3 = {
+       .offset = 0x0200,
+       .l3_targ = omap5_l3_target_data_clk3,
+       .num_targ_data = ARRAY_SIZE(omap5_l3_target_data_clk3),
+};
+
+static struct l3_flagmux_data *omap5_l3_flagmux[] = {
+       &omap_l3_flagmux_clk1,
+       &omap_l3_flagmux_clk2,
+       &omap5_l3_flagmux_clk3,
+};
+
+static const struct omap_l3 omap5_l3_data = {
+       .l3_flagmux = omap5_l3_flagmux,
+       .num_modules = ARRAY_SIZE(omap5_l3_flagmux),
+       .l3_masters = omap_l3_masters,
+       .num_masters = ARRAY_SIZE(omap_l3_masters),
+       /* The 6 MSBs of register field used to distinguish initiator */
+       .mst_addr_mask = 0x7E0,
+};
+
 /* DRA7 data */
 static struct l3_target_data dra_l3_target_data_clk1[] = {
        {0x2a00, "AES1",},
@@ -274,7 +300,7 @@ static struct l3_flagmux_data dra_l3_flagmux_clk1 = {
 
 static struct l3_target_data dra_l3_target_data_clk2[] = {
        {0x0,   "HOST CLK1",},
-       {0x0,   "HOST CLK2",},
+       {0x800000, "HOST CLK2",},
        {0xdead, L3_TARGET_NOT_SUPPORTED,},
        {0x3400, "SHA2_2",},
        {0x0900, "BB2D",},
index a3bebef255ad3af669c2134ce85805d0af4b0523..0c98a9d51a2494e6a49ef49e6bfb557cefca1974 100644 (file)
@@ -33,7 +33,7 @@
 #include <asm/io.h>
 #include <asm/msr.h>
 #include <asm/cpufeature.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 
 
index 597fed423d7d31906b1ca37f3fa9931231a63460..df2c1afa52b4acaa6204d5595a76c65f7dc0cb70 100644 (file)
@@ -29,7 +29,7 @@
 #define PERIPHERAL_RSHIFT_MASK 0x3
 #define PERIPHERAL_RSHIFT(val) (((val) >> 16) & PERIPHERAL_RSHIFT_MASK)
 
-#define PERIPHERAL_MAX_SHIFT   4
+#define PERIPHERAL_MAX_SHIFT   3
 
 struct clk_peripheral {
        struct clk_hw hw;
@@ -242,7 +242,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
                return *parent_rate;
 
        if (periph->range.max) {
-               for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+               for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
                        cur_rate = *parent_rate >> shift;
                        if (cur_rate <= periph->range.max)
                                break;
@@ -254,7 +254,7 @@ static long clk_sam9x5_peripheral_round_rate(struct clk_hw *hw,
 
        best_diff = cur_rate - rate;
        best_rate = cur_rate;
-       for (; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+       for (; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
                cur_rate = *parent_rate >> shift;
                if (cur_rate < rate)
                        cur_diff = rate - cur_rate;
@@ -289,7 +289,7 @@ static int clk_sam9x5_peripheral_set_rate(struct clk_hw *hw,
        if (periph->range.max && rate > periph->range.max)
                return -EINVAL;
 
-       for (shift = 0; shift < PERIPHERAL_MAX_SHIFT; shift++) {
+       for (shift = 0; shift <= PERIPHERAL_MAX_SHIFT; shift++) {
                if (parent_rate >> shift == rate) {
                        periph->auto_div = false;
                        periph->div = shift;
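The bound change follows from the register layout shown at the top of this file's
first hunk: PERIPHERAL_RSHIFT_MASK is 0x3, so the rshift field can only encode
shifts 0..3 and the loops must include shift 3:

	/* divider = parent_rate >> shift, shift in 0..3:
	 *   e.g. parent 132 MHz -> candidate rates 132, 66, 33, 16.5 MHz.
	 * PERIPHERAL_MAX_SHIFT = 3 with '<=' covers exactly this range;
	 * the old '< 4' walked the same shifts but misstated the field's
	 * true maximum.
	 */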
index 6ec79dbc0840ad8940e9e9ab599a0f865f1cd881..cbbe40377ad622a7f9d38aca5651916dda549e54 100644 (file)
@@ -173,8 +173,7 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
        int i = 0;
 
        /* Check if parent_rate is a valid input rate */
-       if (parent_rate < characteristics->input.min ||
-           parent_rate > characteristics->input.max)
+       if (parent_rate < characteristics->input.min)
                return -ERANGE;
 
        /*
@@ -187,6 +186,15 @@ static long clk_pll_get_best_div_mul(struct clk_pll *pll, unsigned long rate,
        if (!mindiv)
                mindiv = 1;
 
+       if (parent_rate > characteristics->input.max) {
+               tmpdiv = DIV_ROUND_UP(parent_rate, characteristics->input.max);
+               if (tmpdiv > PLL_DIV_MAX)
+                       return -ERANGE;
+
+               if (tmpdiv > mindiv)
+                       mindiv = tmpdiv;
+       }
+
        /*
         * Calculate the maximum divider which is limited by PLL register
         * layout (limited by the MUL or DIV field size).
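A worked example of the new clamp (invented rates, for illustration only):

	/* parent_rate = 50 MHz, characteristics->input.max = 32 MHz:
	 *   tmpdiv = DIV_ROUND_UP(50000000, 32000000) = 2,
	 * so mindiv is raised to 2 and the search only considers DIV
	 * values giving a PLL input of 50/2 = 25 MHz or lower, instead
	 * of rejecting the parent outright with -ERANGE as before.
	 */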
index 69abb08cf146513b0307a4a78449b2e5da971282..eb8e5dc9076d46f07901a98db214fbeec0b0a3dc 100644 (file)
@@ -121,7 +121,7 @@ extern void __init of_at91sam9x5_clk_smd_setup(struct device_node *np,
                                               struct at91_pmc *pmc);
 #endif
 
-#if defined(CONFIG_HAVE_AT91_SMD)
+#if defined(CONFIG_HAVE_AT91_H32MX)
 extern void __init of_sama5d4_clk_h32mx_setup(struct device_node *np,
                                              struct at91_pmc *pmc);
 #endif
index 44ea107cfc6729818ea8a02dfd250f7e6a557967..30335d3b99afb197332505d5be045f2d5d75be4c 100644 (file)
@@ -1128,13 +1128,6 @@ static int si5351_dt_parse(struct i2c_client *client,
        if (!pdata)
                return -ENOMEM;
 
-       pdata->clk_xtal = of_clk_get(np, 0);
-       if (!IS_ERR(pdata->clk_xtal))
-               clk_put(pdata->clk_xtal);
-       pdata->clk_clkin = of_clk_get(np, 1);
-       if (!IS_ERR(pdata->clk_clkin))
-               clk_put(pdata->clk_clkin);
-
        /*
         * property silabs,pll-source : <num src>, [<..>]
         * allow to selectively set pll source
@@ -1328,8 +1321,22 @@ static int si5351_i2c_probe(struct i2c_client *client,
        i2c_set_clientdata(client, drvdata);
        drvdata->client = client;
        drvdata->variant = variant;
-       drvdata->pxtal = pdata->clk_xtal;
-       drvdata->pclkin = pdata->clk_clkin;
+       drvdata->pxtal = devm_clk_get(&client->dev, "xtal");
+       drvdata->pclkin = devm_clk_get(&client->dev, "clkin");
+
+       if (PTR_ERR(drvdata->pxtal) == -EPROBE_DEFER ||
+           PTR_ERR(drvdata->pclkin) == -EPROBE_DEFER)
+               return -EPROBE_DEFER;
+
+       /*
+        * Check for valid parent clock: VARIANT_A and VARIANT_B need XTAL,
+        *   VARIANT_C can have CLKIN instead.
+        */
+       if (IS_ERR(drvdata->pxtal) &&
+           (drvdata->variant != SI5351_VARIANT_C || IS_ERR(drvdata->pclkin))) {
+               dev_err(&client->dev, "missing parent clock\n");
+               return -EINVAL;
+       }
 
        drvdata->regmap = devm_regmap_init_i2c(client, &si5351_regmap_config);
        if (IS_ERR(drvdata->regmap)) {
@@ -1393,6 +1400,11 @@ static int si5351_i2c_probe(struct i2c_client *client,
                }
        }
 
+       if (!IS_ERR(drvdata->pxtal))
+               clk_prepare_enable(drvdata->pxtal);
+       if (!IS_ERR(drvdata->pclkin))
+               clk_prepare_enable(drvdata->pclkin);
+
        /* register xtal input clock gate */
        memset(&init, 0, sizeof(init));
        init.name = si5351_input_names[0];
@@ -1407,7 +1419,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        clk = devm_clk_register(&client->dev, &drvdata->xtal);
        if (IS_ERR(clk)) {
                dev_err(&client->dev, "unable to register %s\n", init.name);
-               return PTR_ERR(clk);
+               ret = PTR_ERR(clk);
+               goto err_clk;
        }
 
        /* register clkin input clock gate */
@@ -1425,7 +1438,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
                if (IS_ERR(clk)) {
                        dev_err(&client->dev, "unable to register %s\n",
                                init.name);
-                       return PTR_ERR(clk);
+                       ret = PTR_ERR(clk);
+                       goto err_clk;
                }
        }
 
@@ -1447,7 +1461,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        clk = devm_clk_register(&client->dev, &drvdata->pll[0].hw);
        if (IS_ERR(clk)) {
                dev_err(&client->dev, "unable to register %s\n", init.name);
-               return -EINVAL;
+               ret = PTR_ERR(clk);
+               goto err_clk;
        }
 
        /* register PLLB or VXCO (Si5351B) */
@@ -1471,7 +1486,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
        clk = devm_clk_register(&client->dev, &drvdata->pll[1].hw);
        if (IS_ERR(clk)) {
                dev_err(&client->dev, "unable to register %s\n", init.name);
-               return -EINVAL;
+               ret = PTR_ERR(clk);
+               goto err_clk;
        }
 
        /* register clk multisync and clk out divider */
@@ -1492,8 +1508,10 @@ static int si5351_i2c_probe(struct i2c_client *client,
                num_clocks * sizeof(*drvdata->onecell.clks), GFP_KERNEL);
 
        if (WARN_ON(!drvdata->msynth || !drvdata->clkout ||
-                   !drvdata->onecell.clks))
-               return -ENOMEM;
+                   !drvdata->onecell.clks)) {
+               ret = -ENOMEM;
+               goto err_clk;
+       }
 
        for (n = 0; n < num_clocks; n++) {
                drvdata->msynth[n].num = n;
@@ -1511,7 +1529,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
                if (IS_ERR(clk)) {
                        dev_err(&client->dev, "unable to register %s\n",
                                init.name);
-                       return -EINVAL;
+                       ret = PTR_ERR(clk);
+                       goto err_clk;
                }
        }
 
@@ -1538,7 +1557,8 @@ static int si5351_i2c_probe(struct i2c_client *client,
                if (IS_ERR(clk)) {
                        dev_err(&client->dev, "unable to register %s\n",
                                init.name);
-                       return -EINVAL;
+                       ret = PTR_ERR(clk);
+                       goto err_clk;
                }
                drvdata->onecell.clks[n] = clk;
 
@@ -1557,10 +1577,17 @@ static int si5351_i2c_probe(struct i2c_client *client,
                                  &drvdata->onecell);
        if (ret) {
                dev_err(&client->dev, "unable to add clk provider\n");
-               return ret;
+               goto err_clk;
        }
 
        return 0;
+
+err_clk:
+       if (!IS_ERR(drvdata->pxtal))
+               clk_disable_unprepare(drvdata->pxtal);
+       if (!IS_ERR(drvdata->pclkin))
+               clk_disable_unprepare(drvdata->pclkin);
+       return ret;
 }
 
 static const struct i2c_device_id si5351_i2c_ids[] = {
index 459ce9da13e0631b41e6cafc5ed9a85523e5e1b7..5b0f41868b425672e6295ac6b30a52e43cf5730c 100644 (file)
@@ -1475,8 +1475,10 @@ static struct clk_core *__clk_set_parent_before(struct clk_core *clk,
         */
        if (clk->prepare_count) {
                clk_core_prepare(parent);
+               flags = clk_enable_lock();
                clk_core_enable(parent);
                clk_core_enable(clk);
+               clk_enable_unlock(flags);
        }
 
        /* update the clk tree topology */
@@ -1491,13 +1493,17 @@ static void __clk_set_parent_after(struct clk_core *core,
                                   struct clk_core *parent,
                                   struct clk_core *old_parent)
 {
+       unsigned long flags;
+
        /*
         * Finish the migration of prepare state and undo the changes done
         * for preventing a race with clk_enable().
         */
        if (core->prepare_count) {
+               flags = clk_enable_lock();
                clk_core_disable(core);
                clk_core_disable(old_parent);
+               clk_enable_unlock(flags);
                clk_core_unprepare(old_parent);
        }
 }
@@ -1525,8 +1531,10 @@ static int __clk_set_parent(struct clk_core *clk, struct clk_core *parent,
                clk_enable_unlock(flags);
 
                if (clk->prepare_count) {
+                       flags = clk_enable_lock();
                        clk_core_disable(clk);
                        clk_core_disable(parent);
+                       clk_enable_unlock(flags);
                        clk_core_unprepare(parent);
                }
                return ret;
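The pattern applied in all three hunks above (sketch): clk_core_enable() and
clk_core_disable() expect the enable spinlock to be held, so each paired call made
during a reparent is now bracketed as

	flags = clk_enable_lock();
	clk_core_enable(parent);
	clk_core_enable(clk);
	clk_enable_unlock(flags);

which keeps the enable counts of parent and child consistent against concurrent
clk_enable()/clk_disable() callers.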
index d3458474eb3a1ef6bbc67f919b60b725978e8aee..c66f7bc2ae87cde429121af226970c2c7fd973c4 100644 (file)
@@ -71,8 +71,8 @@ static const char *gcc_xo_gpll0_bimc[] = {
 static const struct parent_map gcc_xo_gpll0a_gpll1_gpll2a_map[] = {
        { P_XO, 0 },
        { P_GPLL0_AUX, 3 },
-       { P_GPLL2_AUX, 2 },
        { P_GPLL1, 1 },
+       { P_GPLL2_AUX, 2 },
 };
 
 static const char *gcc_xo_gpll0a_gpll1_gpll2a[] = {
@@ -1115,7 +1115,7 @@ static struct clk_rcg2 usb_hs_system_clk_src = {
 static const struct freq_tbl ftbl_gcc_venus0_vcodec0_clk[] = {
        F(100000000, P_GPLL0, 8, 0, 0),
        F(160000000, P_GPLL0, 5, 0, 0),
-       F(228570000, P_GPLL0, 5, 0, 0),
+       F(228570000, P_GPLL0, 3.5, 0, 0),
        { }
 };
 
index 17e9af7fe81fe0aad57ad6ec1c2a2ac05b2280dd..a17683b2cf276b03e287bd6959f217e7aacb7c5f 100644 (file)
@@ -10,7 +10,7 @@ obj-$(CONFIG_SOC_EXYNOS5250)  += clk-exynos5250.o
 obj-$(CONFIG_SOC_EXYNOS5260)   += clk-exynos5260.o
 obj-$(CONFIG_SOC_EXYNOS5410)   += clk-exynos5410.o
 obj-$(CONFIG_SOC_EXYNOS5420)   += clk-exynos5420.o
-obj-$(CONFIG_ARCH_EXYNOS5433)  += clk-exynos5433.o
+obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos5433.o
 obj-$(CONFIG_SOC_EXYNOS5440)   += clk-exynos5440.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-audss.o
 obj-$(CONFIG_ARCH_EXYNOS)      += clk-exynos-clkout.o
index 07d666cc6a29b9f3567deec197756b5927f68ac7..bea4a173eef5e40e12a4a05f8f6ccb3310700814 100644 (file)
@@ -271,6 +271,7 @@ static const struct samsung_clk_reg_dump exynos5420_set_clksrc[] = {
        { .offset = SRC_MASK_PERIC0,            .value = 0x11111110, },
        { .offset = SRC_MASK_PERIC1,            .value = 0x11111100, },
        { .offset = SRC_MASK_ISP,               .value = 0x11111000, },
+       { .offset = GATE_BUS_TOP,               .value = 0xffffffff, },
        { .offset = GATE_BUS_DISP1,             .value = 0xffffffff, },
        { .offset = GATE_IP_PERIC,              .value = 0xffffffff, },
 };
index 387e3e39e63519d401cbfe87c949308d64db5897..9e04ae2bb4d74912f18976c305a1738756b19837 100644 (file)
@@ -748,7 +748,7 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
        PLL_35XX_RATE(825000000U,  275, 4,  1),
        PLL_35XX_RATE(800000000U,  400, 6,  1),
        PLL_35XX_RATE(733000000U,  733, 12, 1),
-       PLL_35XX_RATE(700000000U,  360, 6,  1),
+       PLL_35XX_RATE(700000000U,  175, 3,  1),
        PLL_35XX_RATE(667000000U,  222, 4,  1),
        PLL_35XX_RATE(633000000U,  211, 4,  1),
        PLL_35XX_RATE(600000000U,  500, 5,  2),
@@ -760,14 +760,14 @@ static struct samsung_pll_rate_table exynos5443_pll_rates[] = {
        PLL_35XX_RATE(444000000U,  370, 5,  2),
        PLL_35XX_RATE(420000000U,  350, 5,  2),
        PLL_35XX_RATE(400000000U,  400, 6,  2),
-       PLL_35XX_RATE(350000000U,  360, 6,  2),
+       PLL_35XX_RATE(350000000U,  350, 6,  2),
        PLL_35XX_RATE(333000000U,  222, 4,  2),
        PLL_35XX_RATE(300000000U,  500, 5,  3),
        PLL_35XX_RATE(266000000U,  532, 6,  3),
        PLL_35XX_RATE(200000000U,  400, 6,  3),
        PLL_35XX_RATE(166000000U,  332, 6,  3),
        PLL_35XX_RATE(160000000U,  320, 6,  3),
-       PLL_35XX_RATE(133000000U,  552, 6,  4),
+       PLL_35XX_RATE(133000000U,  532, 6,  4),
        PLL_35XX_RATE(100000000U,  400, 6,  4),
        { /* sentinel */ }
 };
@@ -1490,7 +1490,7 @@ static struct samsung_gate_clock mif_gate_clks[] __initdata = {
 
        /* ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT */
        GATE(CLK_PCLK_MONOTONIC_CNT, "pclk_monotonic_cnt", "div_aclk_mif_133",
-                       ENABLE_PCLK_MIF_SECURE_RTC, 0, 0, 0),
+                       ENABLE_PCLK_MIF_SECURE_MONOTONIC_CNT, 0, 0, 0),
 
        /* ENABLE_PCLK_MIF_SECURE_RTC */
        GATE(CLK_PCLK_RTC, "pclk_rtc", "div_aclk_mif_133",
@@ -3665,7 +3665,7 @@ static struct samsung_gate_clock apollo_gate_clks[] __initdata = {
                        ENABLE_SCLK_APOLLO, 3, CLK_IGNORE_UNUSED, 0),
        GATE(CLK_SCLK_HPM_APOLLO, "sclk_hpm_apollo", "div_sclk_hpm_apollo",
                        ENABLE_SCLK_APOLLO, 1, CLK_IGNORE_UNUSED, 0),
-       GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo_pll",
+       GATE(CLK_SCLK_APOLLO, "sclk_apollo", "div_apollo2",
                        ENABLE_SCLK_APOLLO, 0, CLK_IGNORE_UNUSED, 0),
 };
 
@@ -3927,7 +3927,7 @@ CLK_OF_DECLARE(exynos5433_cmu_atlas, "samsung,exynos5433-cmu-atlas",
 #define ENABLE_PCLK_MSCL                               0x0900
 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER0                0x0904
 #define ENABLE_PCLK_MSCL_SECURE_SMMU_M2MSCALER1                0x0908
-#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG              0x000c
+#define ENABLE_PCLK_MSCL_SECURE_SMMU_JPEG              0x090c
 #define ENABLE_SCLK_MSCL                               0x0a00
 #define ENABLE_IP_MSCL0                                        0x0b00
 #define ENABLE_IP_MSCL1                                        0x0b04
index 51d7865fdddb6d59ae7a9406ae8d88cb4da30066..32164ba3d36a75e12a205c53aa8cf000aaa5454a 100644 (file)
@@ -106,6 +106,16 @@ config CLKSRC_EFM32
          Support to use the timers of EFM32 SoCs as clock source and clock
          event device.
 
+config CLKSRC_LPC32XX
+       bool
+       select CLKSRC_MMIO
+       select CLKSRC_OF
+
+config CLKSRC_STM32
+       bool "Clocksource for STM32 SoCs" if !ARCH_STM32
+       depends on OF && ARM && (ARCH_STM32 || COMPILE_TEST)
+       select CLKSRC_MMIO
+
 config ARM_ARCH_TIMER
        bool
        select CLKSRC_OF if OF
@@ -139,6 +149,13 @@ config CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
        help
         Use ARM global timer clock source as sched_clock
 
+config ARMV7M_SYSTICK
+       bool
+       select CLKSRC_OF if OF
+       select CLKSRC_MMIO
+       help
+         This option enables support for the ARMv7M system timer unit
+
 config ATMEL_PIT
        select CLKSRC_OF if OF
        def_bool SOC_AT91SAM9 || SOC_SAMA5
index 5b85f6adb25834c807c24aafa08ed0529b2e8a53..1831a588b988b508e885687313e189bacffda4a2 100644 (file)
@@ -36,7 +36,9 @@ obj-$(CONFIG_ARCH_NSPIRE)     += zevio-timer.o
 obj-$(CONFIG_ARCH_BCM_MOBILE)  += bcm_kona_timer.o
 obj-$(CONFIG_CADENCE_TTC_TIMER)        += cadence_ttc_timer.o
 obj-$(CONFIG_CLKSRC_EFM32)     += time-efm32.o
+obj-$(CONFIG_CLKSRC_STM32)     += timer-stm32.o
 obj-$(CONFIG_CLKSRC_EXYNOS_MCT)        += exynos_mct.o
+obj-$(CONFIG_CLKSRC_LPC32XX)   += time-lpc32xx.o
 obj-$(CONFIG_CLKSRC_SAMSUNG_PWM)       += samsung_pwm_timer.o
 obj-$(CONFIG_FSL_FTM_TIMER)    += fsl_ftm_timer.o
 obj-$(CONFIG_VF_PIT_TIMER)     += vf_pit_timer.o
@@ -45,6 +47,7 @@ obj-$(CONFIG_MTK_TIMER)               += mtk_timer.o
 
 obj-$(CONFIG_ARM_ARCH_TIMER)           += arm_arch_timer.o
 obj-$(CONFIG_ARM_GLOBAL_TIMER)         += arm_global_timer.o
+obj-$(CONFIG_ARMV7M_SYSTICK)           += armv7m_systick.o
 obj-$(CONFIG_CLKSRC_METAG_GENERIC)     += metag_generic.o
 obj-$(CONFIG_ARCH_HAS_TICK_BROADCAST)  += dummy_timer.o
 obj-$(CONFIG_ARCH_KEYSTONE)            += timer-keystone.o
diff --git a/drivers/clocksource/armv7m_systick.c b/drivers/clocksource/armv7m_systick.c
new file mode 100644 (file)
index 0000000..addfd2c
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ */
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/clk.h>
+#include <linux/bitops.h>
+
+#define SYST_CSR       0x00
+#define SYST_RVR       0x04
+#define SYST_CVR       0x08
+#define SYST_CALIB     0x0c
+
+#define SYST_CSR_ENABLE BIT(0)
+
+#define SYSTICK_LOAD_RELOAD_MASK 0x00FFFFFF
+
+static void __init system_timer_of_register(struct device_node *np)
+{
+       struct clk *clk = NULL;
+       void __iomem *base;
+       u32 rate;
+       int ret;
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_warn("system-timer: invalid base address\n");
+               return;
+       }
+
+       ret = of_property_read_u32(np, "clock-frequency", &rate);
+       if (ret) {
+               clk = of_clk_get(np, 0);
+               if (IS_ERR(clk))
+                       goto out_unmap;
+
+               ret = clk_prepare_enable(clk);
+               if (ret)
+                       goto out_clk_put;
+
+               rate = clk_get_rate(clk);
+               if (!rate)
+                       goto out_clk_disable;
+       }
+
+       writel_relaxed(SYSTICK_LOAD_RELOAD_MASK, base + SYST_RVR);
+       writel_relaxed(SYST_CSR_ENABLE, base + SYST_CSR);
+
+       ret = clocksource_mmio_init(base + SYST_CVR, "arm_system_timer", rate,
+                       200, 24, clocksource_mmio_readl_down);
+       if (ret) {
+               pr_err("failed to init clocksource (%d)\n", ret);
+               if (clk)
+                       goto out_clk_disable;
+               else
+                       goto out_unmap;
+       }
+
+       pr_info("ARM System timer initialized as clocksource\n");
+
+       return;
+
+out_clk_disable:
+       clk_disable_unprepare(clk);
+out_clk_put:
+       clk_put(clk);
+out_unmap:
+       iounmap(base);
+       pr_warn("ARM System timer registration failed (%d)\n", ret);
+}
+
+CLOCKSOURCE_OF_DECLARE(arm_systick, "arm,armv7m-systick",
+                       system_timer_of_register);
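+The registration above binds via CLOCKSOURCE_OF_DECLARE; a device tree node it
+would match might look like the following sketch (the unit address matches the
+ARMv7-M SysTick register block, the clock phandle is invented):
+
+	/* systick: timer@e000e010 {
+	 *         compatible = "arm,armv7m-systick";
+	 *         reg = <0xe000e010 0x10>;
+	 *         clocks = <&rcc 0>;
+	 *         // alternatively: clock-frequency = <...>;
+	 * };
+	 *
+	 * The driver prefers a fixed "clock-frequency" property and only
+	 * falls back to clock 0 when the property is absent.
+	 */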
index 2c9c993727c85a1975b845d3fe7b49d3115def97..4c2ba59897e84bda7c12368874e3c8f7716ae70c 100644 (file)
@@ -178,7 +178,7 @@ static void __init asm9260_timer_init(struct device_node *np)
        unsigned long rate;
 
        priv.base = of_io_request_and_map(np, 0, np->name);
-       if (!priv.base)
+       if (IS_ERR(priv.base))
                panic("%s: unable to map resource", np->name);
 
        clk = of_clk_get(np, 0);
index 83564c9cfdbe3b18dfb07a3799b73b991ffe00c7..935b05936dbdd9588764b0c04bf84db32b425af0 100644 (file)
@@ -209,7 +209,7 @@ static void exynos4_frc_resume(struct clocksource *cs)
        exynos4_mct_frc_start();
 }
 
-struct clocksource mct_frc = {
+static struct clocksource mct_frc = {
        .name           = "mct-frc",
        .rating         = 400,
        .read           = exynos4_frc_read,
@@ -413,7 +413,7 @@ static inline void exynos4_tick_set_mode(enum clock_event_mode mode,
        }
 }
 
-static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
+static void exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
 {
        struct clock_event_device *evt = &mevt->evt;
 
@@ -426,12 +426,8 @@ static int exynos4_mct_tick_clear(struct mct_clock_event_device *mevt)
                exynos4_mct_tick_stop(mevt);
 
        /* Clear the MCT tick interrupt */
-       if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1) {
+       if (readl_relaxed(reg_base + mevt->base + MCT_L_INT_CSTAT_OFFSET) & 1)
                exynos4_mct_write(0x1, mevt->base + MCT_L_INT_CSTAT_OFFSET);
-               return 1;
-       } else {
-               return 0;
-       }
 }
 
 static irqreturn_t exynos4_mct_tick_isr(int irq, void *dev_id)
@@ -564,18 +560,6 @@ out_irq:
        free_percpu_irq(mct_irqs[MCT_L0_IRQ], &percpu_mct_tick);
 }
 
-void __init mct_init(void __iomem *base, int irq_g0, int irq_l0, int irq_l1)
-{
-       mct_irqs[MCT_G0_IRQ] = irq_g0;
-       mct_irqs[MCT_L0_IRQ] = irq_l0;
-       mct_irqs[MCT_L1_IRQ] = irq_l1;
-       mct_int_type = MCT_INT_SPI;
-
-       exynos4_timer_resources(NULL, base);
-       exynos4_clocksource_init();
-       exynos4_clockevent_init();
-}
-
 static void __init mct_init_dt(struct device_node *np, unsigned int int_type)
 {
        u32 nr_irqs, i;
index 098c542e5c537dca3f159771960b453d61b578a8..cba2d015564c2cfe9dd9735ef771d7bc355ecca9 100644 (file)
@@ -40,8 +40,6 @@
 
 #define GPT_HZ 32768
 
-#define MSM_DGT_SHIFT 5
-
 static void __iomem *event_base;
 static void __iomem *sts_base;
 
@@ -232,7 +230,6 @@ err:
        register_current_timer_delay(&msm_delay_timer);
 }
 
-#ifdef CONFIG_ARCH_QCOM
 static void __init msm_dt_timer_init(struct device_node *np)
 {
        u32 freq;
@@ -285,59 +282,3 @@ static void __init msm_dt_timer_init(struct device_node *np)
 }
 CLOCKSOURCE_OF_DECLARE(kpss_timer, "qcom,kpss-timer", msm_dt_timer_init);
 CLOCKSOURCE_OF_DECLARE(scss_timer, "qcom,scss-timer", msm_dt_timer_init);
-#else
-
-static int __init msm_timer_map(phys_addr_t addr, u32 event, u32 source,
-                               u32 sts)
-{
-       void __iomem *base;
-
-       base = ioremap(addr, SZ_256);
-       if (!base) {
-               pr_err("Failed to map timer base\n");
-               return -ENOMEM;
-       }
-       event_base = base + event;
-       source_base = base + source;
-       if (sts)
-               sts_base = base + sts;
-
-       return 0;
-}
-
-static notrace cycle_t msm_read_timer_count_shift(struct clocksource *cs)
-{
-       /*
-        * Shift timer count down by a constant due to unreliable lower bits
-        * on some targets.
-        */
-       return msm_read_timer_count(cs) >> MSM_DGT_SHIFT;
-}
-
-void __init msm7x01_timer_init(void)
-{
-       struct clocksource *cs = &msm_clocksource;
-
-       if (msm_timer_map(0xc0100000, 0x0, 0x10, 0x0))
-               return;
-       cs->read = msm_read_timer_count_shift;
-       cs->mask = CLOCKSOURCE_MASK((32 - MSM_DGT_SHIFT));
-       /* 600 KHz */
-       msm_timer_init(19200000 >> MSM_DGT_SHIFT, 32 - MSM_DGT_SHIFT, 7,
-                       false);
-}
-
-void __init msm7x30_timer_init(void)
-{
-       if (msm_timer_map(0xc0100000, 0x4, 0x24, 0x80))
-               return;
-       msm_timer_init(24576000 / 4, 32, 1, false);
-}
-
-void __init qsd8x50_timer_init(void)
-{
-       if (msm_timer_map(0xAC100000, 0x0, 0x10, 0x34))
-               return;
-       msm_timer_init(19200000 / 4, 32, 7, false);
-}
-#endif
diff --git a/drivers/clocksource/time-lpc32xx.c b/drivers/clocksource/time-lpc32xx.c
new file mode 100644 (file)
index 0000000..a1c06a2
--- /dev/null
@@ -0,0 +1,272 @@
+/*
+ * Clocksource driver for NXP LPC32xx/18xx/43xx timer
+ *
+ * Copyright (C) 2015 Joachim Eastwood <manabian@gmail.com>
+ *
+ * Based on:
+ * time-efm32 Copyright (C) 2013 Pengutronix
+ * mach-lpc32xx/timer.c Copyright (C) 2009 - 2010 NXP Semiconductors
+ *
+ * This file is licensed under the terms of the GNU General Public
+ * License version 2. This program is licensed "as is" without any
+ * warranty of any kind, whether express or implied.
+ *
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/sched_clock.h>
+
+#define LPC32XX_TIMER_IR               0x000
+#define  LPC32XX_TIMER_IR_MR0INT       BIT(0)
+#define LPC32XX_TIMER_TCR              0x004
+#define  LPC32XX_TIMER_TCR_CEN         BIT(0)
+#define  LPC32XX_TIMER_TCR_CRST                BIT(1)
+#define LPC32XX_TIMER_TC               0x008
+#define LPC32XX_TIMER_PR               0x00c
+#define LPC32XX_TIMER_MCR              0x014
+#define  LPC32XX_TIMER_MCR_MR0I                BIT(0)
+#define  LPC32XX_TIMER_MCR_MR0R                BIT(1)
+#define  LPC32XX_TIMER_MCR_MR0S                BIT(2)
+#define LPC32XX_TIMER_MR0              0x018
+#define LPC32XX_TIMER_CTCR             0x070
+
+struct lpc32xx_clock_event_ddata {
+       struct clock_event_device evtdev;
+       void __iomem *base;
+};
+
+/* Needed for the sched clock */
+static void __iomem *clocksource_timer_counter;
+
+static u64 notrace lpc32xx_read_sched_clock(void)
+{
+       return readl(clocksource_timer_counter);
+}
+
+static int lpc32xx_clkevt_next_event(unsigned long delta,
+                                    struct clock_event_device *evtdev)
+{
+       struct lpc32xx_clock_event_ddata *ddata =
+               container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
+       /*
+        * Place timer in reset and program the delta in the prescale
+        * register (PR). When the prescale counter matches the value
+        * in PR the counter register is incremented and the compare
+        * match will trigger. After setup the timer is released from
+        * reset and enabled.
+        */
+       writel_relaxed(LPC32XX_TIMER_TCR_CRST, ddata->base + LPC32XX_TIMER_TCR);
+       writel_relaxed(delta, ddata->base + LPC32XX_TIMER_PR);
+       writel_relaxed(LPC32XX_TIMER_TCR_CEN, ddata->base + LPC32XX_TIMER_TCR);
+
+       return 0;
+}
+
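+A worked example of the one-shot programming above (invented rate): the match
+value is fixed at 1 in MR0, set up in lpc32xx_clockevent_init() below, so 'delta'
+prescaler ticks produce the single counter increment that triggers the match:
+
+	/* timer clock 13 MHz, delta = 13000:
+	 *   PR = 13000 -> TC increments once after roughly 13000 clocks,
+	 *   TC == MR0 (= 1) fires the interrupt about 1 ms later,
+	 *   and MCR's MR0S stops the timer until the next event.
+	 */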
+static int lpc32xx_clkevt_shutdown(struct clock_event_device *evtdev)
+{
+       struct lpc32xx_clock_event_ddata *ddata =
+               container_of(evtdev, struct lpc32xx_clock_event_ddata, evtdev);
+
+       /* Disable the timer */
+       writel_relaxed(0, ddata->base + LPC32XX_TIMER_TCR);
+
+       return 0;
+}
+
+static int lpc32xx_clkevt_oneshot(struct clock_event_device *evtdev)
+{
+       /*
+        * When using oneshot, we must also disable the timer
+        * to wait for the first call to set_next_event().
+        */
+       return lpc32xx_clkevt_shutdown(evtdev);
+}
+
+static irqreturn_t lpc32xx_clock_event_handler(int irq, void *dev_id)
+{
+       struct lpc32xx_clock_event_ddata *ddata = dev_id;
+
+       /* Clear match on channel 0 */
+       writel_relaxed(LPC32XX_TIMER_IR_MR0INT, ddata->base + LPC32XX_TIMER_IR);
+
+       ddata->evtdev.event_handler(&ddata->evtdev);
+
+       return IRQ_HANDLED;
+}
+
+static struct lpc32xx_clock_event_ddata lpc32xx_clk_event_ddata = {
+       .evtdev = {
+               .name                   = "lpc3220 clockevent",
+               .features               = CLOCK_EVT_FEAT_ONESHOT,
+               .rating                 = 300,
+               .set_next_event         = lpc32xx_clkevt_next_event,
+               .set_state_shutdown     = lpc32xx_clkevt_shutdown,
+               .set_state_oneshot      = lpc32xx_clkevt_oneshot,
+       },
+};
+
+static int __init lpc32xx_clocksource_init(struct device_node *np)
+{
+       void __iomem *base;
+       unsigned long rate;
+       struct clk *clk;
+       int ret;
+
+       clk = of_clk_get_by_name(np, "timerclk");
+       if (IS_ERR(clk)) {
+               pr_err("clock get failed (%ld)\n", PTR_ERR(clk));
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("clock enable failed (%d)\n", ret);
+               goto err_clk_enable;
+       }
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("unable to map registers\n");
+               ret = -EADDRNOTAVAIL;
+               goto err_iomap;
+       }
+
+       /*
+        * Disable and reset the timer, then set it to free-running timer
+        * mode (CTCR) with no prescaler (PR) or match operations (MCR).
+        * After setup the timer is released from reset and enabled.
+        */
+       writel_relaxed(LPC32XX_TIMER_TCR_CRST, base + LPC32XX_TIMER_TCR);
+       writel_relaxed(0, base + LPC32XX_TIMER_PR);
+       writel_relaxed(0, base + LPC32XX_TIMER_MCR);
+       writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
+       writel_relaxed(LPC32XX_TIMER_TCR_CEN, base + LPC32XX_TIMER_TCR);
+
+       rate = clk_get_rate(clk);
+       ret = clocksource_mmio_init(base + LPC32XX_TIMER_TC, "lpc3220 timer",
+                                   rate, 300, 32, clocksource_mmio_readl_up);
+       if (ret) {
+               pr_err("failed to init clocksource (%d)\n", ret);
+               goto err_clocksource_init;
+       }
+
+       clocksource_timer_counter = base + LPC32XX_TIMER_TC;
+       sched_clock_register(lpc32xx_read_sched_clock, 32, rate);
+
+       return 0;
+
+err_clocksource_init:
+       iounmap(base);
+err_iomap:
+       clk_disable_unprepare(clk);
+err_clk_enable:
+       clk_put(clk);
+       return ret;
+}
+
+static int __init lpc32xx_clockevent_init(struct device_node *np)
+{
+       void __iomem *base;
+       unsigned long rate;
+       struct clk *clk;
+       int ret, irq;
+
+       clk = of_clk_get_by_name(np, "timerclk");
+       if (IS_ERR(clk)) {
+               pr_err("clock get failed (%lu)\n", PTR_ERR(clk));
+               return PTR_ERR(clk);
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("clock enable failed (%d)\n", ret);
+               goto err_clk_enable;
+       }
+
+       base = of_iomap(np, 0);
+       if (!base) {
+               pr_err("unable to map registers\n");
+               ret = -EADDRNOTAVAIL;
+               goto err_iomap;
+       }
+
+       irq = irq_of_parse_and_map(np, 0);
+       if (!irq) {
+               pr_err("get irq failed\n");
+               ret = -ENOENT;
+               goto err_irq;
+       }
+
+       /*
+        * Disable timer and clear any pending interrupt (IR) on match
+        * channel 0 (MR0). Configure a compare match value of 1 on MR0
+        * and enable interrupt, reset on match and stop on match (MCR).
+        */
+       writel_relaxed(0, base + LPC32XX_TIMER_TCR);
+       writel_relaxed(0, base + LPC32XX_TIMER_CTCR);
+       writel_relaxed(LPC32XX_TIMER_IR_MR0INT, base + LPC32XX_TIMER_IR);
+       writel_relaxed(1, base + LPC32XX_TIMER_MR0);
+       writel_relaxed(LPC32XX_TIMER_MCR_MR0I | LPC32XX_TIMER_MCR_MR0R |
+                      LPC32XX_TIMER_MCR_MR0S, base + LPC32XX_TIMER_MCR);
+
+       rate = clk_get_rate(clk);
+       lpc32xx_clk_event_ddata.base = base;
+       clockevents_config_and_register(&lpc32xx_clk_event_ddata.evtdev,
+                                       rate, 1, -1);
+
+       ret = request_irq(irq, lpc32xx_clock_event_handler,
+                         IRQF_TIMER | IRQF_IRQPOLL, "lpc3220 clockevent",
+                         &lpc32xx_clk_event_ddata);
+       if (ret) {
+               pr_err("request irq failed\n");
+               goto err_irq;
+       }
+
+       return 0;
+
+err_irq:
+       iounmap(base);
+err_iomap:
+       clk_disable_unprepare(clk);
+err_clk_enable:
+       clk_put(clk);
+       return ret;
+}
+
+/*
+ * This function ensures that we end up with exactly one clocksource
+ * and one clock_event_device: the first matching timer node becomes
+ * the clocksource, the second the clock_event_device.
+ */
+static void __init lpc32xx_timer_init(struct device_node *np)
+{
+       static int has_clocksource, has_clockevent;
+       int ret;
+
+       if (!has_clocksource) {
+               ret = lpc32xx_clocksource_init(np);
+               if (!ret) {
+                       has_clocksource = 1;
+                       return;
+               }
+       }
+
+       if (!has_clockevent) {
+               ret = lpc32xx_clockevent_init(np);
+               if (!ret) {
+                       has_clockevent = 1;
+                       return;
+               }
+       }
+}
+CLOCKSOURCE_OF_DECLARE(lpc32xx_timer, "nxp,lpc3220-timer", lpc32xx_timer_init);
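
lpc32xx_timer_init() runs once for every matching device-tree node, so the
static flags above hand out the two roles in discovery order. A minimal
stand-alone model of that allocation (error paths omitted):

    #include <stdio.h>

    /* First successful init becomes the clocksource, the next the
     * clockevent; any further nodes are left unused. */
    static const char *claim_role(void)
    {
            static int has_clocksource, has_clockevent;

            if (!has_clocksource) { has_clocksource = 1; return "clocksource"; }
            if (!has_clockevent)  { has_clockevent = 1;  return "clockevent"; }
            return "unused";
    }

    int main(void)
    {
            for (int node = 0; node < 3; node++)
                    printf("timer node %d -> %s\n", node, claim_role());
            return 0;
    }
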
index b9efd30513d56214612913dfc0f0a30b9464ea70..c97d1980c0f856f37b3f264d6b525f3a9e68540a 100644 (file)
@@ -166,7 +166,7 @@ static void __init integrator_ap_timer_init_of(struct device_node *node)
        struct device_node *sec_node;
 
        base = of_io_request_and_map(node, 0, "integrator-timer");
-       if (!base)
+       if (IS_ERR(base))
                return;
 
        clk = of_clk_get(node, 0);
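
This hunk (and the sun5i one further down) exists because
of_io_request_and_map() reports failure with ERR_PTR() values rather than
NULL, so the old !base test could never fire. A small user-space sketch of
the ERR_PTR/IS_ERR convention (the constant mirrors the kernel's, the rest
is a model):

    #include <stdio.h>

    #define MAX_ERRNO 4095

    static void *ERR_PTR(long err) { return (void *)err; }

    static int IS_ERR(const void *p)
    {
            return (unsigned long)p >= (unsigned long)-MAX_ERRNO;
    }

    int main(void)
    {
            void *base = ERR_PTR(-12);              /* e.g. a failed mapping */

            printf("!base   : %d\n", !base);        /* 0 -- failure missed */
            printf("IS_ERR(): %d\n", IS_ERR(base)); /* 1 -- failure caught */
            return 0;
    }
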
diff --git a/drivers/clocksource/timer-stm32.c b/drivers/clocksource/timer-stm32.c
new file mode 100644 (file)
index 0000000..a97e8b5
--- /dev/null
@@ -0,0 +1,184 @@
+/*
+ * Copyright (C) Maxime Coquelin 2015
+ * Author:  Maxime Coquelin <mcoquelin.stm32@gmail.com>
+ * License terms:  GNU General Public License (GPL), version 2
+ *
+ * Inspired by time-efm32.c from Uwe Kleine-Koenig
+ */
+
+#include <linux/kernel.h>
+#include <linux/clocksource.h>
+#include <linux/clockchips.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/clk.h>
+#include <linux/reset.h>
+
+#define TIM_CR1                0x00
+#define TIM_DIER       0x0c
+#define TIM_SR         0x10
+#define TIM_EGR                0x14
+#define TIM_PSC                0x28
+#define TIM_ARR                0x2c
+
+#define TIM_CR1_CEN    BIT(0)
+#define TIM_CR1_OPM    BIT(3)
+#define TIM_CR1_ARPE   BIT(7)
+
+#define TIM_DIER_UIE   BIT(0)
+
+#define TIM_SR_UIF     BIT(0)
+
+#define TIM_EGR_UG     BIT(0)
+
+struct stm32_clock_event_ddata {
+       struct clock_event_device evtdev;
+       unsigned periodic_top;
+       void __iomem *base;
+};
+
+static void stm32_clock_event_set_mode(enum clock_event_mode mode,
+                                      struct clock_event_device *evtdev)
+{
+       struct stm32_clock_event_ddata *data =
+               container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+       void __iomem *base = data->base;
+
+       switch (mode) {
+       case CLOCK_EVT_MODE_PERIODIC:
+               writel_relaxed(data->periodic_top, base + TIM_ARR);
+               writel_relaxed(TIM_CR1_ARPE | TIM_CR1_CEN, base + TIM_CR1);
+               break;
+
+       case CLOCK_EVT_MODE_ONESHOT:
+       default:
+               writel_relaxed(0, base + TIM_CR1);
+               break;
+       }
+}
+
+static int stm32_clock_event_set_next_event(unsigned long evt,
+                                           struct clock_event_device *evtdev)
+{
+       struct stm32_clock_event_ddata *data =
+               container_of(evtdev, struct stm32_clock_event_ddata, evtdev);
+
+       writel_relaxed(evt, data->base + TIM_ARR);
+       writel_relaxed(TIM_CR1_ARPE | TIM_CR1_OPM | TIM_CR1_CEN,
+                      data->base + TIM_CR1);
+
+       return 0;
+}
+
+static irqreturn_t stm32_clock_event_handler(int irq, void *dev_id)
+{
+       struct stm32_clock_event_ddata *data = dev_id;
+
+       writel_relaxed(0, data->base + TIM_SR);
+
+       data->evtdev.event_handler(&data->evtdev);
+
+       return IRQ_HANDLED;
+}
+
+static struct stm32_clock_event_ddata clock_event_ddata = {
+       .evtdev = {
+               .name = "stm32 clockevent",
+               .features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC,
+               .set_mode = stm32_clock_event_set_mode,
+               .set_next_event = stm32_clock_event_set_next_event,
+               .rating = 200,
+       },
+};
+
+static void __init stm32_clockevent_init(struct device_node *np)
+{
+       struct stm32_clock_event_ddata *data = &clock_event_ddata;
+       struct clk *clk;
+       struct reset_control *rstc;
+       unsigned long rate, max_delta;
+       int irq, ret, bits, prescaler = 1;
+
+       clk = of_clk_get(np, 0);
+       if (IS_ERR(clk)) {
+               ret = PTR_ERR(clk);
+               pr_err("failed to get clock for clockevent (%d)\n", ret);
+               goto err_clk_get;
+       }
+
+       ret = clk_prepare_enable(clk);
+       if (ret) {
+               pr_err("failed to enable timer clock for clockevent (%d)\n",
+                      ret);
+               goto err_clk_enable;
+       }
+
+       rate = clk_get_rate(clk);
+
+       rstc = of_reset_control_get(np, NULL);
+       if (!IS_ERR(rstc)) {
+               reset_control_assert(rstc);
+               reset_control_deassert(rstc);
+       }
+
+       data->base = of_iomap(np, 0);
+       if (!data->base) {
+               pr_err("failed to map registers for clockevent\n");
+               goto err_iomap;
+       }
+
+       irq = irq_of_parse_and_map(np, 0);
+       if (!irq) {
+               pr_err("%s: failed to get irq.\n", np->full_name);
+               goto err_get_irq;
+       }
+
+       /* Detect whether the timer is 16 or 32 bits */
+       writel_relaxed(~0U, data->base + TIM_ARR);
+       max_delta = readl_relaxed(data->base + TIM_ARR);
+       if (max_delta == ~0U) {
+               prescaler = 1;
+               bits = 32;
+       } else {
+               prescaler = 1024;
+               bits = 16;
+       }
+       writel_relaxed(0, data->base + TIM_ARR);
+
+       writel_relaxed(prescaler - 1, data->base + TIM_PSC);
+       writel_relaxed(TIM_EGR_UG, data->base + TIM_EGR);
+       writel_relaxed(TIM_DIER_UIE, data->base + TIM_DIER);
+       writel_relaxed(0, data->base + TIM_SR);
+
+       data->periodic_top = DIV_ROUND_CLOSEST(rate, prescaler * HZ);
+
+       clockevents_config_and_register(&data->evtdev,
+                                       DIV_ROUND_CLOSEST(rate, prescaler),
+                                       0x1, max_delta);
+
+       ret = request_irq(irq, stm32_clock_event_handler, IRQF_TIMER,
+                       "stm32 clockevent", data);
+       if (ret) {
+               pr_err("%s: failed to request irq.\n", np->full_name);
+               goto err_get_irq;
+       }
+
+       pr_info("%s: STM32 clockevent driver initialized (%d bits)\n",
+                       np->full_name, bits);
+
+       return;
+
+err_get_irq:
+       iounmap(data->base);
+err_iomap:
+       clk_disable_unprepare(clk);
+err_clk_enable:
+       clk_put(clk);
+err_clk_get:
+       return;
+}
+
+CLOCKSOURCE_OF_DECLARE(stm32, "st,stm32-timer", stm32_clockevent_init);
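
The 16-vs-32-bit probe above works by writing all-ones to ARR and reading
the value back: a 16-bit timer truncates it, a 32-bit timer returns it
intact. A stand-alone model of the read-back check:

    #include <stdint.h>
    #include <stdio.h>

    /* Model a write/read-back probe against a register 'bits' wide. */
    static int probe_timer_bits(unsigned bits)
    {
            uint32_t mask = (bits >= 32) ? ~0u : ((1u << bits) - 1);
            uint32_t arr = ~0u & mask;      /* what the read-back returns */

            return (arr == ~0u) ? 32 : 16;
    }

    int main(void)
    {
            printf("16-bit timer probes as %d bits\n", probe_timer_bits(16));
            printf("32-bit timer probes as %d bits\n", probe_timer_bits(32));
            return 0;
    }
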
index 28aa4b7bb6020c416974ec52c8f80eb12366e705..0ffb4ea7c9253eb883afe47cd9682722f5c588c6 100644 (file)
@@ -324,7 +324,7 @@ static void __init sun5i_timer_init(struct device_node *node)
        int irq;
 
        timer_base = of_io_request_and_map(node, 0, of_node_full_name(node));
-       if (!timer_base)
+       if (IS_ERR(timer_base))
                panic("Can't map registers");
 
        irq = irq_of_parse_and_map(node, 0);
index b0c18ed8d83f707d000213e458dba613e4ffaf96..0136dfcdabf0bad0382639566ba8707a464ddcaf 100644 (file)
@@ -699,13 +699,14 @@ static int acpi_cpufreq_cpu_init(struct cpufreq_policy *policy)
        dmi_check_system(sw_any_bug_dmi_table);
        if (bios_with_sw_any_bug && !policy_is_shared(policy)) {
                policy->shared_type = CPUFREQ_SHARED_TYPE_ALL;
-               cpumask_copy(policy->cpus, cpu_core_mask(cpu));
+               cpumask_copy(policy->cpus, topology_core_cpumask(cpu));
        }
 
        if (check_amd_hwpstate_cpu(cpu) && !acpi_pstate_strict) {
                cpumask_clear(policy->cpus);
                cpumask_set_cpu(cpu, policy->cpus);
-               cpumask_copy(data->freqdomain_cpus, cpu_sibling_mask(cpu));
+               cpumask_copy(data->freqdomain_cpus,
+                            topology_sibling_cpumask(cpu));
                policy->shared_type = CPUFREQ_SHARED_TYPE_HW;
                pr_info_once(PFX "overriding BIOS provided _PSD data\n");
        }
index 6414661ac1c46a2ccdbaa2408e2491ac93e4dc46..2ba53f4f6af228e7e06384611bad6d9129435500 100644 (file)
@@ -26,6 +26,7 @@
 #include <linux/fs.h>
 #include <linux/debugfs.h>
 #include <linux/acpi.h>
+#include <linux/vmalloc.h>
 #include <trace/events/power.h>
 
 #include <asm/div64.h>
index 529cfd92158fa6f7e0e9cfb79d88307c83c57ac3..5dd95dab580d1cf30135ff39ce2cfd81d9be9b7f 100644 (file)
@@ -172,7 +172,7 @@ static int cpufreq_p4_cpu_init(struct cpufreq_policy *policy)
        unsigned int i;
 
 #ifdef CONFIG_SMP
-       cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+       cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
 #endif
 
        /* Errata workaround */
index f9ce7e4bf0feae587913bb46b94539e35a572fb9..5c035d04d827106505c26c08ac649d7ffc907401 100644 (file)
@@ -57,13 +57,6 @@ static DEFINE_PER_CPU(struct powernow_k8_data *, powernow_data);
 
 static struct cpufreq_driver cpufreq_amd64_driver;
 
-#ifndef CONFIG_SMP
-static inline const struct cpumask *cpu_core_mask(int cpu)
-{
-       return cpumask_of(0);
-}
-#endif
-
 /* Return a frequency in MHz, given an input fid */
 static u32 find_freq_from_fid(u32 fid)
 {
@@ -620,7 +613,7 @@ static int fill_powernow_table(struct powernow_k8_data *data,
 
        pr_debug("cfid 0x%x, cvid 0x%x\n", data->currfid, data->currvid);
        data->powernow_table = powernow_table;
-       if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
+       if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
                print_basics(data);
 
        for (j = 0; j < data->numps; j++)
@@ -784,7 +777,7 @@ static int powernow_k8_cpu_init_acpi(struct powernow_k8_data *data)
                CPUFREQ_TABLE_END;
        data->powernow_table = powernow_table;
 
-       if (cpumask_first(cpu_core_mask(data->cpu)) == data->cpu)
+       if (cpumask_first(topology_core_cpumask(data->cpu)) == data->cpu)
                print_basics(data);
 
        /* notify BIOS that we exist */
@@ -1090,7 +1083,7 @@ static int powernowk8_cpu_init(struct cpufreq_policy *pol)
        if (rc != 0)
                goto err_out_exit_acpi;
 
-       cpumask_copy(pol->cpus, cpu_core_mask(pol->cpu));
+       cpumask_copy(pol->cpus, topology_core_cpumask(pol->cpu));
        data->available_cores = pol->cpus;
 
        /* min/max the cpu is capable of */
index e56d632a8b2107be82c7fffe94c8cf57d02aaca1..37555c6b86a7cf843f04187a24c86dfe99aa8d42 100644 (file)
@@ -292,7 +292,7 @@ static int speedstep_cpu_init(struct cpufreq_policy *policy)
 
        /* only run on CPU to be set, or on its sibling */
 #ifdef CONFIG_SMP
-       cpumask_copy(policy->cpus, cpu_sibling_mask(policy->cpu));
+       cpumask_copy(policy->cpus, topology_sibling_cpumask(policy->cpu));
 #endif
        policy_cpu = cpumask_any_and(policy->cpus, cpu_online_mask);
 
index ba0532efd3ae68d0368a00a1018dd22ee76f99b0..332c8ef8dae2cc262540f0d168dffa2266d82c73 100644 (file)
@@ -1544,6 +1544,8 @@ static int ahash_init(struct ahash_request *req)
 
        state->current_buf = 0;
        state->buf_dma = 0;
+       state->buflen_0 = 0;
+       state->buflen_1 = 0;
 
        return 0;
 }
index 26a544b505f1e17166f95cf0e0dccdc5191015d3..5095337205b830c148696a37d53a8902643b317f 100644 (file)
@@ -56,7 +56,7 @@
 
 /* Buffer, its dma address and lock */
 struct buf_data {
-       u8 buf[RN_BUF_SIZE];
+       u8 buf[RN_BUF_SIZE] ____cacheline_aligned;
        dma_addr_t addr;
        struct completion filled;
        u32 hw_desc[DESC_JOB_O_LEN];
index c178ed8c3908d3a92e55432aecb86e0e6e1501ee..da2d6777bd092f0a373e14cc960514b8a4f5d148 100644 (file)
@@ -22,7 +22,7 @@
 #include <asm/cpu_device_id.h>
 #include <asm/byteorder.h>
 #include <asm/processor.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 /*
  * Number of data blocks actually fetched for each xcrypt insn.
index 95f7d27ce491f000458a257e5dfa55a6105b433b..4e154c9b92064bb1fbafeb805bcb77f9cc2d1bdf 100644 (file)
@@ -23,7 +23,7 @@
 #include <linux/kernel.h>
 #include <linux/scatterlist.h>
 #include <asm/cpu_device_id.h>
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 struct padlock_sha_desc {
        struct shash_desc fallback;
index ab300ea19434e3f93d9b08475f27ee59cf445390..a9064e36e7b5488c4f47f628bb317c48b7efa485 100644 (file)
@@ -78,12 +78,14 @@ static int p8_aes_setkey(struct crypto_tfm *tfm, const u8 *key,
     int ret;
     struct p8_aes_ctx *ctx = crypto_tfm_ctx(tfm);
 
+    preempt_disable();
     pagefault_disable();
     enable_kernel_altivec();
     ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
     ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
     pagefault_enable();
-    
+    preempt_enable();
+
     ret += crypto_cipher_setkey(ctx->fallback, key, keylen);
     return ret;
 }
@@ -95,10 +97,12 @@ static void p8_aes_encrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
     if (in_interrupt()) {
         crypto_cipher_encrypt_one(ctx->fallback, dst, src);
     } else {
+       preempt_disable();
         pagefault_disable();
         enable_kernel_altivec();
         aes_p8_encrypt(src, dst, &ctx->enc_key);
         pagefault_enable();
+       preempt_enable();
     }
 }
 
@@ -109,10 +113,12 @@ static void p8_aes_decrypt(struct crypto_tfm *tfm, u8 *dst, const u8 *src)
     if (in_interrupt()) {
         crypto_cipher_decrypt_one(ctx->fallback, dst, src);
     } else {
+       preempt_disable();
         pagefault_disable();
         enable_kernel_altivec();
         aes_p8_decrypt(src, dst, &ctx->dec_key);
         pagefault_enable();
+       preempt_enable();
     }
 }
 
index 1a559b7dddb5f2f5ff3184e0d50c275bf498aa3c..477284abdd11dc9d738fe08858d7cbe48a65fe32 100644 (file)
@@ -79,11 +79,13 @@ static int p8_aes_cbc_setkey(struct crypto_tfm *tfm, const u8 *key,
     int ret;
     struct p8_aes_cbc_ctx *ctx = crypto_tfm_ctx(tfm);
 
+    preempt_disable();
     pagefault_disable();
     enable_kernel_altivec();
     ret = aes_p8_set_encrypt_key(key, keylen * 8, &ctx->enc_key);
     ret += aes_p8_set_decrypt_key(key, keylen * 8, &ctx->dec_key);
     pagefault_enable();
+    preempt_enable();
 
     ret += crypto_blkcipher_setkey(ctx->fallback, key, keylen);
     return ret;
@@ -106,6 +108,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
     if (in_interrupt()) {
         ret = crypto_blkcipher_encrypt(&fallback_desc, dst, src, nbytes);
     } else {
+       preempt_disable();
         pagefault_disable();
         enable_kernel_altivec();
 
@@ -119,6 +122,7 @@ static int p8_aes_cbc_encrypt(struct blkcipher_desc *desc,
        }
 
         pagefault_enable();
+       preempt_enable();
     }
 
     return ret;
@@ -141,6 +145,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
     if (in_interrupt()) {
         ret = crypto_blkcipher_decrypt(&fallback_desc, dst, src, nbytes);
     } else {
+       preempt_disable();
         pagefault_disable();
         enable_kernel_altivec();
 
@@ -154,6 +159,7 @@ static int p8_aes_cbc_decrypt(struct blkcipher_desc *desc,
                }
 
         pagefault_enable();
+       preempt_enable();
     }
 
     return ret;
index d0ffe277af5ca583157afbf881d5df2fbdec5bf5..f255ec4a04d48d60a28e1025b3e2473cdcb7d8fc 100644 (file)
@@ -114,11 +114,13 @@ static int p8_ghash_setkey(struct crypto_shash *tfm, const u8 *key,
     if (keylen != GHASH_KEY_LEN)
         return -EINVAL;
 
+    preempt_disable();
     pagefault_disable();
     enable_kernel_altivec();
     enable_kernel_fp();
     gcm_init_p8(ctx->htable, (const u64 *) key);
     pagefault_enable();
+    preempt_enable();
     return crypto_shash_setkey(ctx->fallback, key, keylen);
 }
 
@@ -140,23 +142,27 @@ static int p8_ghash_update(struct shash_desc *desc,
             }
             memcpy(dctx->buffer + dctx->bytes, src,
                     GHASH_DIGEST_SIZE - dctx->bytes);
+           preempt_disable();
             pagefault_disable();
             enable_kernel_altivec();
             enable_kernel_fp();
             gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                     GHASH_DIGEST_SIZE);
             pagefault_enable();
+           preempt_enable();
             src += GHASH_DIGEST_SIZE - dctx->bytes;
             srclen -= GHASH_DIGEST_SIZE - dctx->bytes;
             dctx->bytes = 0;
         }
         len = srclen & ~(GHASH_DIGEST_SIZE - 1);
         if (len) {
+           preempt_disable();
             pagefault_disable();
             enable_kernel_altivec();
             enable_kernel_fp();
             gcm_ghash_p8(dctx->shash, ctx->htable, src, len);
             pagefault_enable();
+           preempt_enable();
             src += len;
             srclen -= len;
         }
@@ -180,12 +186,14 @@ static int p8_ghash_final(struct shash_desc *desc, u8 *out)
         if (dctx->bytes) {
             for (i = dctx->bytes; i < GHASH_DIGEST_SIZE; i++)
                 dctx->buffer[i] = 0;
+           preempt_disable();
             pagefault_disable();
             enable_kernel_altivec();
             enable_kernel_fp();
             gcm_ghash_p8(dctx->shash, ctx->htable, dctx->buffer,
                     GHASH_DIGEST_SIZE);
             pagefault_enable();
+           preempt_enable();
             dctx->bytes = 0;
         }
         memcpy(out, dctx->shash, GHASH_DIGEST_SIZE);
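
All of the vmx hunks above follow one shape: enable_kernel_altivec() hands
the current thread the CPU's vector unit, and a preemption inside that
window could let another task run with inconsistent vector state, so the
region has to be preempt-off as well as pagefault-off. A stand-alone sketch
of the bracket (stub functions standing in for the kernel primitives):

    #include <stdio.h>

    /* Stubs modelling the kernel primitives used in the hunks above. */
    static void preempt_disable(void)        { puts("preempt off"); }
    static void preempt_enable(void)         { puts("preempt on"); }
    static void pagefault_disable(void)      { puts("pagefaults off"); }
    static void pagefault_enable(void)       { puts("pagefaults on"); }
    static void enable_kernel_altivec(void)  { puts("altivec usable"); }

    int main(void)
    {
            /* Order matters: nothing may run between us and the unit. */
            preempt_disable();
            pagefault_disable();
            enable_kernel_altivec();

            puts("  ...AltiVec crypto work...");

            pagefault_enable();
            preempt_enable();
            return 0;
    }
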
index 933e4b338459284465d7970e3ff7dbf0f37314b8..7992164ea9ec2849f6ac3691629c47cda30aeb28 100644 (file)
 #define AT_XDMAC_MBR_UBC_NDV3          (0x3 << 27)     /* Next Descriptor View 3 */
 
 #define AT_XDMAC_MAX_CHAN      0x20
+#define AT_XDMAC_MAX_CSIZE     16      /* 16 data */
+#define AT_XDMAC_MAX_DWIDTH    8       /* 64 bits */
 
 #define AT_XDMAC_DMA_BUSWIDTHS\
        (BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) |\
@@ -192,20 +194,17 @@ struct at_xdmac_chan {
        struct dma_chan                 chan;
        void __iomem                    *ch_regs;
        u32                             mask;           /* Channel Mask */
-       u32                             cfg[2];         /* Channel Configuration Register */
-       #define AT_XDMAC_DEV_TO_MEM_CFG 0               /* Predifined dev to mem channel conf */
-       #define AT_XDMAC_MEM_TO_DEV_CFG 1               /* Predifined mem to dev channel conf */
+       u32                             cfg;            /* Channel Configuration Register */
        u8                              perid;          /* Peripheral ID */
        u8                              perif;          /* Peripheral Interface */
        u8                              memif;          /* Memory Interface */
-       u32                             per_src_addr;
-       u32                             per_dst_addr;
        u32                             save_cc;
        u32                             save_cim;
        u32                             save_cnda;
        u32                             save_cndc;
        unsigned long                   status;
        struct tasklet_struct           tasklet;
+       struct dma_slave_config         sconfig;
 
        spinlock_t                      lock;
 
@@ -415,8 +414,9 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
        struct at_xdmac_desc    *desc = txd_to_at_desc(tx);
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(tx->chan);
        dma_cookie_t            cookie;
+       unsigned long           irqflags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, irqflags);
        cookie = dma_cookie_assign(tx);
 
        dev_vdbg(chan2dev(tx->chan), "%s: atchan 0x%p, add desc 0x%p to xfers_list\n",
@@ -425,7 +425,7 @@ static dma_cookie_t at_xdmac_tx_submit(struct dma_async_tx_descriptor *tx)
        if (list_is_singular(&atchan->xfers_list))
                at_xdmac_start_xfer(atchan, desc);
 
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, irqflags);
        return cookie;
 }
 
@@ -494,61 +494,94 @@ static struct dma_chan *at_xdmac_xlate(struct of_phandle_args *dma_spec,
        return chan;
 }
 
+static int at_xdmac_compute_chan_conf(struct dma_chan *chan,
+                                     enum dma_transfer_direction direction)
+{
+       struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
+       int                     csize, dwidth;
+
+       if (direction == DMA_DEV_TO_MEM) {
+               atchan->cfg =
+                       AT91_XDMAC_DT_PERID(atchan->perid)
+                       | AT_XDMAC_CC_DAM_INCREMENTED_AM
+                       | AT_XDMAC_CC_SAM_FIXED_AM
+                       | AT_XDMAC_CC_DIF(atchan->memif)
+                       | AT_XDMAC_CC_SIF(atchan->perif)
+                       | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+                       | AT_XDMAC_CC_DSYNC_PER2MEM
+                       | AT_XDMAC_CC_MBSIZE_SIXTEEN
+                       | AT_XDMAC_CC_TYPE_PER_TRAN;
+               csize = ffs(atchan->sconfig.src_maxburst) - 1;
+               if (csize < 0) {
+                       dev_err(chan2dev(chan), "invalid src maxburst value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+               dwidth = ffs(atchan->sconfig.src_addr_width) - 1;
+               if (dwidth < 0) {
+                       dev_err(chan2dev(chan), "invalid src addr width value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+       } else if (direction == DMA_MEM_TO_DEV) {
+               atchan->cfg =
+                       AT91_XDMAC_DT_PERID(atchan->perid)
+                       | AT_XDMAC_CC_DAM_FIXED_AM
+                       | AT_XDMAC_CC_SAM_INCREMENTED_AM
+                       | AT_XDMAC_CC_DIF(atchan->perif)
+                       | AT_XDMAC_CC_SIF(atchan->memif)
+                       | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
+                       | AT_XDMAC_CC_DSYNC_MEM2PER
+                       | AT_XDMAC_CC_MBSIZE_SIXTEEN
+                       | AT_XDMAC_CC_TYPE_PER_TRAN;
+               csize = ffs(atchan->sconfig.dst_maxburst) - 1;
+               if (csize < 0) {
+                       dev_err(chan2dev(chan), "invalid src maxburst value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_CSIZE(csize);
+               dwidth = ffs(atchan->sconfig.dst_addr_width) - 1;
+               if (dwidth < 0) {
+                       dev_err(chan2dev(chan), "invalid dst addr width value\n");
+                       return -EINVAL;
+               }
+               atchan->cfg |= AT_XDMAC_CC_DWIDTH(dwidth);
+       }
+
+       dev_dbg(chan2dev(chan), "%s: cfg=0x%08x\n", __func__, atchan->cfg);
+
+       return 0;
+}
+
+/*
+ * Only check that the maxburst and addr width values are supported by
+ * the controller, not that the configuration is valid for the transfer,
+ * since we don't know the direction at this stage.
+ */
+static int at_xdmac_check_slave_config(struct dma_slave_config *sconfig)
+{
+       if ((sconfig->src_maxburst > AT_XDMAC_MAX_CSIZE)
+           || (sconfig->dst_maxburst > AT_XDMAC_MAX_CSIZE))
+               return -EINVAL;
+
+       if ((sconfig->src_addr_width > AT_XDMAC_MAX_DWIDTH)
+           || (sconfig->dst_addr_width > AT_XDMAC_MAX_DWIDTH))
+               return -EINVAL;
+
+       return 0;
+}
+
 static int at_xdmac_set_slave_config(struct dma_chan *chan,
                                      struct dma_slave_config *sconfig)
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
-       u8 dwidth;
-       int csize;
 
-       atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] =
-               AT91_XDMAC_DT_PERID(atchan->perid)
-               | AT_XDMAC_CC_DAM_INCREMENTED_AM
-               | AT_XDMAC_CC_SAM_FIXED_AM
-               | AT_XDMAC_CC_DIF(atchan->memif)
-               | AT_XDMAC_CC_SIF(atchan->perif)
-               | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-               | AT_XDMAC_CC_DSYNC_PER2MEM
-               | AT_XDMAC_CC_MBSIZE_SIXTEEN
-               | AT_XDMAC_CC_TYPE_PER_TRAN;
-       csize = at_xdmac_csize(sconfig->src_maxburst);
-       if (csize < 0) {
-               dev_err(chan2dev(chan), "invalid src maxburst value\n");
+       if (at_xdmac_check_slave_config(sconfig)) {
+               dev_err(chan2dev(chan), "invalid slave configuration\n");
                return -EINVAL;
        }
-       atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-       dwidth = ffs(sconfig->src_addr_width) - 1;
-       atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-
-       atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] =
-               AT91_XDMAC_DT_PERID(atchan->perid)
-               | AT_XDMAC_CC_DAM_FIXED_AM
-               | AT_XDMAC_CC_SAM_INCREMENTED_AM
-               | AT_XDMAC_CC_DIF(atchan->perif)
-               | AT_XDMAC_CC_SIF(atchan->memif)
-               | AT_XDMAC_CC_SWREQ_HWR_CONNECTED
-               | AT_XDMAC_CC_DSYNC_MEM2PER
-               | AT_XDMAC_CC_MBSIZE_SIXTEEN
-               | AT_XDMAC_CC_TYPE_PER_TRAN;
-       csize = at_xdmac_csize(sconfig->dst_maxburst);
-       if (csize < 0) {
-               dev_err(chan2dev(chan), "invalid src maxburst value\n");
-               return -EINVAL;
-       }
-       atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_CSIZE(csize);
-       dwidth = ffs(sconfig->dst_addr_width) - 1;
-       atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG] |= AT_XDMAC_CC_DWIDTH(dwidth);
-
-       /* Src and dst addr are needed to configure the link list descriptor. */
-       atchan->per_src_addr = sconfig->src_addr;
-       atchan->per_dst_addr = sconfig->dst_addr;
 
-       dev_dbg(chan2dev(chan),
-               "%s: cfg[dev2mem]=0x%08x, cfg[mem2dev]=0x%08x, per_src_addr=0x%08x, per_dst_addr=0x%08x\n",
-               __func__, atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG],
-               atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG],
-               atchan->per_src_addr, atchan->per_dst_addr);
+       memcpy(&atchan->sconfig, sconfig, sizeof(atchan->sconfig));
 
        return 0;
 }
@@ -563,6 +596,8 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
        struct scatterlist      *sg;
        int                     i;
        unsigned int            xfer_size = 0;
+       unsigned long           irqflags;
+       struct dma_async_tx_descriptor  *ret = NULL;
 
        if (!sgl)
                return NULL;
@@ -578,7 +613,10 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                 flags);
 
        /* Protect dma_sconfig field that can be modified by set_slave_conf. */
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, irqflags);
+
+       if (at_xdmac_compute_chan_conf(chan, direction))
+               goto spin_unlock;
 
        /* Prepare descriptors. */
        for_each_sg(sgl, sg, sg_len, i) {
@@ -589,8 +627,7 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                mem = sg_dma_address(sg);
                if (unlikely(!len)) {
                        dev_err(chan2dev(chan), "sg data length is zero\n");
-                       spin_unlock_bh(&atchan->lock);
-                       return NULL;
+                       goto spin_unlock;
                }
                dev_dbg(chan2dev(chan), "%s: * sg%d len=%u, mem=0x%08x\n",
                         __func__, i, len, mem);
@@ -600,20 +637,18 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
-                       spin_unlock_bh(&atchan->lock);
-                       return NULL;
+                       goto spin_unlock;
                }
 
                /* Linked list descriptor setup. */
                if (direction == DMA_DEV_TO_MEM) {
-                       desc->lld.mbr_sa = atchan->per_src_addr;
+                       desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = mem;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                } else {
                        desc->lld.mbr_sa = mem;
-                       desc->lld.mbr_da = atchan->per_dst_addr;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                       desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
+               desc->lld.mbr_cfg = atchan->cfg;
                dwidth = at_xdmac_get_dwidth(desc->lld.mbr_cfg);
                fixed_dwidth = IS_ALIGNED(len, 1 << dwidth)
                               ? at_xdmac_get_dwidth(desc->lld.mbr_cfg)
@@ -645,13 +680,15 @@ at_xdmac_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl,
                xfer_size += len;
        }
 
-       spin_unlock_bh(&atchan->lock);
 
        first->tx_dma_desc.flags = flags;
        first->xfer_size = xfer_size;
        first->direction = direction;
+       ret = &first->tx_dma_desc;
 
-       return &first->tx_dma_desc;
+spin_unlock:
+       spin_unlock_irqrestore(&atchan->lock, irqflags);
+       return ret;
 }
 
 static struct dma_async_tx_descriptor *
@@ -664,6 +701,7 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
        struct at_xdmac_desc    *first = NULL, *prev = NULL;
        unsigned int            periods = buf_len / period_len;
        int                     i;
+       unsigned long           irqflags;
 
        dev_dbg(chan2dev(chan), "%s: buf_addr=%pad, buf_len=%zd, period_len=%zd, dir=%s, flags=0x%lx\n",
                __func__, &buf_addr, buf_len, period_len,
@@ -679,32 +717,34 @@ at_xdmac_prep_dma_cyclic(struct dma_chan *chan, dma_addr_t buf_addr,
                return NULL;
        }
 
+       if (at_xdmac_compute_chan_conf(chan, direction))
+               return NULL;
+
        for (i = 0; i < periods; i++) {
                struct at_xdmac_desc    *desc = NULL;
 
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
                                list_splice_init(&first->descs_list, &atchan->free_descs_list);
-                       spin_unlock_bh(&atchan->lock);
+                       spin_unlock_irqrestore(&atchan->lock, irqflags);
                        return NULL;
                }
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, irqflags);
                dev_dbg(chan2dev(chan),
                        "%s: desc=0x%p, tx_dma_desc.phys=%pad\n",
                        __func__, desc, &desc->tx_dma_desc.phys);
 
                if (direction == DMA_DEV_TO_MEM) {
-                       desc->lld.mbr_sa = atchan->per_src_addr;
+                       desc->lld.mbr_sa = atchan->sconfig.src_addr;
                        desc->lld.mbr_da = buf_addr + i * period_len;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_DEV_TO_MEM_CFG];
                } else {
                        desc->lld.mbr_sa = buf_addr + i * period_len;
-                       desc->lld.mbr_da = atchan->per_dst_addr;
-                       desc->lld.mbr_cfg = atchan->cfg[AT_XDMAC_MEM_TO_DEV_CFG];
+                       desc->lld.mbr_da = atchan->sconfig.dst_addr;
                }
+               desc->lld.mbr_cfg = atchan->cfg;
                desc->lld.mbr_ubc = AT_XDMAC_MBR_UBC_NDV1
                        | AT_XDMAC_MBR_UBC_NDEN
                        | AT_XDMAC_MBR_UBC_NSEN
@@ -766,6 +806,7 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
                                        | AT_XDMAC_CC_SIF(0)
                                        | AT_XDMAC_CC_MBSIZE_SIXTEEN
                                        | AT_XDMAC_CC_TYPE_MEM_TRAN;
+       unsigned long           irqflags;
 
        dev_dbg(chan2dev(chan), "%s: src=%pad, dest=%pad, len=%zd, flags=0x%lx\n",
                __func__, &src, &dest, len, flags);
@@ -798,9 +839,9 @@ at_xdmac_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dest, dma_addr_t src,
 
                dev_dbg(chan2dev(chan), "%s: remaining_size=%zu\n", __func__, remaining_size);
 
-               spin_lock_bh(&atchan->lock);
+               spin_lock_irqsave(&atchan->lock, irqflags);
                desc = at_xdmac_get_desc(atchan);
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, irqflags);
                if (!desc) {
                        dev_err(chan2dev(chan), "can't get descriptor\n");
                        if (first)
@@ -886,6 +927,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        int                     residue;
        u32                     cur_nda, mask, value;
        u8                      dwidth = 0;
+       unsigned long           flags;
 
        ret = dma_cookie_status(chan, cookie, txstate);
        if (ret == DMA_COMPLETE)
@@ -894,7 +936,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        if (!txstate)
                return ret;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        desc = list_first_entry(&atchan->xfers_list, struct at_xdmac_desc, xfer_node);
 
@@ -904,8 +946,7 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
         */
        if (!desc->active_xfer) {
                dma_set_residue(txstate, desc->xfer_size);
-               spin_unlock_bh(&atchan->lock);
-               return ret;
+               goto spin_unlock;
        }
 
        residue = desc->xfer_size;
@@ -936,14 +977,14 @@ at_xdmac_tx_status(struct dma_chan *chan, dma_cookie_t cookie,
        }
        residue += at_xdmac_chan_read(atchan, AT_XDMAC_CUBC) << dwidth;
 
-       spin_unlock_bh(&atchan->lock);
-
        dma_set_residue(txstate, residue);
 
        dev_dbg(chan2dev(chan),
                 "%s: desc=0x%p, tx_dma_desc.phys=%pad, tx_status=%d, cookie=%d, residue=%d\n",
                 __func__, desc, &desc->tx_dma_desc.phys, ret, cookie, residue);
 
+spin_unlock:
+       spin_unlock_irqrestore(&atchan->lock, flags);
        return ret;
 }
 
@@ -964,8 +1005,9 @@ static void at_xdmac_remove_xfer(struct at_xdmac_chan *atchan,
 static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
 {
        struct at_xdmac_desc    *desc;
+       unsigned long           flags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        /*
         * If channel is enabled, do nothing, advance_work will be triggered
@@ -980,7 +1022,7 @@ static void at_xdmac_advance_work(struct at_xdmac_chan *atchan)
                        at_xdmac_start_xfer(atchan, desc);
        }
 
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 }
 
 static void at_xdmac_handle_cyclic(struct at_xdmac_chan *atchan)
@@ -1116,12 +1158,13 @@ static int at_xdmac_device_config(struct dma_chan *chan,
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        int ret;
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        ret = at_xdmac_set_slave_config(chan, config);
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return ret;
 }
@@ -1130,18 +1173,19 @@ static int at_xdmac_device_pause(struct dma_chan *chan)
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
        if (test_and_set_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status))
                return 0;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        at_xdmac_write(atxdmac, AT_XDMAC_GRWS, atchan->mask);
        while (at_xdmac_chan_read(atchan, AT_XDMAC_CC)
               & (AT_XDMAC_CC_WRIP | AT_XDMAC_CC_RDIP))
                cpu_relax();
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1150,18 +1194,19 @@ static int at_xdmac_device_resume(struct dma_chan *chan)
 {
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        if (!at_xdmac_chan_is_paused(atchan)) {
-               spin_unlock_bh(&atchan->lock);
+               spin_unlock_irqrestore(&atchan->lock, flags);
                return 0;
        }
 
        at_xdmac_write(atxdmac, AT_XDMAC_GRWR, atchan->mask);
        clear_bit(AT_XDMAC_CHAN_IS_PAUSED, &atchan->status);
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1171,10 +1216,11 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
        struct at_xdmac_desc    *desc, *_desc;
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac         *atxdmac = to_at_xdmac(atchan->chan.device);
+       unsigned long           flags;
 
        dev_dbg(chan2dev(chan), "%s\n", __func__);
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
        at_xdmac_write(atxdmac, AT_XDMAC_GD, atchan->mask);
        while (at_xdmac_read(atxdmac, AT_XDMAC_GS) & atchan->mask)
                cpu_relax();
@@ -1184,7 +1230,7 @@ static int at_xdmac_device_terminate_all(struct dma_chan *chan)
                at_xdmac_remove_xfer(atchan, desc);
 
        clear_bit(AT_XDMAC_CHAN_IS_CYCLIC, &atchan->status);
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
 
        return 0;
 }
@@ -1194,8 +1240,9 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
        struct at_xdmac_chan    *atchan = to_at_xdmac_chan(chan);
        struct at_xdmac_desc    *desc;
        int                     i;
+       unsigned long           flags;
 
-       spin_lock_bh(&atchan->lock);
+       spin_lock_irqsave(&atchan->lock, flags);
 
        if (at_xdmac_chan_is_enabled(atchan)) {
                dev_err(chan2dev(chan),
@@ -1226,7 +1273,7 @@ static int at_xdmac_alloc_chan_resources(struct dma_chan *chan)
        dev_dbg(chan2dev(chan), "%s: allocated %d descriptors\n", __func__, i);
 
 spin_unlock:
-       spin_unlock_bh(&atchan->lock);
+       spin_unlock_irqrestore(&atchan->lock, flags);
        return i;
 }
 
index 2890d744bb1bb902cc095fb87841c492cef7542c..3ddfd1f6c23c0f0f891ed11d6f68cbcaaa3c6e03 100644 (file)
@@ -487,7 +487,11 @@ int dma_get_slave_caps(struct dma_chan *chan, struct dma_slave_caps *caps)
        caps->directions = device->directions;
        caps->residue_granularity = device->residue_granularity;
 
-       caps->cmd_pause = !!device->device_pause;
+       /*
+        * Some devices implement only pause (e.g. to read the residue) but
+        * no resume. However, cmd_pause is advertised as both pause AND
+        * resume.
+        */
+       caps->cmd_pause = !!(device->device_pause && device->device_resume);
        caps->cmd_terminate = !!device->device_terminate_all;
 
        return 0;
index 9b84def7a35373a45cf18efc9d53939bcc86baf0..f42f71e37e73767a55078aef4c81bcd238b02db1 100644 (file)
@@ -384,7 +384,10 @@ static int hsu_dma_terminate_all(struct dma_chan *chan)
        spin_lock_irqsave(&hsuc->vchan.lock, flags);
 
        hsu_dma_stop_channel(hsuc);
-       hsuc->desc = NULL;
+       if (hsuc->desc) {
+               hsu_dma_desc_free(&hsuc->desc->vdesc);
+               hsuc->desc = NULL;
+       }
 
        vchan_get_all_descriptors(&hsuc->vchan, &head);
        spin_unlock_irqrestore(&hsuc->vchan.lock, flags);
index 6de2e677be0401c6490d9a4b0a34a2b6b61a8450..74d9db05a5ad24beac7fb48b319d73240f5842b6 100644 (file)
@@ -22,6 +22,7 @@
 #include <linux/module.h>
 #include <linux/io.h>
 #include <linux/seq_file.h>
+#include <linux/vmalloc.h>
 
 #include "mic_x100_dma.h"
 
index a7d9d3029b145dfa29babeee33022bc1f7354d52..340f9e607cd8b90dfe75add027c18bd26d67f1e0 100644 (file)
@@ -2127,6 +2127,7 @@ static int pl330_terminate_all(struct dma_chan *chan)
        struct pl330_dmac *pl330 = pch->dmac;
        LIST_HEAD(list);
 
+       pm_runtime_get_sync(pl330->ddma.dev);
        spin_lock_irqsave(&pch->lock, flags);
        spin_lock(&pl330->lock);
        _stop(pch->thread);
@@ -2151,6 +2152,8 @@ static int pl330_terminate_all(struct dma_chan *chan)
        list_splice_tail_init(&pch->work_list, &pl330->desc_pool);
        list_splice_tail_init(&pch->completed_list, &pl330->desc_pool);
        spin_unlock_irqrestore(&pch->lock, flags);
+       pm_runtime_mark_last_busy(pl330->ddma.dev);
+       pm_runtime_put_autosuspend(pl330->ddma.dev);
 
        return 0;
 }
index de67fce189840eee2d75ad42e23f862103d7540b..e45d1f13f4458e9130d8f052494b33b1a11a9c60 100644 (file)
@@ -119,6 +119,18 @@ static int usb_extcon_probe(struct platform_device *pdev)
                return PTR_ERR(info->id_gpiod);
        }
 
+       info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
+       if (IS_ERR(info->edev)) {
+               dev_err(dev, "failed to allocate extcon device\n");
+               return -ENOMEM;
+       }
+
+       ret = devm_extcon_dev_register(dev, info->edev);
+       if (ret < 0) {
+               dev_err(dev, "failed to register extcon device\n");
+               return ret;
+       }
+
        ret = gpiod_set_debounce(info->id_gpiod,
                                 USB_GPIO_DEBOUNCE_MS * 1000);
        if (ret < 0)
@@ -142,18 +154,6 @@ static int usb_extcon_probe(struct platform_device *pdev)
                return ret;
        }
 
-       info->edev = devm_extcon_dev_allocate(dev, usb_extcon_cable);
-       if (IS_ERR(info->edev)) {
-               dev_err(dev, "failed to allocate extcon device\n");
-               return -ENOMEM;
-       }
-
-       ret = devm_extcon_dev_register(dev, info->edev);
-       if (ret < 0) {
-               dev_err(dev, "failed to register extcon device\n");
-               return ret;
-       }
-
        platform_set_drvdata(pdev, info);
        device_init_wakeup(dev, 1);
 
index 6e45a43ffe8476686bcaee1157a7acc641fc3e6b..97b1616aa391819c0579d12efcca286cfa1c7075 100644 (file)
@@ -499,19 +499,19 @@ static int __init dmi_present(const u8 *buf)
        buf += 16;
 
        if (memcmp(buf, "_DMI_", 5) == 0 && dmi_checksum(buf, 15)) {
+               if (smbios_ver)
+                       dmi_ver = smbios_ver;
+               else
+                       dmi_ver = (buf[14] & 0xF0) << 4 | (buf[14] & 0x0F);
                dmi_num = get_unaligned_le16(buf + 12);
                dmi_len = get_unaligned_le16(buf + 6);
                dmi_base = get_unaligned_le32(buf + 8);
 
                if (dmi_walk_early(dmi_decode) == 0) {
                        if (smbios_ver) {
-                               dmi_ver = smbios_ver;
-                               pr_info("SMBIOS %d.%d%s present.\n",
-                                       dmi_ver >> 8, dmi_ver & 0xFF,
-                                       (dmi_ver < 0x0300) ? "" : ".x");
+                               pr_info("SMBIOS %d.%d present.\n",
+                                      dmi_ver >> 8, dmi_ver & 0xFF);
                        } else {
-                               dmi_ver = (buf[14] & 0xF0) << 4 |
-                                          (buf[14] & 0x0F);
                                pr_info("Legacy DMI %d.%d present.\n",
                                       dmi_ver >> 8, dmi_ver & 0xFF);
                        }
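
In the legacy _DMI_ path, buf[14] packs the major and minor revision as two
BCD nibbles; the shift above spreads them into the same major << 8 | minor
layout used by smbios_ver. A one-line demonstration (the revision byte is a
hypothetical example):

    #include <stdio.h>

    int main(void)
    {
            unsigned char rev = 0x24;       /* hypothetical _DMI_ rev: 2.4 */
            unsigned int dmi_ver = (rev & 0xF0) << 4 | (rev & 0x0F);

            printf("DMI %d.%d\n", dmi_ver >> 8, dmi_ver & 0xFF); /* DMI 2.4 */
            return 0;
    }
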
index 8de4da5c9ab69c919389057078d352fd492fbcd3..54071c1483400d41e214c0f83512ca1f4600814a 100644 (file)
@@ -18,6 +18,11 @@ config EFI_VARS
          Subsequent efibootmgr releases may be found at:
          <http://github.com/vathpela/efibootmgr>
 
+config EFI_ESRT
+       bool
+       depends on EFI && !IA64
+       default y
+
 config EFI_VARS_PSTORE
        tristate "Register efivars backend for pstore"
        depends on EFI_VARS && PSTORE
index d8be608a9f3be733bf4d56a1e360ecc3c5471e7b..6fd3da938717c27c233af9c6b66b186f69fc93fa 100644 (file)
@@ -3,6 +3,7 @@
 #
 obj-$(CONFIG_EFI)                      += efi.o vars.o reboot.o
 obj-$(CONFIG_EFI_VARS)                 += efivars.o
+obj-$(CONFIG_EFI_ESRT)                 += esrt.o
 obj-$(CONFIG_EFI_VARS_PSTORE)          += efi-pstore.o
 obj-$(CONFIG_UEFI_CPER)                        += cper.o
 obj-$(CONFIG_EFI_RUNTIME_MAP)          += runtime-map.o
index 3061bb8629dc3fbdf19e19d373d75b2286cf8376..ca617f40574ac2bb8e5697b858a1c52f93c8fad9 100644 (file)
@@ -39,6 +39,7 @@ struct efi __read_mostly efi = {
        .fw_vendor  = EFI_INVALID_TABLE_ADDR,
        .runtime    = EFI_INVALID_TABLE_ADDR,
        .config_table  = EFI_INVALID_TABLE_ADDR,
+       .esrt       = EFI_INVALID_TABLE_ADDR,
 };
 EXPORT_SYMBOL(efi);
 
@@ -64,7 +65,7 @@ static int __init parse_efi_cmdline(char *str)
 }
 early_param("efi", parse_efi_cmdline);
 
-static struct kobject *efi_kobj;
+struct kobject *efi_kobj;
 static struct kobject *efivars_kobj;
 
 /*
@@ -85,10 +86,15 @@ static ssize_t systab_show(struct kobject *kobj,
                str += sprintf(str, "ACPI20=0x%lx\n", efi.acpi20);
        if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "ACPI=0x%lx\n", efi.acpi);
-       if (efi.smbios != EFI_INVALID_TABLE_ADDR)
-               str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
+       /*
+        * If both SMBIOS and SMBIOS3 entry points are implemented, the
+        * SMBIOS3 entry point shall be preferred, so we list it first to
+        * let applications stop parsing after the first match.
+        */
        if (efi.smbios3 != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "SMBIOS3=0x%lx\n", efi.smbios3);
+       if (efi.smbios != EFI_INVALID_TABLE_ADDR)
+               str += sprintf(str, "SMBIOS=0x%lx\n", efi.smbios);
        if (efi.hcdp != EFI_INVALID_TABLE_ADDR)
                str += sprintf(str, "HCDP=0x%lx\n", efi.hcdp);
        if (efi.boot_info != EFI_INVALID_TABLE_ADDR)
@@ -232,6 +238,84 @@ err_put:
 
 subsys_initcall(efisubsys_init);
 
+/*
+ * Find the EFI memory descriptor for a given physical address: determine
+ * whether the address falls within an EFI memory map entry and, if so,
+ * populate the supplied memory descriptor with that entry's data.
+ */
+int __init efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md)
+{
+       struct efi_memory_map *map = efi.memmap;
+       void *p, *e;
+
+       if (!efi_enabled(EFI_MEMMAP)) {
+               pr_err_once("EFI_MEMMAP is not enabled.\n");
+               return -EINVAL;
+       }
+
+       if (!map) {
+               pr_err_once("efi.memmap is not set.\n");
+               return -EINVAL;
+       }
+       if (!out_md) {
+               pr_err_once("out_md is null.\n");
+               return -EINVAL;
+       }
+       if (WARN_ON_ONCE(!map->phys_map))
+               return -EINVAL;
+       if (WARN_ON_ONCE(map->nr_map == 0) || WARN_ON_ONCE(map->desc_size == 0))
+               return -EINVAL;
+
+       e = map->phys_map + map->nr_map * map->desc_size;
+       for (p = map->phys_map; p < e; p += map->desc_size) {
+               efi_memory_desc_t *md;
+               u64 size;
+               u64 end;
+
+               /*
+                * If a driver calls this after efi_free_boot_services,
+                * ->map will be NULL, and the target may also not be mapped.
+                * So just always get our own virtual map on the CPU.
+                */
+               md = early_memremap((phys_addr_t)p, sizeof (*md));
+               if (!md) {
+                       pr_err_once("early_memremap(%p, %zu) failed.\n",
+                                   p, sizeof (*md));
+                       return -ENOMEM;
+               }
+
+               if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+                   md->type != EFI_BOOT_SERVICES_DATA &&
+                   md->type != EFI_RUNTIME_SERVICES_DATA) {
+                       early_memunmap(md, sizeof (*md));
+                       continue;
+               }
+
+               size = md->num_pages << EFI_PAGE_SHIFT;
+               end = md->phys_addr + size;
+               if (phys_addr >= md->phys_addr && phys_addr < end) {
+                       memcpy(out_md, md, sizeof(*out_md));
+                       early_memunmap(md, sizeof (*md));
+                       return 0;
+               }
+
+               early_memunmap(md, sizeof (*md));
+       }
+       pr_err_once("requested map not found.\n");
+       return -ENOENT;
+}
+
+/*
+ * Calculate the highest address of an efi memory descriptor.
+ */
+u64 __init efi_mem_desc_end(efi_memory_desc_t *md)
+{
+       u64 size = md->num_pages << EFI_PAGE_SHIFT;
+       u64 end = md->phys_addr + size;
+       return end;
+}
 
 /*
  * We can't ioremap data in EFI boot services RAM, because we've already mapped
@@ -274,6 +358,7 @@ static __initdata efi_config_table_type_t common_tables[] = {
        {SMBIOS_TABLE_GUID, "SMBIOS", &efi.smbios},
        {SMBIOS3_TABLE_GUID, "SMBIOS 3.0", &efi.smbios3},
        {UGA_IO_PROTOCOL_GUID, "UGA", &efi.uga},
+       {EFI_SYSTEM_RESOURCE_TABLE_GUID, "ESRT", &efi.esrt},
        {NULL_GUID, NULL, NULL},
 };
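
The efi_mem_desc_end() helper added above is plain descriptor arithmetic:
EFI pages are always 4 KiB (EFI_PAGE_SHIFT is 12) regardless of the
kernel's own page size, so the end is just base plus num_pages << 12. As a
stand-alone model:

    #include <stdint.h>
    #include <stdio.h>

    #define EFI_PAGE_SHIFT 12       /* EFI pages are always 4 KiB */

    struct efi_md { uint64_t phys_addr, num_pages; };

    static uint64_t md_end(const struct efi_md *md)
    {
            return md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
    }

    int main(void)
    {
            struct efi_md md = { .phys_addr = 0x80000000, .num_pages = 16 };

            /* 16 pages past 0x80000000 -> 0x80010000 */
            printf("end = 0x%llx\n", (unsigned long long)md_end(&md));
            return 0;
    }
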
 
index 7b2e0496e0c084c4e9e319c04d61245abdde5edd..756eca8c4cf8f291025a3ad44f7cbb9981aeb5fd 100644 (file)
@@ -535,7 +535,7 @@ static ssize_t efivar_delete(struct file *filp, struct kobject *kobj,
  * efivar_create_sysfs_entry - create a new entry in sysfs
  * @new_var: efivar entry to create
  *
- * Returns 1 on failure, 0 on success
+ * Returns 0 on success, negative error code on failure
  */
 static int
 efivar_create_sysfs_entry(struct efivar_entry *new_var)
@@ -544,6 +544,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
        char *short_name;
        unsigned long variable_name_size;
        efi_char16_t *variable_name;
+       int ret;
 
        variable_name = new_var->var.VariableName;
        variable_name_size = ucs2_strlen(variable_name) * sizeof(efi_char16_t);
@@ -558,7 +559,7 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
        short_name = kzalloc(short_name_size, GFP_KERNEL);
 
        if (!short_name)
-               return 1;
+               return -ENOMEM;
 
        /* Convert Unicode to normal chars (assume top bits are 0),
           ala UTF-8 */
@@ -574,11 +575,11 @@ efivar_create_sysfs_entry(struct efivar_entry *new_var)
 
        new_var->kobj.kset = efivars_kset;
 
-       i = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
+       ret = kobject_init_and_add(&new_var->kobj, &efivar_ktype,
                                   NULL, "%s", short_name);
        kfree(short_name);
-       if (i)
-               return 1;
+       if (ret)
+               return ret;
 
        kobject_uevent(&new_var->kobj, KOBJ_ADD);
        efivar_entry_add(new_var, &efivar_sysfs_list);
diff --git a/drivers/firmware/efi/esrt.c b/drivers/firmware/efi/esrt.c
new file mode 100644 (file)
index 0000000..a5b95d6
--- /dev/null
@@ -0,0 +1,471 @@
+/*
+ * esrt.c
+ *
+ * This module exports EFI System Resource Table (ESRT) entries into userspace
+ * through the sysfs file system. The ESRT provides a read-only catalog of
+ * system components for which the system accepts firmware upgrades via UEFI's
+ * "Capsule Update" feature. This module allows userland utilities to evaluate
+ * what firmware updates can be applied to this system, and potentially arrange
+ * for those updates to occur.
+ *
+ * Data is currently found below /sys/firmware/efi/esrt/...
+ */
+#define pr_fmt(fmt) "esrt: " fmt
+
+#include <linux/capability.h>
+#include <linux/device.h>
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kobject.h>
+#include <linux/list.h>
+#include <linux/memblock.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+
+#include <asm/io.h>
+#include <asm/early_ioremap.h>
+
+struct efi_system_resource_entry_v1 {
+       efi_guid_t      fw_class;
+       u32             fw_type;
+       u32             fw_version;
+       u32             lowest_supported_fw_version;
+       u32             capsule_flags;
+       u32             last_attempt_version;
+       u32             last_attempt_status;
+};
+
+/*
+ * _count and _version are what they seem like.  _max is actually just
+ * accounting info for the firmware when creating the table; it should never
+ * have been exposed to us.  To wit, the spec says:
+ * The maximum number of resource array entries that can be within the
+ * table without reallocating the table, must not be zero.
+ * Since there's no guidance about what that means in terms of memory layout,
+ * it means nothing to us.
+ */
+struct efi_system_resource_table {
+       u32     fw_resource_count;
+       u32     fw_resource_count_max;
+       u64     fw_resource_version;
+       u8      entries[];
+};
+
+static phys_addr_t esrt_data;
+static size_t esrt_data_size;
+
+static struct efi_system_resource_table *esrt;
+
+struct esre_entry {
+       union {
+               struct efi_system_resource_entry_v1 *esre1;
+       } esre;
+
+       struct kobject kobj;
+       struct list_head list;
+};
+
+/* global list of esre_entry. */
+static LIST_HEAD(entry_list);
+
+/* entry attribute */
+struct esre_attribute {
+       struct attribute attr;
+       ssize_t (*show)(struct esre_entry *entry, char *buf);
+       ssize_t (*store)(struct esre_entry *entry,
+                        const char *buf, size_t count);
+};
+
+static struct esre_entry *to_entry(struct kobject *kobj)
+{
+       return container_of(kobj, struct esre_entry, kobj);
+}
+
+static struct esre_attribute *to_attr(struct attribute *attr)
+{
+       return container_of(attr, struct esre_attribute, attr);
+}
+
+static ssize_t esre_attr_show(struct kobject *kobj,
+                             struct attribute *_attr, char *buf)
+{
+       struct esre_entry *entry = to_entry(kobj);
+       struct esre_attribute *attr = to_attr(_attr);
+
+       /* Don't tell normal users what firmware versions we've got... */
+       if (!capable(CAP_SYS_ADMIN))
+               return -EACCES;
+
+       return attr->show(entry, buf);
+}
+
+static const struct sysfs_ops esre_attr_ops = {
+       .show = esre_attr_show,
+};
+
+/* Generic ESRT Entry ("ESRE") support. */
+static ssize_t esre_fw_class_show(struct esre_entry *entry, char *buf)
+{
+       char *str = buf;
+
+       efi_guid_to_str(&entry->esre.esre1->fw_class, str);
+       str += strlen(str);
+       str += sprintf(str, "\n");
+
+       return str - buf;
+}
+
+static struct esre_attribute esre_fw_class = __ATTR(fw_class, 0400,
+       esre_fw_class_show, NULL);
+
+#define esre_attr_decl(name, size, fmt) \
+static ssize_t esre_##name##_show(struct esre_entry *entry, char *buf) \
+{ \
+       return sprintf(buf, fmt "\n", \
+                      le##size##_to_cpu(entry->esre.esre1->name)); \
+} \
+\
+static struct esre_attribute esre_##name = __ATTR(name, 0400, \
+       esre_##name##_show, NULL)
+
+esre_attr_decl(fw_type, 32, "%u");
+esre_attr_decl(fw_version, 32, "%u");
+esre_attr_decl(lowest_supported_fw_version, 32, "%u");
+esre_attr_decl(capsule_flags, 32, "0x%x");
+esre_attr_decl(last_attempt_version, 32, "%u");
+esre_attr_decl(last_attempt_status, 32, "%u");
+
+static struct attribute *esre1_attrs[] = {
+       &esre_fw_class.attr,
+       &esre_fw_type.attr,
+       &esre_fw_version.attr,
+       &esre_lowest_supported_fw_version.attr,
+       &esre_capsule_flags.attr,
+       &esre_last_attempt_version.attr,
+       &esre_last_attempt_status.attr,
+       NULL
+};
+static void esre_release(struct kobject *kobj)
+{
+       struct esre_entry *entry = to_entry(kobj);
+
+       list_del(&entry->list);
+       kfree(entry);
+}
+
+static struct kobj_type esre1_ktype = {
+       .release = esre_release,
+       .sysfs_ops = &esre_attr_ops,
+       .default_attrs = esre1_attrs,
+};
+
+
+static struct kobject *esrt_kobj;
+static struct kset *esrt_kset;
+
+static int esre_create_sysfs_entry(void *esre, int entry_num)
+{
+       struct esre_entry *entry;
+       char name[20];
+
+       entry = kzalloc(sizeof(*entry), GFP_KERNEL);
+       if (!entry)
+               return -ENOMEM;
+
+       sprintf(name, "entry%d", entry_num);
+
+       entry->kobj.kset = esrt_kset;
+
+       if (esrt->fw_resource_version == 1) {
+               int rc = 0;
+
+               entry->esre.esre1 = esre;
+               rc = kobject_init_and_add(&entry->kobj, &esre1_ktype, NULL,
+                                         "%s", name);
+               if (rc) {
+                       kfree(entry);
+                       return rc;
+               }
+       }
+
+       list_add_tail(&entry->list, &entry_list);
+       return 0;
+}
+
+/* support for displaying ESRT fields at the top level */
+#define esrt_attr_decl(name, size, fmt) \
+static ssize_t esrt_##name##_show(struct kobject *kobj, \
+                                 struct kobj_attribute *attr, char *buf)\
+{ \
+       return sprintf(buf, fmt "\n", le##size##_to_cpu(esrt->name)); \
+} \
+\
+static struct kobj_attribute esrt_##name = __ATTR(name, 0400, \
+       esrt_##name##_show, NULL)
+
+esrt_attr_decl(fw_resource_count, 32, "%u");
+esrt_attr_decl(fw_resource_count_max, 32, "%u");
+esrt_attr_decl(fw_resource_version, 64, "%llu");
+
+static struct attribute *esrt_attrs[] = {
+       &esrt_fw_resource_count.attr,
+       &esrt_fw_resource_count_max.attr,
+       &esrt_fw_resource_version.attr,
+       NULL,
+};
+
+static inline int esrt_table_exists(void)
+{
+       if (!efi_enabled(EFI_CONFIG_TABLES))
+               return 0;
+       if (efi.esrt == EFI_INVALID_TABLE_ADDR)
+               return 0;
+       return 1;
+}
+
+static umode_t esrt_attr_is_visible(struct kobject *kobj,
+                                   struct attribute *attr, int n)
+{
+       if (!esrt_table_exists())
+               return 0;
+       return attr->mode;
+}
+
+static struct attribute_group esrt_attr_group = {
+       .attrs = esrt_attrs,
+       .is_visible = esrt_attr_is_visible,
+};
+
+/*
+ * Validate the table against the EFI memory map and reserve its physical
+ * range; the actual copy to kmalloc()ed memory happens later, in
+ * esrt_sysfs_init().
+ */
+void __init efi_esrt_init(void)
+{
+       void *va;
+       struct efi_system_resource_table tmpesrt;
+       struct efi_system_resource_entry_v1 *v1_entries;
+       size_t size, max, entry_size, entries_size;
+       efi_memory_desc_t md;
+       int rc;
+       phys_addr_t end;
+
+       pr_debug("esrt-init: loading.\n");
+       if (!esrt_table_exists())
+               return;
+
+       rc = efi_mem_desc_lookup(efi.esrt, &md);
+       if (rc < 0) {
+               pr_err("ESRT header is not in the memory map.\n");
+               return;
+       }
+
+       max = efi_mem_desc_end(&md);
+       if (max < efi.esrt) {
+               pr_err("EFI memory descriptor is invalid. (esrt: %p max: %p)\n",
+                      (void *)efi.esrt, (void *)max);
+               return;
+       }
+
+       size = sizeof(*esrt);
+       max -= efi.esrt;
+
+       if (max < size) {
+               pr_err("ESRT header doen't fit on single memory map entry. (size: %zu max: %zu)\n",
+                      size, max);
+               return;
+       }
+
+       va = early_memremap(efi.esrt, size);
+       if (!va) {
+               pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt,
+                      size);
+               return;
+       }
+
+       memcpy(&tmpesrt, va, sizeof(tmpesrt));
+
+       if (tmpesrt.fw_resource_version == 1) {
+               entry_size = sizeof(*v1_entries);
+       } else {
+               pr_err("Unsupported ESRT version %llu.\n",
+                      tmpesrt.fw_resource_version);
+               goto err_memunmap;
+       }
+
+       if (tmpesrt.fw_resource_count > 0 && max - size < entry_size) {
+               pr_err("ESRT memory map entry can only hold the header. (max: %zu size: %zu)\n",
+                      max - size, entry_size);
+               goto err_memunmap;
+       }
+
+       /*
+        * The format doesn't really give us any boundary to test here,
+        * so I'm making up 128 as the max number of individually updatable
+        * components we support.
+        * 128 should be pretty excessive, but there's still some chance
+        * somebody will do that someday and we'll need to raise this.
+        */
+       if (tmpesrt.fw_resource_count > 128) {
+               pr_err("ESRT says fw_resource_count has very large value %d.\n",
+                      tmpesrt.fw_resource_count);
+               goto err_memunmap;
+       }
+
+       /*
+        * We know it can't be larger than N * sizeof() here, and N is limited
+        * by the previous test to a small number, so there's no overflow.
+        */
+       entries_size = tmpesrt.fw_resource_count * entry_size;
+       if (max < size + entries_size) {
+               pr_err("ESRT does not fit on single memory map entry (size: %zu max: %zu)\n",
+                      size, max);
+               goto err_memunmap;
+       }
+
+       /* remap with the full size now that the entry count is known */
+       early_memunmap(va, size);
+       size += entries_size;
+       va = early_memremap(efi.esrt, size);
+       if (!va) {
+               pr_err("early_memremap(%p, %zu) failed.\n", (void *)efi.esrt,
+                      size);
+               return;
+       }
+
+       esrt_data = (phys_addr_t)efi.esrt;
+       esrt_data_size = size;
+
+       end = esrt_data + size;
+       pr_info("Reserving ESRT space from %pa to %pa.\n", &esrt_data, &end);
+       memblock_reserve(esrt_data, esrt_data_size);
+
+       pr_debug("esrt-init: loaded.\n");
+err_memunmap:
+       early_memunmap(va, size);
+}
+
+static int __init register_entries(void)
+{
+       struct efi_system_resource_entry_v1 *v1_entries = (void *)esrt->entries;
+       int i, rc;
+
+       if (!esrt_table_exists())
+               return 0;
+
+       for (i = 0; i < le32_to_cpu(esrt->fw_resource_count); i++) {
+               void *esre = NULL;
+               if (esrt->fw_resource_version == 1) {
+                       esre = &v1_entries[i];
+               } else {
+                       pr_err("Unsupported ESRT version %lld.\n",
+                              esrt->fw_resource_version);
+                       return -EINVAL;
+               }
+
+               rc = esre_create_sysfs_entry(esre, i);
+               if (rc < 0) {
+                       pr_err("ESRT entry creation failed with error %d.\n",
+                              rc);
+                       return rc;
+               }
+       }
+       return 0;
+}
+
+static void cleanup_entry_list(void)
+{
+       struct esre_entry *entry, *next;
+
+       list_for_each_entry_safe(entry, next, &entry_list, list) {
+               kobject_put(&entry->kobj);
+       }
+}
+
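+/*
+ * Late-init counterpart of efi_esrt_init(): the physical range reserved
+ * there is ioremap()ed, copied into a kmalloc()ed buffer, and the early
+ * memblock reservation is dropped once the sysfs entries have been
+ * created from the private copy.
+ */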
+static int __init esrt_sysfs_init(void)
+{
+       int error;
+       struct efi_system_resource_table __iomem *ioesrt;
+
+       pr_debug("esrt-sysfs: loading.\n");
+       if (!esrt_data || !esrt_data_size)
+               return -ENOSYS;
+
+       ioesrt = ioremap(esrt_data, esrt_data_size);
+       if (!ioesrt) {
+               pr_err("ioremap(%pa, %zu) failed.\n", &esrt_data,
+                      esrt_data_size);
+               return -ENOMEM;
+       }
+
+       esrt = kmalloc(esrt_data_size, GFP_KERNEL);
+       if (!esrt) {
+               pr_err("kmalloc failed. (wanted %zu bytes)\n", esrt_data_size);
+               iounmap(ioesrt);
+               return -ENOMEM;
+       }
+
+       memcpy_fromio(esrt, ioesrt, esrt_data_size);
+
+       esrt_kobj = kobject_create_and_add("esrt", efi_kobj);
+       if (!esrt_kobj) {
+               pr_err("Firmware table registration failed.\n");
+               error = -ENOMEM;
+               goto err;
+       }
+
+       error = sysfs_create_group(esrt_kobj, &esrt_attr_group);
+       if (error) {
+               pr_err("Sysfs attribute export failed with error %d.\n",
+                      error);
+               goto err_remove_esrt;
+       }
+
+       esrt_kset = kset_create_and_add("entries", NULL, esrt_kobj);
+       if (!esrt_kset) {
+               pr_err("kset creation failed.\n");
+               error = -ENOMEM;
+               goto err_remove_group;
+       }
+
+       error = register_entries();
+       if (error)
+               goto err_cleanup_list;
+
+       memblock_remove(esrt_data, esrt_data_size);
+
+       pr_debug("esrt-sysfs: loaded.\n");
+
+       return 0;
+err_cleanup_list:
+       cleanup_entry_list();
+       kset_unregister(esrt_kset);
+err_remove_group:
+       sysfs_remove_group(esrt_kobj, &esrt_attr_group);
+err_remove_esrt:
+       kobject_put(esrt_kobj);
+err:
+       kfree(esrt);
+       esrt = NULL;
+       return error;
+}
+
+static void __exit esrt_sysfs_exit(void)
+{
+       pr_debug("esrt-sysfs: unloading.\n");
+       cleanup_entry_list();
+       kset_unregister(esrt_kset);
+       sysfs_remove_group(esrt_kobj, &esrt_attr_group);
+       kfree(esrt);
+       esrt = NULL;
+       kobject_del(esrt_kobj);
+       kobject_put(esrt_kobj);
+}
+
+module_init(esrt_sysfs_init);
+module_exit(esrt_sysfs_exit);
+
+MODULE_AUTHOR("Peter Jones <pjones@redhat.com>");
+MODULE_DESCRIPTION("EFI System Resource Table support");
+MODULE_LICENSE("GPL");
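
As a usage illustration, a minimal userspace reader for the layout the
module creates (a hypothetical program, not part of the patch; the
attributes are mode 0400 and the per-entry ones additionally require
CAP_SYS_ADMIN, so run it as root):

        #include <stdio.h>

        static void dump(const char *path)
        {
                char buf[128];
                FILE *f = fopen(path, "r");

                if (!f)
                        return;
                if (fgets(buf, sizeof(buf), f))
                        printf("%s: %s", path, buf);
                fclose(f);
        }

        int main(void)
        {
                char path[256];
                int i;

                dump("/sys/firmware/efi/esrt/fw_resource_count");
                dump("/sys/firmware/efi/esrt/fw_resource_version");
                for (i = 0; i < 8; i++) {       /* arbitrary bound for the sketch */
                        snprintf(path, sizeof(path),
                                 "/sys/firmware/efi/esrt/entries/entry%d/fw_version", i);
                        dump(path);
                }
                return 0;
        }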
index 071c2c969eec06ad929ecfb871c614297a615e9e..72791232e46ba44ff474cf7dadff5ccb433d7348 100644 (file)
@@ -186,8 +186,20 @@ struct ibft_kobject {
 
 static struct iscsi_boot_kset *boot_kset;
 
+/* fully null address */
 static const char nulls[16];
 
+/* IPv4-mapped IPv6 ::ffff:0.0.0.0 */
+static const char mapped_nulls[16] = { 0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0x00, 0x00,
+                                       0x00, 0x00, 0xff, 0xff,
+                                       0x00, 0x00, 0x00, 0x00 };
+
+static int address_not_null(u8 *ip)
+{
+       return (memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16));
+}
+
 /*
  * Helper functions to parse data properly.
  */
@@ -445,7 +457,7 @@ static umode_t ibft_check_nic_for(void *data, int type)
                rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_IP_ADDR:
-               if (memcmp(nic->ip_addr, nulls, sizeof(nic->ip_addr)))
+               if (address_not_null(nic->ip_addr))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_SUBNET_MASK:
@@ -456,21 +468,19 @@ static umode_t ibft_check_nic_for(void *data, int type)
                rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_GATEWAY:
-               if (memcmp(nic->gateway, nulls, sizeof(nic->gateway)))
+               if (address_not_null(nic->gateway))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_PRIMARY_DNS:
-               if (memcmp(nic->primary_dns, nulls,
-                          sizeof(nic->primary_dns)))
+               if (address_not_null(nic->primary_dns))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_SECONDARY_DNS:
-               if (memcmp(nic->secondary_dns, nulls,
-                          sizeof(nic->secondary_dns)))
+               if (address_not_null(nic->secondary_dns))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_DHCP:
-               if (memcmp(nic->dhcp, nulls, sizeof(nic->dhcp)))
+               if (address_not_null(nic->dhcp))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_ETH_VLAN:
@@ -536,23 +546,19 @@ static umode_t __init ibft_check_initiator_for(void *data, int type)
                rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_ISNS_SERVER:
-               if (memcmp(init->isns_server, nulls,
-                          sizeof(init->isns_server)))
+               if (address_not_null(init->isns_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_SLP_SERVER:
-               if (memcmp(init->slp_server, nulls,
-                          sizeof(init->slp_server)))
+               if (address_not_null(init->slp_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_PRI_RADIUS_SERVER:
-               if (memcmp(init->pri_radius_server, nulls,
-                          sizeof(init->pri_radius_server)))
+               if (address_not_null(init->pri_radius_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_SEC_RADIUS_SERVER:
-               if (memcmp(init->sec_radius_server, nulls,
-                          sizeof(init->sec_radius_server)))
+               if (address_not_null(init->sec_radius_server))
                        rc = S_IRUGO;
                break;
        case ISCSI_BOOT_INI_INITIATOR_NAME:
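
A self-contained sketch of the new check (standalone C mirroring the hunks
above; it shows that an IPv4-mapped all-zero address is now treated the
same as an all-zero one):

        #include <stdio.h>
        #include <string.h>

        static const unsigned char nulls[16];
        static const unsigned char mapped_nulls[16] = {
                0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
        };

        static int address_not_null(const unsigned char *ip)
        {
                return memcmp(ip, nulls, 16) && memcmp(ip, mapped_nulls, 16);
        }

        int main(void)
        {
                unsigned char ip[16] = {
                        0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
                        0x00, 0x00, 0xff, 0xff, 0x00, 0x00, 0x00, 0x00
                };

                printf("%d\n", address_not_null(ip));   /* 0: ::ffff:0.0.0.0 is "unset" */
                ip[12] = 192;                           /* ::ffff:192.0.0.0 */
                printf("%d\n", address_not_null(ip));   /* 1: a real address */
                return 0;
        }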
index 6b8115f342085bb3b25f78ad8c13ed6ce10a9d7b..83f281dda1e0f41fc4be3c8d2cb4e02407ab837d 100644 (file)
@@ -117,7 +117,7 @@ static int kempld_gpio_get_direction(struct gpio_chip *chip, unsigned offset)
                = container_of(chip, struct kempld_gpio_data, chip);
        struct kempld_device_data *pld = gpio->pld;
 
-       return kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
+       return !kempld_gpio_get_bit(pld, KEMPLD_GPIO_DIR_NUM(offset), offset);
 }
 
 static int kempld_gpio_pincount(struct kempld_device_data *pld)
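
A note on the negation above (hedged, since the register documentation is
outside this hunk): gpiolib's get_direction() contract is to return
GPIOF_DIR_OUT (0) for outputs and GPIOF_DIR_IN (1) for inputs, while the
KEMPLD direction bit apparently reads 1 for output pins, so the raw bit
must be logically inverted:

        /* dir_bit == 1 (output) -> 0 == GPIOF_DIR_OUT
         * dir_bit == 0 (input)  -> 1 == GPIOF_DIR_IN
         */
        return !dir_bit;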
index cd1d5bf48f36e8a490e6a99fbafdf98c4b2abadf..b232397ad7ec1599ffda494f73fdc02fe8e83875 100644 (file)
@@ -1054,38 +1054,8 @@ static void omap_gpio_mod_init(struct gpio_bank *bank)
                dev_err(bank->dev, "Could not get gpio dbck\n");
 }
 
-static void
-omap_mpuio_alloc_gc(struct gpio_bank *bank, unsigned int irq_start,
-                   unsigned int num)
-{
-       struct irq_chip_generic *gc;
-       struct irq_chip_type *ct;
-
-       gc = irq_alloc_generic_chip("MPUIO", 1, irq_start, bank->base,
-                                   handle_simple_irq);
-       if (!gc) {
-               dev_err(bank->dev, "Memory alloc failed for gc\n");
-               return;
-       }
-
-       ct = gc->chip_types;
-
-       /* NOTE: No ack required, reading IRQ status clears it. */
-       ct->chip.irq_mask = irq_gc_mask_set_bit;
-       ct->chip.irq_unmask = irq_gc_mask_clr_bit;
-       ct->chip.irq_set_type = omap_gpio_irq_type;
-
-       if (bank->regs->wkup_en)
-               ct->chip.irq_set_wake = omap_gpio_wake_enable;
-
-       ct->regs.mask = OMAP_MPUIO_GPIO_INT / bank->stride;
-       irq_setup_generic_chip(gc, IRQ_MSK(num), IRQ_GC_INIT_MASK_CACHE,
-                              IRQ_NOREQUEST | IRQ_NOPROBE, 0);
-}
-
 static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
 {
-       int j;
        static int gpio;
        int irq_base = 0;
        int ret;
@@ -1132,6 +1102,15 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
        }
 #endif
 
+       /* MPUIO is a bit different: reading the IRQ status clears it */
+       if (bank->is_mpuio) {
+               irqc->irq_ack = dummy_irq_chip.irq_ack;
+               irqc->irq_mask = irq_gc_mask_set_bit;
+               irqc->irq_unmask = irq_gc_mask_clr_bit;
+               if (!bank->regs->wkup_en)
+                       irqc->irq_set_wake = NULL;
+       }
+
        ret = gpiochip_irqchip_add(&bank->chip, irqc,
                                   irq_base, omap_gpio_irq_handler,
                                   IRQ_TYPE_NONE);
@@ -1145,15 +1124,6 @@ static int omap_gpio_chip_init(struct gpio_bank *bank, struct irq_chip *irqc)
        gpiochip_set_chained_irqchip(&bank->chip, irqc,
                                     bank->irq, omap_gpio_irq_handler);
 
-       for (j = 0; j < bank->width; j++) {
-               int irq = irq_find_mapping(bank->chip.irqdomain, j);
-               if (bank->is_mpuio) {
-                       omap_mpuio_alloc_gc(bank, irq, bank->width);
-                       irq_set_chip_and_handler(irq, NULL, NULL);
-                       set_irq_flags(irq, 0);
-               }
-       }
-
        return 0;
 }
 
index d2303d50f56141c527c9d8b82c956c6c8169e239..725d16138b740e27a39d151ec5f7bfdedb9a969b 100644 (file)
@@ -550,7 +550,7 @@ acpi_gpio_adr_space_handler(u32 function, acpi_physical_address address,
 
        length = min(agpio->pin_table_length, (u16)(pin_index + bits));
        for (i = pin_index; i < length; ++i) {
-               unsigned pin = agpio->pin_table[i];
+               int pin = agpio->pin_table[i];
                struct acpi_gpio_connection *conn;
                struct gpio_desc *desc;
                bool found;
index 7722ed53bd651faae15692621d099551ef9bf308..af3bc7a8033bdcbaa2e93602bb107fbe12968d35 100644 (file)
@@ -551,6 +551,7 @@ static struct class gpio_class = {
  */
 int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
 {
+       struct gpio_chip        *chip;
        unsigned long           flags;
        int                     status;
        const char              *ioname = NULL;
@@ -568,8 +569,16 @@ int gpiod_export(struct gpio_desc *desc, bool direction_may_change)
                return -EINVAL;
        }
 
+       chip = desc->chip;
+
        mutex_lock(&sysfs_lock);
 
+       /* check if chip is being removed */
+       if (!chip || !chip->exported) {
+               status = -ENODEV;
+               goto fail_unlock;
+       }
+
        spin_lock_irqsave(&gpio_lock, flags);
        if (!test_bit(FLAG_REQUESTED, &desc->flags) ||
             test_bit(FLAG_EXPORT, &desc->flags)) {
@@ -783,12 +792,15 @@ void gpiochip_unexport(struct gpio_chip *chip)
 {
        int                     status;
        struct device           *dev;
+       struct gpio_desc *desc;
+       unsigned int i;
 
        mutex_lock(&sysfs_lock);
        dev = class_find_device(&gpio_class, NULL, chip, match_export);
        if (dev) {
                put_device(dev);
                device_unregister(dev);
+               /* prevent further gpiod exports */
                chip->exported = false;
                status = 0;
        } else
@@ -797,6 +809,13 @@ void gpiochip_unexport(struct gpio_chip *chip)
 
        if (status)
                chip_dbg(chip, "%s: status %d\n", __func__, status);
+
+       /* unregister gpiod class devices owned by sysfs */
+       for (i = 0; i < chip->ngpio; i++) {
+               desc = &chip->desc[i];
+               if (test_and_clear_bit(FLAG_SYSFS, &desc->flags))
+                       gpiod_free(desc);
+       }
 }
 
 static int __init gpiolib_sysfs_init(void)
index 59eaa23767d8dca5bddf740fa90a24c9699c4a0b..6bc612b8a49fcf859261173e00d0e7389d7d2b05 100644 (file)
@@ -53,6 +53,11 @@ static DEFINE_MUTEX(gpio_lookup_lock);
 static LIST_HEAD(gpio_lookup_list);
 LIST_HEAD(gpio_chips);
 
+
+static void gpiochip_free_hogs(struct gpio_chip *chip);
+static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
+
+
 static inline void desc_set_label(struct gpio_desc *d, const char *label)
 {
        d->label = label;
@@ -297,6 +302,7 @@ int gpiochip_add(struct gpio_chip *chip)
 
 err_remove_chip:
        acpi_gpiochip_remove(chip);
+       gpiochip_free_hogs(chip);
        of_gpiochip_remove(chip);
        spin_lock_irqsave(&gpio_lock, flags);
        list_del(&chip->list);
@@ -313,10 +319,6 @@ err_free_descs:
 }
 EXPORT_SYMBOL_GPL(gpiochip_add);
 
-/* Forward-declaration */
-static void gpiochip_irqchip_remove(struct gpio_chip *gpiochip);
-static void gpiochip_free_hogs(struct gpio_chip *chip);
-
 /**
  * gpiochip_remove() - unregister a gpio_chip
  * @chip: the chip to unregister
index 69af73f153103075f00c9344f836bd7eb3b24668..596ee5cd3b842df597c57ee890d16a97ec8cc9ae 100644 (file)
@@ -430,9 +430,10 @@ static int unregister_process_nocpsch(struct device_queue_manager *dqm,
 
        BUG_ON(!dqm || !qpd);
 
-       BUG_ON(!list_empty(&qpd->queues_list));
+       pr_debug("In func %s\n", __func__);
 
-       pr_debug("kfd: In func %s\n", __func__);
+       pr_debug("qpd->queues_list is %s\n",
+                       list_empty(&qpd->queues_list) ? "empty" : "not empty");
 
        retval = 0;
        mutex_lock(&dqm->lock);
@@ -882,6 +883,8 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
                return -ENOMEM;
        }
 
+       init_sdma_vm(dqm, q, qpd);
+
        retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
                                &q->gart_mqd_addr, &q->properties);
        if (retval != 0)
index 661c6605d31b39033a42a5d4297e1684ed734011..c25728bc388a2be7134cb3e6b895a7a39d4189a2 100644 (file)
@@ -684,8 +684,6 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                        dev->node_props.cpu_core_id_base);
        sysfs_show_32bit_prop(buffer, "simd_id_base",
                        dev->node_props.simd_id_base);
-       sysfs_show_32bit_prop(buffer, "capability",
-                       dev->node_props.capability);
        sysfs_show_32bit_prop(buffer, "max_waves_per_simd",
                        dev->node_props.max_waves_per_simd);
        sysfs_show_32bit_prop(buffer, "lds_size_in_kb",
@@ -728,14 +726,16 @@ static ssize_t node_show(struct kobject *kobj, struct attribute *attr,
                sysfs_show_32bit_prop(buffer, "max_engine_clk_fcompute",
                        dev->gpu->kfd2kgd->get_max_engine_clock_in_mhz(
                                        dev->gpu->kgd));
+
                sysfs_show_64bit_prop(buffer, "local_mem_size",
-                       dev->gpu->kfd2kgd->get_vmem_size(
-                                       dev->gpu->kgd));
+                               (unsigned long long int) 0);
 
                sysfs_show_32bit_prop(buffer, "fw_version",
                        dev->gpu->kfd2kgd->get_fw_version(
                                                dev->gpu->kgd,
                                                KGD_ENGINE_MEC1));
+               sysfs_show_32bit_prop(buffer, "capability",
+                               dev->node_props.capability);
        }
 
        return sysfs_show_32bit_prop(buffer, "max_engine_clk_ccompute",
index 266dcd6cdf3bf3ad1d487ad70110496b294e5491..0a957828b3bd3161d17b21b9e5f946307e2d17da 100644 (file)
@@ -36,9 +36,6 @@
 
 #include <linux/pci.h>
 #include <linux/export.h>
-#ifdef CONFIG_X86
-#include <asm/mtrr.h>
-#endif
 
 static int drm_version(struct drm_device *dev, void *data,
                       struct drm_file *file_priv);
@@ -197,16 +194,7 @@ static int drm_getmap(struct drm_device *dev, void *data,
        map->type = r_list->map->type;
        map->flags = r_list->map->flags;
        map->handle = (void *)(unsigned long) r_list->user_token;
-
-#ifdef CONFIG_X86
-       /*
-        * There appears to be exactly one user of the mtrr index: dritest.
-        * It's easy enough to keep it working on non-PAT systems.
-        */
-       map->mtrr = phys_wc_to_mtrr_index(r_list->map->mtrr);
-#else
-       map->mtrr = -1;
-#endif
+       map->mtrr = arch_phys_wc_index(r_list->map->mtrr);
 
        mutex_unlock(&dev->struct_mutex);
 
index c8a34476570a4e95bd1c2abb67660fa5f5b57620..af9662e582727ba403cc829bc89ae9a4e3878b3b 100644 (file)
@@ -131,12 +131,11 @@ static void drm_update_vblank_count(struct drm_device *dev, int crtc)
 
        /* Reinitialize corresponding vblank timestamp if high-precision query
         * available. Skip this step if query unsupported or failed. Will
-        * reinitialize delayed at next vblank interrupt in that case.
+        * be reinitialized at the next vblank interrupt in that case;
+        * assign 0 for now to mark the vblank timestamp as invalid.
         */
-       if (rc) {
-               tslot = atomic_read(&vblank->count) + diff;
-               vblanktimestamp(dev, crtc, tslot) = t_vblank;
-       }
+       tslot = atomic_read(&vblank->count) + diff;
+       vblanktimestamp(dev, crtc, tslot) = rc ? t_vblank : (struct timeval) {0, 0};
 
        smp_mb__before_atomic();
        atomic_add(diff, &vblank->count);
index 40c1db9ad7c3fac84365cc331ec67661e902abb5..2f0ed11024eb8322676e000066a238e55b0dcb9b 100644 (file)
@@ -465,6 +465,9 @@ int drm_plane_helper_commit(struct drm_plane *plane,
                if (!crtc[i])
                        continue;
 
+               if (crtc[i]->cursor == plane)
+                       continue;
+
                /* There's no other way to figure out whether the crtc is running. */
                ret = drm_crtc_vblank_get(crtc[i]);
                if (ret == 0) {
index ffc305fc20768c29af6883eeb2d70553839cfa6e..eb7e61078a5b6f1088489b49b42b32abe8ffca42 100644 (file)
@@ -217,7 +217,7 @@ static ssize_t status_store(struct device *device,
 
        mutex_unlock(&dev->mode_config.mutex);
 
-       return ret;
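+       /* on success a sysfs ->store() must return the number of bytes consumed */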
+       return ret ? ret : count;
 }
 
 static ssize_t status_show(struct device *device,
index 1f7e33f59de69e3ecf6c4f4ac865c1746d491d8d..6714e5b193ead813ab44f77d0f890f5cd359491f 100644 (file)
@@ -91,7 +91,7 @@ static void decon_wait_for_vblank(struct exynos_drm_crtc *crtc)
 
 static void decon_clear_channel(struct decon_context *ctx)
 {
-       int win, ch_enabled = 0;
+       unsigned int win, ch_enabled = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -710,7 +710,7 @@ static void decon_dpms(struct exynos_drm_crtc *crtc, int mode)
        }
 }
 
-static struct exynos_drm_crtc_ops decon_crtc_ops = {
+static const struct exynos_drm_crtc_ops decon_crtc_ops = {
        .dpms = decon_dpms,
        .mode_fixup = decon_mode_fixup,
        .commit = decon_commit,
index 1dbfba58f9091b70aac969a55e270d844ea5fb05..30feb7d066244bfa078e36a518293a9195c3437c 100644 (file)
@@ -32,7 +32,6 @@
 #include <drm/bridge/ptn3460.h>
 
 #include "exynos_dp_core.h"
-#include "exynos_drm_fimd.h"
 
 #define ctx_from_connector(c)  container_of(c, struct exynos_dp_device, \
                                        connector)
@@ -196,7 +195,7 @@ static int exynos_dp_read_edid(struct exynos_dp_device *dp)
                }
        }
 
-       dev_err(dp->dev, "EDID Read success!\n");
+       dev_dbg(dp->dev, "EDID Read success!\n");
        return 0;
 }
 
@@ -1066,6 +1065,8 @@ static void exynos_dp_phy_exit(struct exynos_dp_device *dp)
 
 static void exynos_dp_poweron(struct exynos_dp_device *dp)
 {
+       struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
+
        if (dp->dpms_mode == DRM_MODE_DPMS_ON)
                return;
 
@@ -1076,7 +1077,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
                }
        }
 
-       fimd_dp_clock_enable(dp_to_crtc(dp), true);
+       if (crtc->ops->clock_enable)
+               crtc->ops->clock_enable(dp_to_crtc(dp), true);
 
        clk_prepare_enable(dp->clock);
        exynos_dp_phy_init(dp);
@@ -1087,6 +1089,8 @@ static void exynos_dp_poweron(struct exynos_dp_device *dp)
 
 static void exynos_dp_poweroff(struct exynos_dp_device *dp)
 {
+       struct exynos_drm_crtc *crtc = dp_to_crtc(dp);
+
        if (dp->dpms_mode != DRM_MODE_DPMS_ON)
                return;
 
@@ -1102,7 +1106,8 @@ static void exynos_dp_poweroff(struct exynos_dp_device *dp)
        exynos_dp_phy_exit(dp);
        clk_disable_unprepare(dp->clock);
 
-       fimd_dp_clock_enable(dp_to_crtc(dp), false);
+       if (crtc->ops->clock_enable)
+               crtc->ops->clock_enable(dp_to_crtc(dp), false);
 
        if (dp->panel) {
                if (drm_panel_unprepare(dp->panel))
index eb49195cec5c2396831ebfbf37e42bb5fa54ed81..9006b947e03c0a431141144c7ed21ae88b14f02b 100644 (file)
@@ -238,11 +238,11 @@ static struct drm_crtc_funcs exynos_crtc_funcs = {
 };
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
-                                              struct drm_plane *plane,
-                                              int pipe,
-                                              enum exynos_drm_output_type type,
-                                              struct exynos_drm_crtc_ops *ops,
-                                              void *ctx)
+                                       struct drm_plane *plane,
+                                       int pipe,
+                                       enum exynos_drm_output_type type,
+                                       const struct exynos_drm_crtc_ops *ops,
+                                       void *ctx)
 {
        struct exynos_drm_crtc *exynos_crtc;
        struct exynos_drm_private *private = drm_dev->dev_private;
index 0ecd8fc45cff3349afe5d5858a713da124e30944..0f3aa70818e31059bfc6369c258dcd326fa07d53 100644 (file)
 #include "exynos_drm_drv.h"
 
 struct exynos_drm_crtc *exynos_drm_crtc_create(struct drm_device *drm_dev,
-                                              struct drm_plane *plane,
-                                              int pipe,
-                                              enum exynos_drm_output_type type,
-                                              struct exynos_drm_crtc_ops *ops,
-                                              void *context);
+                                       struct drm_plane *plane,
+                                       int pipe,
+                                       enum exynos_drm_output_type type,
+                                       const struct exynos_drm_crtc_ops *ops,
+                                       void *context);
 int exynos_drm_crtc_enable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_disable_vblank(struct drm_device *dev, int pipe);
 void exynos_drm_crtc_finish_pageflip(struct drm_device *dev, int pipe);
index e12ecb5d5d9aa01a7618eafb8930b5dc552cb14e..29e3fb78c615fa7ad58ee21a1c7c0ad19bd6ffd3 100644 (file)
@@ -71,13 +71,6 @@ enum exynos_drm_output_type {
  * @dma_addr: array of bus(accessed by dma) address to the memory region
  *           allocated for a overlay.
  * @zpos: order of overlay layer(z position).
- * @index_color: if using color key feature then this value would be used
- *                     as index color.
- * @default_win: a window to be enabled.
- * @color_key: color key on or off.
- * @local_path: in case of lcd type, local path mode on or off.
- * @transparency: transparency on or off.
- * @activated: activated or not.
  * @enabled: enabled or not.
  * @resume: to resume or not.
  *
@@ -108,13 +101,7 @@ struct exynos_drm_plane {
        uint32_t pixel_format;
        dma_addr_t dma_addr[MAX_FB_BUFFER];
        unsigned int zpos;
-       unsigned int index_color;
 
-       bool default_win:1;
-       bool color_key:1;
-       bool local_path:1;
-       bool transparency:1;
-       bool activated:1;
        bool enabled:1;
        bool resume:1;
 };
@@ -181,6 +168,10 @@ struct exynos_drm_display {
  * @win_disable: disable hardware specific overlay.
  * @te_handler: trigger to transfer video image at the tearing effect
  *     synchronization signal if there is a page flip request.
+ * @clock_enable: optional function enabling/disabling display domain clock,
+ *     called from exynos-dp driver before powering up (with
+ *     'enable' argument as true) and after powering down (with
+ *     'enable' as false).
  */
 struct exynos_drm_crtc;
 struct exynos_drm_crtc_ops {
@@ -195,6 +186,7 @@ struct exynos_drm_crtc_ops {
        void (*win_commit)(struct exynos_drm_crtc *crtc, unsigned int zpos);
        void (*win_disable)(struct exynos_drm_crtc *crtc, unsigned int zpos);
        void (*te_handler)(struct exynos_drm_crtc *crtc);
+       void (*clock_enable)(struct exynos_drm_crtc *crtc, bool enable);
 };
 
 /*
@@ -221,7 +213,7 @@ struct exynos_drm_crtc {
        unsigned int                    dpms;
        wait_queue_head_t               pending_flip_queue;
        struct drm_pending_vblank_event *event;
-       struct exynos_drm_crtc_ops      *ops;
+       const struct exynos_drm_crtc_ops        *ops;
        void                            *ctx;
 };
 
index 929cb03a8eab15d6daee766016dc59070130a240..142eb4e3f59ea5501805cc56bed7cbef4106ae7b 100644 (file)
@@ -171,43 +171,6 @@ exynos_drm_framebuffer_init(struct drm_device *dev,
        return &exynos_fb->fb;
 }
 
-static u32 exynos_drm_format_num_buffers(struct drm_mode_fb_cmd2 *mode_cmd)
-{
-       unsigned int cnt = 0;
-
-       if (mode_cmd->pixel_format != DRM_FORMAT_NV12)
-               return drm_format_num_planes(mode_cmd->pixel_format);
-
-       while (cnt != MAX_FB_BUFFER) {
-               if (!mode_cmd->handles[cnt])
-                       break;
-               cnt++;
-       }
-
-       /*
-        * check if NV12 or NV12M.
-        *
-        * NV12
-        * handles[0] = base1, offsets[0] = 0
-        * handles[1] = base1, offsets[1] = Y_size
-        *
-        * NV12M
-        * handles[0] = base1, offsets[0] = 0
-        * handles[1] = base2, offsets[1] = 0
-        */
-       if (cnt == 2) {
-               /*
-                * in case of NV12 format, offsets[1] is not 0 and
-                * handles[0] is same as handles[1].
-                */
-               if (mode_cmd->offsets[1] &&
-                       mode_cmd->handles[0] == mode_cmd->handles[1])
-                       cnt = 1;
-       }
-
-       return cnt;
-}
-
 static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
                      struct drm_mode_fb_cmd2 *mode_cmd)
@@ -230,7 +193,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 
        drm_helper_mode_fill_fb_struct(&exynos_fb->fb, mode_cmd);
        exynos_fb->exynos_gem_obj[0] = to_exynos_gem_obj(obj);
-       exynos_fb->buf_cnt = exynos_drm_format_num_buffers(mode_cmd);
+       exynos_fb->buf_cnt = drm_format_num_planes(mode_cmd->pixel_format);
 
        DRM_DEBUG_KMS("buf_cnt = %d\n", exynos_fb->buf_cnt);
 
index 9819fa6a9e2a41867da1ddaab9fb052dc0a74310..a0edab833148adf2abf99a3cde1d60eee6d5619b 100644 (file)
@@ -33,7 +33,6 @@
 #include "exynos_drm_crtc.h"
 #include "exynos_drm_plane.h"
 #include "exynos_drm_iommu.h"
-#include "exynos_drm_fimd.h"
 
 /*
  * FIMD stands for Fully Interactive Mobile Display and
@@ -216,7 +215,7 @@ static void fimd_wait_for_vblank(struct exynos_drm_crtc *crtc)
                DRM_DEBUG_KMS("vblank wait timed out.\n");
 }
 
-static void fimd_enable_video_output(struct fimd_context *ctx, int win,
+static void fimd_enable_video_output(struct fimd_context *ctx, unsigned int win,
                                        bool enable)
 {
        u32 val = readl(ctx->regs + WINCON(win));
@@ -229,7 +228,8 @@ static void fimd_enable_video_output(struct fimd_context *ctx, int win,
        writel(val, ctx->regs + WINCON(win));
 }
 
-static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
+static void fimd_enable_shadow_channel_path(struct fimd_context *ctx,
+                                               unsigned int win,
                                                bool enable)
 {
        u32 val = readl(ctx->regs + SHADOWCON);
@@ -244,7 +244,7 @@ static void fimd_enable_shadow_channel_path(struct fimd_context *ctx, int win,
 
 static void fimd_clear_channel(struct fimd_context *ctx)
 {
-       int win, ch_enabled = 0;
+       unsigned int win, ch_enabled = 0;
 
        DRM_DEBUG_KMS("%s\n", __FILE__);
 
@@ -946,7 +946,24 @@ static void fimd_te_handler(struct exynos_drm_crtc *crtc)
                drm_handle_vblank(ctx->drm_dev, ctx->pipe);
 }
 
-static struct exynos_drm_crtc_ops fimd_crtc_ops = {
+static void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
+{
+       struct fimd_context *ctx = crtc->ctx;
+       u32 val;
+
+       /*
+        * Only Exynos 5250, 5260, 5410 and 542x require enabling the DP/MIE
+        * clock. On these SoCs the bootloader may enable it but any
+        * power domain off/on will reset it to disable state.
+        */
+       if (ctx->driver_data != &exynos5_fimd_driver_data)
+               return;
+
+       val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
+       writel(val, ctx->regs + DP_MIE_CLKCON);
+}
+
+static const struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .dpms = fimd_dpms,
        .mode_fixup = fimd_mode_fixup,
        .commit = fimd_commit,
@@ -956,6 +973,7 @@ static struct exynos_drm_crtc_ops fimd_crtc_ops = {
        .win_commit = fimd_win_commit,
        .win_disable = fimd_win_disable,
        .te_handler = fimd_te_handler,
+       .clock_enable = fimd_dp_clock_enable,
 };
 
 static irqreturn_t fimd_irq_handler(int irq, void *dev_id)
@@ -1025,12 +1043,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
        if (ctx->display)
                exynos_drm_create_enc_conn(drm_dev, ctx->display);
 
-       ret = fimd_iommu_attach_devices(ctx, drm_dev);
-       if (ret)
-               return ret;
-
-       return 0;
-
+       return fimd_iommu_attach_devices(ctx, drm_dev);
 }
 
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1192,24 +1205,6 @@ static int fimd_remove(struct platform_device *pdev)
        return 0;
 }
 
-void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable)
-{
-       struct fimd_context *ctx = crtc->ctx;
-       u32 val;
-
-       /*
-        * Only Exynos 5250, 5260, 5410 and 542x requires enabling DP/MIE
-        * clock. On these SoCs the bootloader may enable it but any
-        * power domain off/on will reset it to disable state.
-        */
-       if (ctx->driver_data != &exynos5_fimd_driver_data)
-               return;
-
-       val = enable ? DP_MIE_CLK_DP_ENABLE : DP_MIE_CLK_DISABLE;
-       writel(DP_MIE_CLK_DP_ENABLE, ctx->regs + DP_MIE_CLKCON);
-}
-EXPORT_SYMBOL_GPL(fimd_dp_clock_enable);
-
 struct platform_driver fimd_driver = {
        .probe          = fimd_probe,
        .remove         = fimd_remove,
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.h b/drivers/gpu/drm/exynos/exynos_drm_fimd.h
deleted file mode 100644 (file)
index b4fcaa5..0000000
+++ /dev/null
@@ -1,15 +0,0 @@
-/*
- * Copyright (c) 2015 Samsung Electronics Co., Ltd.
- *
- * This program is free software; you can redistribute  it and/or modify it
- * under  the terms of  the GNU General  Public License as published by the
- * Free Software Foundation;  either version 2 of the  License, or (at your
- * option) any later version.
- */
-
-#ifndef _EXYNOS_DRM_FIMD_H_
-#define _EXYNOS_DRM_FIMD_H_
-
-extern void fimd_dp_clock_enable(struct exynos_drm_crtc *crtc, bool enable);
-
-#endif /* _EXYNOS_DRM_FIMD_H_ */
index 13ea3349363b153a8225aece3acc3e70a05dbe51..b1180fbe754690f700c124bdd50f72adb28d2295 100644 (file)
@@ -76,7 +76,7 @@ int exynos_check_plane(struct drm_plane *plane, struct drm_framebuffer *fb)
                        return -EFAULT;
                }
 
-               exynos_plane->dma_addr[i] = buffer->dma_addr;
+               exynos_plane->dma_addr[i] = buffer->dma_addr + fb->offsets[i];
 
                DRM_DEBUG_KMS("buffer: %d, dma_addr = 0x%lx\n",
                                i, (unsigned long)exynos_plane->dma_addr[i]);
index 27e84ec21694d587f26a78d258c216d52bd450ea..1b3479a8db5f08dd2b3c1c4fa0de5758d2ae7b26 100644 (file)
@@ -217,7 +217,7 @@ static int vidi_ctx_initialize(struct vidi_context *ctx,
        return 0;
 }
 
-static struct exynos_drm_crtc_ops vidi_crtc_ops = {
+static const struct exynos_drm_crtc_ops vidi_crtc_ops = {
        .dpms = vidi_dpms,
        .enable_vblank = vidi_enable_vblank,
        .disable_vblank = vidi_disable_vblank,
index fbec750574e64a3158b232d15ef309b29ed5340c..8874c1fcb3ab778b7faa82b0cedf1e47a7c2cd7c 100644 (file)
 #define MIXER_WIN_NR           3
 #define MIXER_DEFAULT_WIN      0
 
+/* The pixelformats that are natively supported by the mixer. */
+#define MXR_FORMAT_RGB565      4
+#define MXR_FORMAT_ARGB1555    5
+#define MXR_FORMAT_ARGB4444    6
+#define MXR_FORMAT_ARGB8888    7
+
 struct mixer_resources {
        int                     irq;
        void __iomem            *mixer_regs;
@@ -327,7 +333,8 @@ static void mixer_cfg_rgb_fmt(struct mixer_context *ctx, unsigned int height)
        mixer_reg_writemask(res, MXR_CFG, val, MXR_CFG_RGB_FMT_MASK);
 }
 
-static void mixer_cfg_layer(struct mixer_context *ctx, int win, bool enable)
+static void mixer_cfg_layer(struct mixer_context *ctx, unsigned int win,
+                               bool enable)
 {
        struct mixer_resources *res = &ctx->mixer_res;
        u32 val = enable ? ~0 : 0;
@@ -359,8 +366,6 @@ static void mixer_run(struct mixer_context *ctx)
        struct mixer_resources *res = &ctx->mixer_res;
 
        mixer_reg_writemask(res, MXR_STATUS, ~0, MXR_STATUS_REG_RUN);
-
-       mixer_regs_dump(ctx);
 }
 
 static void mixer_stop(struct mixer_context *ctx)
@@ -373,16 +378,13 @@ static void mixer_stop(struct mixer_context *ctx)
        while (!(mixer_reg_read(res, MXR_STATUS) & MXR_STATUS_REG_IDLE) &&
                        --timeout)
                usleep_range(10000, 12000);
-
-       mixer_regs_dump(ctx);
 }
 
-static void vp_video_buffer(struct mixer_context *ctx, int win)
+static void vp_video_buffer(struct mixer_context *ctx, unsigned int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
        unsigned long flags;
        struct exynos_drm_plane *plane;
-       unsigned int buf_num = 1;
        dma_addr_t luma_addr[2], chroma_addr[2];
        bool tiled_mode = false;
        bool crcb_mode = false;
@@ -393,27 +395,18 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        switch (plane->pixel_format) {
        case DRM_FORMAT_NV12:
                crcb_mode = false;
-               buf_num = 2;
                break;
-       /* TODO: single buffer format NV12, NV21 */
+       case DRM_FORMAT_NV21:
+               crcb_mode = true;
+               break;
        default:
-               /* ignore pixel format at disable time */
-               if (!plane->dma_addr[0])
-                       break;
-
                DRM_ERROR("pixel format for vp is wrong [%d].\n",
                                plane->pixel_format);
                return;
        }
 
-       if (buf_num == 2) {
-               luma_addr[0] = plane->dma_addr[0];
-               chroma_addr[0] = plane->dma_addr[1];
-       } else {
-               luma_addr[0] = plane->dma_addr[0];
-               chroma_addr[0] = plane->dma_addr[0]
-                       + (plane->pitch * plane->fb_height);
-       }
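+       /* NV12 and NV12M look the same at this point: exynos_check_plane()
+        * already folds fb->offsets[] into each dma_addr[] entry, so the
+        * chroma plane always has its own base address.
+        */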
+       luma_addr[0] = plane->dma_addr[0];
+       chroma_addr[0] = plane->dma_addr[1];
 
        if (plane->scan_flag & DRM_MODE_FLAG_INTERLACE) {
                ctx->interlace = true;
@@ -484,6 +477,7 @@ static void vp_video_buffer(struct mixer_context *ctx, int win)
        mixer_vsync_set_update(ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
 
+       mixer_regs_dump(ctx);
        vp_regs_dump(ctx);
 }
 
@@ -518,7 +512,7 @@ fail:
        return -ENOTSUPP;
 }
 
-static void mixer_graph_buffer(struct mixer_context *ctx, int win)
+static void mixer_graph_buffer(struct mixer_context *ctx, unsigned int win)
 {
        struct mixer_resources *res = &ctx->mixer_res;
        unsigned long flags;
@@ -531,20 +525,27 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 
        plane = &ctx->planes[win];
 
-       #define RGB565 4
-       #define ARGB1555 5
-       #define ARGB4444 6
-       #define ARGB8888 7
+       switch (plane->pixel_format) {
+       case DRM_FORMAT_XRGB4444:
+               fmt = MXR_FORMAT_ARGB4444;
+               break;
 
-       switch (plane->bpp) {
-       case 16:
-               fmt = ARGB4444;
+       case DRM_FORMAT_XRGB1555:
+               fmt = MXR_FORMAT_ARGB1555;
                break;
-       case 32:
-               fmt = ARGB8888;
+
+       case DRM_FORMAT_RGB565:
+               fmt = MXR_FORMAT_RGB565;
+               break;
+
+       case DRM_FORMAT_XRGB8888:
+       case DRM_FORMAT_ARGB8888:
+               fmt = MXR_FORMAT_ARGB8888;
                break;
+
        default:
-               fmt = ARGB8888;
+               DRM_DEBUG_KMS("pixelformat unsupported by mixer\n");
+               return;
        }
 
        /* check if mixer supports requested scaling setup */
@@ -617,6 +618,8 @@ static void mixer_graph_buffer(struct mixer_context *ctx, int win)
 
        mixer_vsync_set_update(ctx, true);
        spin_unlock_irqrestore(&res->reg_slock, flags);
+
+       mixer_regs_dump(ctx);
 }
 
 static void vp_win_reset(struct mixer_context *ctx)
@@ -1070,6 +1073,7 @@ static void mixer_poweroff(struct mixer_context *ctx)
        mutex_unlock(&ctx->mixer_mutex);
 
        mixer_stop(ctx);
+       mixer_regs_dump(ctx);
        mixer_window_suspend(ctx);
 
        ctx->int_en = mixer_reg_read(res, MXR_INT_EN);
@@ -1126,7 +1130,7 @@ int mixer_check_mode(struct drm_display_mode *mode)
        return -EINVAL;
 }
 
-static struct exynos_drm_crtc_ops mixer_crtc_ops = {
+static const struct exynos_drm_crtc_ops mixer_crtc_ops = {
        .dpms                   = mixer_dpms,
        .enable_vblank          = mixer_enable_vblank,
        .disable_vblank         = mixer_disable_vblank,
@@ -1156,7 +1160,7 @@ static struct mixer_drv_data exynos4210_mxr_drv_data = {
        .has_sclk = 1,
 };
 
-static struct platform_device_id mixer_driver_types[] = {
+static const struct platform_device_id mixer_driver_types[] = {
        {
                .name           = "s5p-mixer",
                .driver_data    = (unsigned long)&exynos4210_mxr_drv_data,
index 007c7d7d82950f597bb05ef8388fb1696ef72b38..dc55c51964ab501720f02ae682118ce12a51f0ff 100644 (file)
@@ -1667,12 +1667,15 @@ static int i915_sr_status(struct seq_file *m, void *unused)
 
        if (HAS_PCH_SPLIT(dev))
                sr_enabled = I915_READ(WM1_LP_ILK) & WM1_LP_SR_EN;
-       else if (IS_CRESTLINE(dev) || IS_I945G(dev) || IS_I945GM(dev))
+       else if (IS_CRESTLINE(dev) || IS_G4X(dev) ||
+                IS_I945G(dev) || IS_I945GM(dev))
                sr_enabled = I915_READ(FW_BLC_SELF) & FW_BLC_SELF_EN;
        else if (IS_I915GM(dev))
                sr_enabled = I915_READ(INSTPM) & INSTPM_SELF_EN;
        else if (IS_PINEVIEW(dev))
                sr_enabled = I915_READ(DSPFW3) & PINEVIEW_SELF_REFRESH_EN;
+       else if (IS_VALLEYVIEW(dev))
+               sr_enabled = I915_READ(FW_BLC_SELF_VLV) & FW_CSPWRDWNEN;
 
        intel_runtime_pm_put(dev_priv);
 
index c302ffb5a16814b41086abe52a25c7f7debd40a8..a19d2c71e20505aba9ca25158d6e5acd6ba6f920 100644 (file)
@@ -699,6 +699,16 @@ static int i915_drm_resume(struct drm_device *dev)
        intel_init_pch_refclk(dev);
        drm_mode_config_reset(dev);
 
+       /*
+        * Interrupts have to be enabled before any batches are run. If not the
+        * GPU will hang. i915_gem_init_hw() will initiate batches to
+        * update/restore the context.
+        *
+        * Modeset enabling in intel_modeset_init_hw() also needs working
+        * interrupts.
+        */
+       intel_runtime_pm_enable_interrupts(dev_priv);
+
        mutex_lock(&dev->struct_mutex);
        if (i915_gem_init_hw(dev)) {
                DRM_ERROR("failed to re-initialize GPU, declaring wedged!\n");
@@ -706,9 +716,6 @@ static int i915_drm_resume(struct drm_device *dev)
        }
        mutex_unlock(&dev->struct_mutex);
 
-       /* We need working interrupts for modeset enabling ... */
-       intel_runtime_pm_enable_interrupts(dev_priv);
-
        intel_modeset_init_hw(dev);
 
        spin_lock_irq(&dev_priv->irq_lock);
index 53394f998a1f9429f87b78598a69e232a48d5b38..2d0995e7afc37482a594be7e25b5baaadc6a6798 100644 (file)
@@ -3003,8 +3003,8 @@ int i915_vma_unbind(struct i915_vma *vma)
                } else if (vma->ggtt_view.pages) {
                        sg_free_table(vma->ggtt_view.pages);
                        kfree(vma->ggtt_view.pages);
-                       vma->ggtt_view.pages = NULL;
                }
+               vma->ggtt_view.pages = NULL;
        }
 
        drm_mm_remove_node(&vma->node);
index a3190e793ed43744980bedba4ed42e1d0e38d597..cc552a4c1f3b20a714e2ba73bbb59d3559451d0e 100644 (file)
@@ -32,6 +32,7 @@
 #include "i915_trace.h"
 #include "intel_drv.h"
 #include <linux/dma_remapping.h>
+#include <linux/uaccess.h>
 
 #define  __EXEC_OBJECT_HAS_PIN (1<<31)
 #define  __EXEC_OBJECT_HAS_FENCE (1<<30)
@@ -465,7 +466,7 @@ i915_gem_execbuffer_relocate_entry(struct drm_i915_gem_object *obj,
        }
 
        /* We can't wait for rendering with pagefaults disabled */
-       if (obj->active && in_atomic())
+       if (obj->active && pagefault_disabled())
                return -EFAULT;
 
        if (use_cpu_reloc(obj))
index d547d9c8dda2226909fcdbc5193dde4e2013246a..d0f3cbc87474c1ff9aff408fd692426a7e70c5d4 100644 (file)
@@ -13635,9 +13635,6 @@ static const struct intel_dmi_quirk intel_dmi_quirks[] = {
 };
 
 static struct intel_quirk intel_quirks[] = {
-       /* HP Mini needs pipe A force quirk (LP: #322104) */
-       { 0x27ae, 0x103c, 0x361a, quirk_pipea_force },
-
        /* Toshiba Protege R-205, S-209 needs pipe A force quirk */
        { 0x2592, 0x1179, 0x0001, quirk_pipea_force },
 
index d0237102c27ecf1323e39ee1d67cee07e3760f60..d714a4b5711e4e7fa390ec6b659d2683ef41f585 100644 (file)
@@ -880,10 +880,8 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                                      DP_AUX_CH_CTL_RECEIVE_ERROR))
                                continue;
                        if (status & DP_AUX_CH_CTL_DONE)
-                               break;
+                               goto done;
                }
-               if (status & DP_AUX_CH_CTL_DONE)
-                       break;
        }
 
        if ((status & DP_AUX_CH_CTL_DONE) == 0) {
@@ -892,6 +890,7 @@ intel_dp_aux_ch(struct intel_dp *intel_dp,
                goto out;
        }
 
+done:
        /* Check for timeout or receive error.
         * Timeouts occur when the sink is not connected
         */
@@ -1348,7 +1347,7 @@ intel_dp_compute_config(struct intel_encoder *encoder,
 
        pipe_config->has_dp_encoder = true;
        pipe_config->has_drrs = false;
-       pipe_config->has_audio = intel_dp->has_audio;
+       pipe_config->has_audio = intel_dp->has_audio && port != PORT_A;
 
        if (is_edp(intel_dp) && intel_connector->panel.fixed_mode) {
                intel_fixed_panel_mode(intel_connector->panel.fixed_mode,
@@ -2211,8 +2210,8 @@ static void intel_dp_get_config(struct intel_encoder *encoder,
        int dotclock;
 
        tmp = I915_READ(intel_dp->output_reg);
-       if (tmp & DP_AUDIO_OUTPUT_ENABLE)
-               pipe_config->has_audio = true;
+
+       pipe_config->has_audio = tmp & DP_AUDIO_OUTPUT_ENABLE && port != PORT_A;
 
        if ((port == PORT_A) || !HAS_PCH_CPT(dev)) {
                if (tmp & DP_SYNC_HS_HIGH)
@@ -3812,7 +3811,8 @@ intel_dp_get_dpcd(struct intel_dp *intel_dp)
                        if (val == 0)
                                break;
 
-                       intel_dp->sink_rates[i] = val * 200;
+                       /* Value read is in kHz while drm clock is saved in deca-kHz */
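+                       /* e.g. val == 8100: 8100 * 200 = 1620000 kHz
+                        * (1.62 GHz), stored as 162000 in 10 kHz units.
+                        */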
+                       intel_dp->sink_rates[i] = (val * 200) / 10;
                }
                intel_dp->num_sink_rates = i;
        }
index 56e437e3158021a09641d188affc6129f0b1eda8..ae628001fd97873b67f99fb0128167858948afe6 100644 (file)
@@ -435,7 +435,7 @@ gmbus_xfer(struct i2c_adapter *adapter,
                                               struct intel_gmbus,
                                               adapter);
        struct drm_i915_private *dev_priv = bus->dev_priv;
-       int i, reg_offset;
+       int i = 0, inc, try = 0, reg_offset;
        int ret = 0;
 
        intel_aux_display_runtime_get(dev_priv);
@@ -448,12 +448,14 @@ gmbus_xfer(struct i2c_adapter *adapter,
 
        reg_offset = dev_priv->gpio_mmio_base;
 
+retry:
        I915_WRITE(GMBUS0 + reg_offset, bus->reg0);
 
-       for (i = 0; i < num; i++) {
+       for (; i < num; i += inc) {
+               inc = 1;
                if (gmbus_is_index_read(msgs, i, num)) {
                        ret = gmbus_xfer_index_read(dev_priv, &msgs[i]);
-                       i += 1;  /* set i to the index of the read xfer */
+                       inc = 2; /* an index read is two msgs */
                } else if (msgs[i].flags & I2C_M_RD) {
                        ret = gmbus_xfer_read(dev_priv, &msgs[i], 0);
                } else {
@@ -525,6 +527,18 @@ clear_err:
                         adapter->name, msgs[i].addr,
                         (msgs[i].flags & I2C_M_RD) ? 'r' : 'w', msgs[i].len);
 
+       /*
+        * Passive adapters sometimes NAK the first probe. Retry the first
+        * message once on -ENXIO for GMBUS transfers; the bit banging algorithm
+        * has retries internally. See also the retry loop in
+        * drm_do_probe_ddc_edid, which bails out on the first -ENXIO.
+        */
+       if (ret == -ENXIO && i == 0 && try++ == 0) {
+               DRM_DEBUG_KMS("GMBUS [%s] NAK on first message, retry\n",
+                             adapter->name);
+               goto retry;
+       }
+
        goto out;
 
 timeout:
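
For reference, why the loop above steps by a variable increment (the usual
I2C pattern; gmbus_is_index_read() itself is outside this hunk):

        /* An "index read" arrives from the I2C core as a message pair:
         *   msgs[i]     write, 1-2 bytes:  register index to read from
         *   msgs[i + 1] read,  N bytes:    the data
         * gmbus_xfer_index_read() consumes both, hence inc = 2 there and
         * inc = 1 for every other transfer. The one-shot -ENXIO retry
         * re-enters the loop without resetting i, which the i == 0 test
         * restricts to the first message.
         */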
index 09df74b8e917b1dac90d460be50d1c4c5152881c..424e6219778712dcaf0e7c5c1ae7c51709fce6ba 100644 (file)
@@ -1134,6 +1134,12 @@ static int gen8_init_common_ring(struct intel_engine_cs *ring)
        I915_WRITE_IMR(ring, ~(ring->irq_enable_mask | ring->irq_keep_mask));
        I915_WRITE(RING_HWSTAM(ring->mmio_base), 0xffffffff);
 
+       if (ring->status_page.obj) {
+               I915_WRITE(RING_HWS_PGA(ring->mmio_base),
+                          (u32)ring->status_page.gfx_addr);
+               POSTING_READ(RING_HWS_PGA(ring->mmio_base));
+       }
+
        I915_WRITE(RING_MODE_GEN7(ring),
                   _MASKED_BIT_DISABLE(GFX_REPLAY_MODE) |
                   _MASKED_BIT_ENABLE(GFX_RUN_LIST_ENABLE));
index 5abda1d2c0182ad9f4bec4ad7e9fa4c640cb9d32..fbcc7dff0d630f3292efa80d881a859954c91266 100644 (file)
@@ -813,12 +813,28 @@ static int intel_dual_link_lvds_callback(const struct dmi_system_id *id)
 static const struct dmi_system_id intel_dual_link_lvds[] = {
        {
                .callback = intel_dual_link_lvds_callback,
-               .ident = "Apple MacBook Pro (Core i5/i7 Series)",
+               .ident = "Apple MacBook Pro 15\" (2010)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro6,2"),
+               },
+       },
+       {
+               .callback = intel_dual_link_lvds_callback,
+               .ident = "Apple MacBook Pro 15\" (2011)",
                .matches = {
                        DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
                        DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro8,2"),
                },
        },
+       {
+               .callback = intel_dual_link_lvds_callback,
+               .ident = "Apple MacBook Pro 15\" (2012)",
+               .matches = {
+                       DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
+                       DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro9,1"),
+               },
+       },
        { }     /* terminating entry */
 };
 
@@ -848,6 +864,11 @@ static bool compute_is_dual_link_lvds(struct intel_lvds_encoder *lvds_encoder)
        if (i915.lvds_channel_mode > 0)
                return i915.lvds_channel_mode == 2;
 
+       /* single channel LVDS is limited to 112 MHz */
+       if (lvds_encoder->attached_connector->base.panel.fixed_mode->clock
+           > 112999)
+               return true;
+
        if (dmi_check_system(intel_dual_link_lvds))
                return true;
 
@@ -1111,6 +1132,8 @@ void intel_lvds_init(struct drm_device *dev)
 out:
        mutex_unlock(&dev->mode_config.mutex);
 
+       intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
+
        lvds_encoder->is_dual_link = compute_is_dual_link_lvds(lvds_encoder);
        DRM_DEBUG_KMS("detected %s-link lvds configuration\n",
                      lvds_encoder->is_dual_link ? "dual" : "single");
@@ -1125,7 +1148,6 @@ out:
        }
        drm_connector_register(connector);
 
-       intel_panel_init(&intel_connector->panel, fixed_mode, downclock_mode);
        intel_panel_setup_backlight(connector, INVALID_PIPE);
 
        return;
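
The dual-link decision above now has three tiers: an explicit i915.lvds_channel_mode parameter wins, any fixed mode above the single-channel limit of 112 MHz forces dual-link, and only then is the DMI quirk table consulted. A sketch of that precedence, assuming the clock is in kHz as drm mode clocks are:

#include <linux/types.h>

static bool is_dual_link(int param_mode, int fixed_mode_clock_khz)
{
	/* 1. explicit module parameter overrides everything */
	if (param_mode > 0)
		return param_mode == 2;
	/* 2. single-channel LVDS tops out at 112 MHz (112999 kHz) */
	if (fixed_mode_clock_khz > 112999)
		return true;
	/* 3. otherwise fall back to the DMI quirk table (omitted here) */
	return false;
}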
index fa4ccb346389e2369effb3b3c48f6f211afa4832..555b896d2bdadec44aebc821a2f7fd9a09d58441 100644 (file)
@@ -2045,22 +2045,20 @@ static void ilk_compute_wm_parameters(struct drm_crtc *crtc,
        p->pipe_htotal = intel_crtc->config->base.adjusted_mode.crtc_htotal;
        p->pixel_rate = ilk_pipe_pixel_rate(dev, crtc);
 
-       if (crtc->primary->state->fb) {
-               p->pri.enabled = true;
+       if (crtc->primary->state->fb)
                p->pri.bytes_per_pixel =
                        crtc->primary->state->fb->bits_per_pixel / 8;
-       } else {
-               p->pri.enabled = false;
-               p->pri.bytes_per_pixel = 0;
-       }
+       else
+               p->pri.bytes_per_pixel = 4;
+
+       p->cur.bytes_per_pixel = 4;
+       /*
+        * TODO: for now, assume primary and cursor planes are always enabled.
+        * Setting them to false makes the screen flicker.
+        */
+       p->pri.enabled = true;
+       p->cur.enabled = true;
 
-       if (crtc->cursor->state->fb) {
-               p->cur.enabled = true;
-               p->cur.bytes_per_pixel = 4;
-       } else {
-               p->cur.enabled = false;
-               p->cur.bytes_per_pixel = 0;
-       }
        p->pri.horiz_pixels = intel_crtc->config->pipe_src_w;
        p->cur.horiz_pixels = intel_crtc->base.cursor->state->crtc_w;
 
index 441e2502b88946ff2d7455a9f26cc32faa87d8fc..005b5e04de4d74d13eee87af223c9e22687f6d35 100644 (file)
@@ -901,13 +901,6 @@ static int chv_init_workarounds(struct intel_engine_cs *ring)
                            GEN6_WIZ_HASHING_MASK,
                            GEN6_WIZ_HASHING_16x4);
 
-       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
-           INTEL_REVID(dev) == SKL_REVID_D0)
-               /* WaBarrierPerformanceFixDisable:skl */
-               WA_SET_BIT_MASKED(HDC_CHICKEN0,
-                                 HDC_FENCE_DEST_SLM_DISABLE |
-                                 HDC_BARRIER_PERFORMANCE_DISABLE);
-
        return 0;
 }
 
@@ -1024,6 +1017,13 @@ static int skl_init_workarounds(struct intel_engine_cs *ring)
                WA_SET_BIT_MASKED(HIZ_CHICKEN,
                                  BDW_HIZ_POWER_COMPILER_CLOCK_GATING_DISABLE);
 
+       if (INTEL_REVID(dev) == SKL_REVID_C0 ||
+           INTEL_REVID(dev) == SKL_REVID_D0)
+               /* WaBarrierPerformanceFixDisable:skl */
+               WA_SET_BIT_MASKED(HDC_CHICKEN0,
+                                 HDC_FENCE_DEST_SLM_DISABLE |
+                                 HDC_BARRIER_PERFORMANCE_DISABLE);
+
        return skl_tune_iz_hashing(ring);
 }
 
index e87d2f418de4f381d50471494e5fe8050de4bdb2..987b81f31b0e693cfe7d505b2f66eecc7eac6539 100644 (file)
@@ -2550,7 +2550,7 @@ intel_sdvo_analog_init(struct intel_sdvo *intel_sdvo, int device)
 
        DRM_DEBUG_KMS("initialising analog device %d\n", device);
 
-       intel_sdvo_connector = kzalloc(sizeof(*intel_sdvo_connector), GFP_KERNEL);
+       intel_sdvo_connector = intel_sdvo_connector_alloc();
        if (!intel_sdvo_connector)
                return false;
 
index 6e84df9369a657223d17387ad14929cdf435e238..ad4b9010dfb0bbed135185e9f64aed98c3239a24 100644 (file)
@@ -1526,6 +1526,11 @@ static int mga_vga_mode_valid(struct drm_connector *connector,
                return MODE_BANDWIDTH;
        }
 
+       if ((mode->hdisplay % 8) != 0 || (mode->hsync_start % 8) != 0 ||
+           (mode->hsync_end % 8) != 0 || (mode->htotal % 8) != 0) {
+               return MODE_H_ILLEGAL;
+       }
+
        if (mode->crtc_hdisplay > 2048 || mode->crtc_hsync_start > 4096 ||
            mode->crtc_hsync_end > 4096 || mode->crtc_htotal > 4096 ||
            mode->crtc_vdisplay > 2048 || mode->crtc_vsync_start > 4096 ||
index 94a5bee69fe724c94542bc5181e4309e78b78300..bbdcab0a56c1734d672457623d0eb071b881ceda 100644 (file)
@@ -384,7 +384,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
        if (gpu->memptrs_bo) {
                if (gpu->memptrs_iova)
                        msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
-               drm_gem_object_unreference(gpu->memptrs_bo);
+               drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
        }
        release_firmware(gpu->pm4);
        release_firmware(gpu->pfp);
index 28d1f95a90ccf0c87b10b26818ce0d14f106d1b9..ad50b80225f5b95d7f90252ea4ba086109a730d7 100644 (file)
@@ -177,6 +177,11 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
+       for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
+               encoders[i]->bridge = msm_dsi->bridge;
+               msm_dsi->encoders[i] = encoders[i];
+       }
+
        msm_dsi->connector = msm_dsi_manager_connector_init(msm_dsi->id);
        if (IS_ERR(msm_dsi->connector)) {
                ret = PTR_ERR(msm_dsi->connector);
@@ -185,11 +190,6 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
                goto fail;
        }
 
-       for (i = 0; i < MSM_DSI_ENCODER_NUM; i++) {
-               encoders[i]->bridge = msm_dsi->bridge;
-               msm_dsi->encoders[i] = encoders[i];
-       }
-
        priv->bridges[priv->num_bridges++]       = msm_dsi->bridge;
        priv->connectors[priv->num_connectors++] = msm_dsi->connector;
 
index 956b22492c9a8f81db4ad38abc1a4edbb477bd38..649d20d29f9298a8852e6d0368008becb348fe03 100644 (file)
@@ -1023,7 +1023,7 @@ static int dsi_short_read1_resp(u8 *buf, const struct mipi_dsi_msg *msg)
                *data = buf[1]; /* strip out dcs type */
                return 1;
        } else {
-               pr_err("%s: read data does not match with rx_buf len %d\n",
+               pr_err("%s: read data does not match with rx_buf len %zu\n",
                        __func__, msg->rx_len);
                return -EINVAL;
        }
@@ -1040,7 +1040,7 @@ static int dsi_short_read2_resp(u8 *buf, const struct mipi_dsi_msg *msg)
                data[1] = buf[2];
                return 2;
        } else {
-               pr_err("%s: read data does not match with rx_buf len %d\n",
+               pr_err("%s: read data does not match with rx_buf len %zu\n",
                        __func__, msg->rx_len);
                return -EINVAL;
        }
@@ -1093,7 +1093,6 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
 {
        u32 *lp, *temp, data;
        int i, j = 0, cnt;
-       bool ack_error = false;
        u32 read_cnt;
        u8 reg[16];
        int repeated_bytes = 0;
@@ -1105,15 +1104,10 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
        if (cnt > 4)
                cnt = 4; /* 4 x 32 bits registers only */
 
-       /* Calculate real read data count */
-       read_cnt = dsi_read(msm_host, 0x1d4) >> 16;
-
-       ack_error = (rx_byte == 4) ?
-               (read_cnt == 8) : /* short pkt + 4-byte error pkt */
-               (read_cnt == (pkt_size + 6 + 4)); /* long pkt+4-byte error pkt*/
-
-       if (ack_error)
-               read_cnt -= 4; /* Remove 4 byte error pkt */
+       if (rx_byte == 4)
+               read_cnt = 4;
+       else
+               read_cnt = pkt_size + 6;
 
        /*
         * In case of multiple reads from the panel, after the first read, there
@@ -1215,7 +1209,7 @@ static void dsi_err_worker(struct work_struct *work)
                container_of(work, struct msm_dsi_host, err_work);
        u32 status = msm_host->err_work_state;
 
-       pr_err("%s: status=%x\n", __func__, status);
+       pr_err_ratelimited("%s: status=%x\n", __func__, status);
        if (status & DSI_ERR_STATE_MDP_FIFO_UNDERFLOW)
                dsi_sw_reset_restore(msm_host);
 
@@ -1797,6 +1791,7 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
        case MIPI_DSI_RX_ACKNOWLEDGE_AND_ERROR_REPORT:
                pr_err("%s: rx ACK_ERR_PACKAGE\n", __func__);
                ret = 0;
+               break;
        case MIPI_DSI_RX_GENERIC_SHORT_READ_RESPONSE_1BYTE:
        case MIPI_DSI_RX_DCS_SHORT_READ_RESPONSE_1BYTE:
                ret = dsi_short_read1_resp(buf, msg);
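
The simplified read_cnt computation earlier in this file encodes a fixed framing rule: a short read response always occupies 4 bytes, while a long read carries the payload plus a 6-byte wrapper. A sketch of that rule as a standalone helper; the name is illustrative:

#include <linux/types.h>

static u32 expected_read_cnt(int rx_byte, int pkt_size)
{
	if (rx_byte == 4)
		return 4;		/* short packet response */
	return pkt_size + 6;		/* long packet: payload plus header/CRC */
}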
index ee3ebcaa33f52f09cfe46dc34be7b7894b2753fa..0a40f3c64e8b3d05ce78af069f97851f8561c502 100644 (file)
@@ -462,7 +462,7 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
        struct drm_connector *connector = NULL;
        struct dsi_connector *dsi_connector;
-       int ret;
+       int ret, i;
 
        dsi_connector = devm_kzalloc(msm_dsi->dev->dev,
                                sizeof(*dsi_connector), GFP_KERNEL);
@@ -495,6 +495,10 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
        if (ret)
                goto fail;
 
+       for (i = 0; i < MSM_DSI_ENCODER_NUM; i++)
+               drm_mode_connector_attach_encoder(connector,
+                                               msm_dsi->encoders[i]);
+
        return connector;
 
 fail:
index 5f5a84f6074c73469a3a7e19e37fc1d5f55edab2..208f9d47f82ece1a98d7ea7d1bde3281ca1838c1 100644 (file)
@@ -132,7 +132,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
        /* msg sanity check */
        if ((native && (msg->size > AUX_CMD_NATIVE_MAX)) ||
                (msg->size > AUX_CMD_I2C_MAX)) {
-               pr_err("%s: invalid msg: size(%d), request(%x)\n",
+               pr_err("%s: invalid msg: size(%zu), request(%x)\n",
                        __func__, msg->size, msg->request);
                return -EINVAL;
        }
@@ -155,7 +155,7 @@ ssize_t edp_aux_transfer(struct drm_dp_aux *drm_aux, struct drm_dp_aux_msg *msg)
                 */
                edp_write(aux->base + REG_EDP_AUX_TRANS_CTRL, 0);
                msm_edp_aux_ctrl(aux, 1);
-               pr_err("%s: aux timeout, %d\n", __func__, ret);
+               pr_err("%s: aux timeout, %zd\n", __func__, ret);
                goto unlock_exit;
        }
        DBG("completion");
index d8812e84da54332388ea9acad7d44403c27178e8..b4d1b469862a4c0281156f578492b8de43b2765e 100644 (file)
@@ -151,6 +151,8 @@ struct drm_connector *msm_edp_connector_init(struct msm_edp *edp)
        if (ret)
                goto fail;
 
+       drm_mode_connector_attach_encoder(connector, edp->encoder);
+
        return connector;
 
 fail:
index 0ec5abdba5c421b4a7604c4fa1b70ff8e3c129f9..29e52d7c61c06873c712254dd3ba4c14c0284f2d 100644 (file)
@@ -1149,12 +1149,13 @@ int msm_edp_ctrl_init(struct msm_edp *edp)
        ctrl->aux = msm_edp_aux_init(dev, ctrl->base, &ctrl->drm_aux);
        if (!ctrl->aux || !ctrl->drm_aux) {
                pr_err("%s:failed to init aux\n", __func__);
-               return ret;
+               return -ENOMEM;
        }
 
        ctrl->phy = msm_edp_phy_init(dev, ctrl->base);
        if (!ctrl->phy) {
                pr_err("%s:failed to init phy\n", __func__);
+               ret = -ENOMEM;
                goto err_destory_aux;
        }
 
index e001e6b2296a2ebd5cf602fbf0e3ba89f49ecf0d..8b9a7931b1624365dac35be9ca492b04967487b0 100644 (file)
@@ -72,14 +72,13 @@ const struct mdp5_cfg_hw msm8x74_config = {
                .base = { 0x12d00, 0x12e00, 0x12f00 },
        },
        .intf = {
-               .count = 4,
                .base = { 0x12500, 0x12700, 0x12900, 0x12b00 },
-       },
-       .intfs = {
-               [0] = INTF_eDP,
-               [1] = INTF_DSI,
-               [2] = INTF_DSI,
-               [3] = INTF_HDMI,
+               .connect = {
+                       [0] = INTF_eDP,
+                       [1] = INTF_DSI,
+                       [2] = INTF_DSI,
+                       [3] = INTF_HDMI,
+               },
        },
        .max_clk = 200000000,
 };
@@ -142,14 +141,13 @@ const struct mdp5_cfg_hw apq8084_config = {
                .base = { 0x12f00, 0x13000, 0x13100, 0x13200 },
        },
        .intf = {
-               .count = 5,
                .base = { 0x12500, 0x12700, 0x12900, 0x12b00, 0x12d00 },
-       },
-       .intfs = {
-               [0] = INTF_eDP,
-               [1] = INTF_DSI,
-               [2] = INTF_DSI,
-               [3] = INTF_HDMI,
+               .connect = {
+                       [0] = INTF_eDP,
+                       [1] = INTF_DSI,
+                       [2] = INTF_DSI,
+                       [3] = INTF_HDMI,
+               },
        },
        .max_clk = 320000000,
 };
@@ -196,10 +194,12 @@ const struct mdp5_cfg_hw msm8x16_config = {
 
        },
        .intf = {
-               .count = 1, /* INTF_1 */
-               .base = { 0x6B800 },
+               .base = { 0x00000, 0x6b800 },
+               .connect = {
+                       [0] = INTF_DISABLED,
+                       [1] = INTF_DSI,
+               },
        },
-       /* TODO enable .intfs[] with [1] = INTF_DSI, once DSI is implemented */
        .max_clk = 320000000,
 };
 
index 3a551b0892d847e50fb48054bfb6c409887fd40b..69349abe59f2a4a614a9a9379b5c26f232087a3c 100644 (file)
@@ -59,6 +59,11 @@ struct mdp5_smp_block {
 
 #define MDP5_INTF_NUM_MAX      5
 
+struct mdp5_intf_block {
+       uint32_t base[MAX_BASES];
+       u32 connect[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+};
+
 struct mdp5_cfg_hw {
        char  *name;
 
@@ -72,9 +77,7 @@ struct mdp5_cfg_hw {
        struct mdp5_sub_block dspp;
        struct mdp5_sub_block ad;
        struct mdp5_sub_block pp;
-       struct mdp5_sub_block intf;
-
-       u32 intfs[MDP5_INTF_NUM_MAX]; /* array of enum mdp5_intf_type */
+       struct mdp5_intf_block intf;
 
        uint32_t max_clk;
 };
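
With the interface map folded into mdp5_intf_block, each per-SoC config pairs a register base with an interface type, and consumers walk the fixed-size connect[] array instead of consulting a count field. A sketch of the kind of lookup this enables, reusing struct mdp5_intf_block, INTF_DSI and MDP5_INTF_NUM_MAX as declared in this header (cf. get_dsi_id_from_intf() in the next file):

#include <linux/errno.h>

/* Map an interface number to its DSI index by walking connect[]. */
static int dsi_id_of(const struct mdp5_intf_block *intf, int intf_num)
{
	int i, id = 0;

	for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
		if (intf->connect[i] == INTF_DSI) {
			if (i == intf_num)
				return id;
			id++;
		}
	}
	return -EINVAL;
}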
index dfa8beb9343aaa171110adabb49c549e4143c0da..bbacf9d2b7383cc12d21bfeb48327c4b04604f56 100644 (file)
@@ -206,8 +206,8 @@ static struct drm_encoder *construct_encoder(struct mdp5_kms *mdp5_kms,
 
 static int get_dsi_id_from_intf(const struct mdp5_cfg_hw *hw_cfg, int intf_num)
 {
-       const int intf_cnt = hw_cfg->intf.count;
-       const u32 *intfs = hw_cfg->intfs;
+       const enum mdp5_intf_type *intfs = hw_cfg->intf.connect;
+       const int intf_cnt = ARRAY_SIZE(hw_cfg->intf.connect);
        int id = 0, i;
 
        for (i = 0; i < intf_cnt; i++) {
@@ -228,7 +228,7 @@ static int modeset_init_intf(struct mdp5_kms *mdp5_kms, int intf_num)
        struct msm_drm_private *priv = dev->dev_private;
        const struct mdp5_cfg_hw *hw_cfg =
                                        mdp5_cfg_get_hw_config(mdp5_kms->cfg);
-       enum mdp5_intf_type intf_type = hw_cfg->intfs[intf_num];
+       enum mdp5_intf_type intf_type = hw_cfg->intf.connect[intf_num];
        struct drm_encoder *encoder;
        int ret = 0;
 
@@ -365,7 +365,7 @@ static int modeset_init(struct mdp5_kms *mdp5_kms)
        /* Construct encoders and modeset-initialize connector devices
         * for each external display interface.
         */
-       for (i = 0; i < ARRAY_SIZE(hw_cfg->intfs); i++) {
+       for (i = 0; i < ARRAY_SIZE(hw_cfg->intf.connect); i++) {
                ret = modeset_init_intf(mdp5_kms, i);
                if (ret)
                        goto fail;
@@ -514,8 +514,8 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
         */
        mdp5_enable(mdp5_kms);
        for (i = 0; i < MDP5_INTF_NUM_MAX; i++) {
-               if (!config->hw->intf.base[i] ||
-                               mdp5_cfg_intf_is_virtual(config->hw->intfs[i]))
+               if (mdp5_cfg_intf_is_virtual(config->hw->intf.connect[i]) ||
+                               !config->hw->intf.base[i])
                        continue;
                mdp5_write(mdp5_kms, REG_MDP5_INTF_TIMING_ENGINE_EN(i), 0);
        }
index 18a3d203b17439c2be3f0b0c72e033ed3996ae86..57b8f56ae9d06fb458266181a8344858e381e6b5 100644 (file)
@@ -273,7 +273,7 @@ static void set_scanout_locked(struct drm_plane *plane,
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
                        msm_framebuffer_iova(fb, mdp5_kms->id, 2));
        mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
-                       msm_framebuffer_iova(fb, mdp5_kms->id, 4));
+                       msm_framebuffer_iova(fb, mdp5_kms->id, 3));
 
        plane->fb = fb;
 }
index 47f4dd407671970fc247c4a9999dcccc03202c8e..c80a6bee2b18f373c9f2191bebbbb44e82db7663 100644 (file)
 
 static void msm_fb_output_poll_changed(struct drm_device *dev)
 {
+#ifdef CONFIG_DRM_MSM_FBDEV
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_hotplug_event(priv->fbdev);
+#endif
 }
 
 static const struct drm_mode_config_funcs mode_config_funcs = {
@@ -94,7 +96,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
        }
 
        if (reglog)
-               printk(KERN_DEBUG "IO:region %s %08x %08lx\n", dbgname, (u32)ptr, size);
+               printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
 
        return ptr;
 }
@@ -102,7 +104,7 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
 void msm_writel(u32 data, void __iomem *addr)
 {
        if (reglog)
-               printk(KERN_DEBUG "IO:W %08x %08x\n", (u32)addr, data);
+               printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
        writel(data, addr);
 }
 
@@ -110,7 +112,7 @@ u32 msm_readl(const void __iomem *addr)
 {
        u32 val = readl(addr);
        if (reglog)
-               printk(KERN_ERR "IO:R %08x %08x\n", (u32)addr, val);
+               printk(KERN_ERR "IO:R %p %08x\n", addr, val);
        return val;
 }
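
The printk changes in this file all address the same 64-bit bug: casting an MMIO cookie to u32 truncates the upper half of the address, so pointers are printed with %p (and resource_size_t with %pa) instead. A minimal sketch of the fixed form; the function is illustrative:

#include <linux/io.h>
#include <linux/printk.h>

static void demo_reglog(void __iomem *addr, u32 val)
{
	printk(KERN_DEBUG "IO:R %p %08x\n", addr, val);	/* full pointer */
	/* printing with "%08x", (u32)addr would truncate on 64-bit */
}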
 
@@ -143,8 +145,8 @@ static int msm_unload(struct drm_device *dev)
        if (gpu) {
                mutex_lock(&dev->struct_mutex);
                gpu->funcs->pm_suspend(gpu);
-               gpu->funcs->destroy(gpu);
                mutex_unlock(&dev->struct_mutex);
+               gpu->funcs->destroy(gpu);
        }
 
        if (priv->vram.paddr) {
@@ -177,7 +179,7 @@ static int get_mdp_ver(struct platform_device *pdev)
        const struct of_device_id *match;
        match = of_match_node(match_types, dev->of_node);
        if (match)
-               return (int)match->data;
+               return (int)(unsigned long)match->data;
 #endif
        return 4;
 }
@@ -216,7 +218,7 @@ static int msm_init_vram(struct drm_device *dev)
                if (ret)
                        return ret;
                size = r.end - r.start;
-               DRM_INFO("using VRAM carveout: %lx@%08x\n", size, r.start);
+               DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start);
        } else
 #endif
 
@@ -283,10 +285,6 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
 
        drm_mode_config_init(dev);
 
-       ret = msm_init_vram(dev);
-       if (ret)
-               goto fail;
-
        platform_set_drvdata(pdev, dev);
 
        /* Bind all our sub-components: */
@@ -294,6 +292,10 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
        if (ret)
                return ret;
 
+       ret = msm_init_vram(dev);
+       if (ret)
+               goto fail;
+
        switch (get_mdp_ver(pdev)) {
        case 4:
                kms = mdp4_kms_init(dev);
@@ -419,9 +421,11 @@ static void msm_preclose(struct drm_device *dev, struct drm_file *file)
 
 static void msm_lastclose(struct drm_device *dev)
 {
+#ifdef CONFIG_DRM_MSM_FBDEV
        struct msm_drm_private *priv = dev->dev_private;
        if (priv->fbdev)
                drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+#endif
 }
 
 static irqreturn_t msm_irq(int irq, void *arg)
index 6b573e612f270bcbcc9e1d92cd3e3318fc80cb98..121713281417a92fc2270c7f907a082a1e0c4a87 100644 (file)
@@ -172,8 +172,8 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
 {
        struct msm_drm_private *priv = dev->dev_private;
        struct msm_kms *kms = priv->kms;
-       struct msm_framebuffer *msm_fb;
-       struct drm_framebuffer *fb = NULL;
+       struct msm_framebuffer *msm_fb = NULL;
+       struct drm_framebuffer *fb;
        const struct msm_format *format;
        int ret, i, n;
        unsigned int hsub, vsub;
@@ -239,8 +239,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
        return fb;
 
 fail:
-       if (fb)
-               msm_framebuffer_destroy(fb);
+       kfree(msm_fb);
 
        return ERR_PTR(ret);
 }
index 479d8af72bcb77d822ea92614ea3b91601e9ac6d..52839769eb6c091ba9f64a73e72d89191a3615cb 100644 (file)
@@ -483,7 +483,7 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
        uint64_t off = drm_vma_node_start(&obj->vma_node);
 
        WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %d\n",
+       seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
                        msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
                        msm_obj->read_fence, msm_obj->write_fence,
                        obj->name, obj->refcount.refcount.counter,
index 7acdaa5688b77e89f3afa786da19903d0d0c7b6d..7ac2f1997e4a4cbe4e57003f001928bba8a2c490 100644 (file)
@@ -60,7 +60,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
                u32 pa = sg_phys(sg) - sg->offset;
                size_t bytes = sg->length + sg->offset;
 
-               VERB("map[%d]: %08x %08x(%x)", i, iova, pa, bytes);
+               VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
 
                ret = iommu_map(domain, da, pa, bytes, prot);
                if (ret)
@@ -99,7 +99,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
                if (unmapped < bytes)
                        return unmapped;
 
-               VERB("unmap[%d]: %08x(%x)", i, iova, bytes);
+               VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
 
                BUG_ON(!PAGE_ALIGNED(bytes));
 
index 8171537dd7d127f9c104bb9417bf5b71a4bf91d8..1f14b908b22136117eb2b37179d05abfedce2b13 100644 (file)
@@ -56,6 +56,6 @@ fail:
 void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
 {
        if (ring->bo)
-               drm_gem_object_unreference(ring->bo);
+               drm_gem_object_unreference_unlocked(ring->bo);
        kfree(ring);
 }
index 0b5af0fe86598c613f923b2a74e1a81e065dbd89..64f8b2f687d29bb206ae1fe461706fbf1be3fe12 100644 (file)
@@ -14,7 +14,7 @@
 
 #define FERMI_TWOD_A                                                 0x0000902d
 
-#define FERMI_MEMORY_TO_MEMORY_FORMAT_A                              0x0000903d
+#define FERMI_MEMORY_TO_MEMORY_FORMAT_A                              0x00009039
 
 #define KEPLER_INLINE_TO_MEMORY_A                                    0x0000a040
 #define KEPLER_INLINE_TO_MEMORY_B                                    0x0000a140
index 2f5eadd12a9b611b5d6c790d1ea8bd39a7606736..fdb1dcf16a595ad6adabb27fd1db86c7b4797b9b 100644 (file)
@@ -329,7 +329,6 @@ gm204_gr_init(struct nvkm_object *object)
        nv_mask(priv, 0x419cc0, 0x00000008, 0x00000008);
 
        for (gpc = 0; gpc < priv->gpc_nr; gpc++) {
-       printk(KERN_ERR "ppc %d %d\n", gpc, priv->ppc_nr[gpc]);
                for (ppc = 0; ppc < priv->ppc_nr[gpc]; ppc++)
                        nv_wr32(priv, PPC_UNIT(gpc, ppc, 0x038), 0xc0000000);
                nv_wr32(priv, GPC_UNIT(gpc, 0x0420), 0xc0000000);
index e8778c67578ee41bcc490eca56b544137ea1ef53..c61102f708055ecf739077e1a1436dcd12ff70cb 100644 (file)
@@ -90,12 +90,14 @@ gf100_devinit_disable(struct nvkm_devinit *devinit)
        return disable;
 }
 
-static int
+int
 gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
                   struct nvkm_oclass *oclass, void *data, u32 size,
                   struct nvkm_object **pobject)
 {
+       struct nvkm_devinit_impl *impl = (void *)oclass;
        struct nv50_devinit_priv *priv;
+       u64 disable;
        int ret;
 
        ret = nvkm_devinit_create(parent, engine, oclass, &priv);
@@ -103,7 +105,8 @@ gf100_devinit_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
        if (ret)
                return ret;
 
-       if (nv_rd32(priv, 0x022500) & 0x00000001)
+       disable = impl->disable(&priv->base);
+       if (disable & (1ULL << NVDEV_ENGINE_DISP))
                priv->base.post = true;
 
        return 0;
index b345a53e881dc6e0f84fa188dfb32d50c90a675e..87ca0ece37b4209114ed36fb8260907e922d4ba4 100644 (file)
@@ -48,7 +48,7 @@ struct nvkm_oclass *
 gm107_devinit_oclass = &(struct nvkm_devinit_impl) {
        .base.handle = NV_SUBDEV(DEVINIT, 0x07),
        .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_devinit_ctor,
+               .ctor = gf100_devinit_ctor,
                .dtor = _nvkm_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nvkm_devinit_fini,
index 535172c5f1ad0fda328eff542db8024afac00c02..1076fcf0d71614e89cf279234da7412557dc17ed 100644 (file)
@@ -161,7 +161,7 @@ struct nvkm_oclass *
 gm204_devinit_oclass = &(struct nvkm_devinit_impl) {
        .base.handle = NV_SUBDEV(DEVINIT, 0x07),
        .base.ofuncs = &(struct nvkm_ofuncs) {
-               .ctor = nv50_devinit_ctor,
+               .ctor = gf100_devinit_ctor,
                .dtor = _nvkm_devinit_dtor,
                .init = nv50_devinit_init,
                .fini = _nvkm_devinit_fini,
index b882b65ff3cd2031ae6e9ccf18b1987b972846d3..9243521c80ac22de306f8b1c7113260765ebd12a 100644 (file)
@@ -15,6 +15,9 @@ int  nv50_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 int  gt215_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
+int  gf100_devinit_ctor(struct nvkm_object *, struct nvkm_object *,
+                       struct nvkm_oclass *, void *, u32,
+                       struct nvkm_object **);
 int  gf100_devinit_pll_set(struct nvkm_devinit *, u32, u32);
 
 u64  gm107_devinit_disable(struct nvkm_devinit *);
index 42b2ea3fdcf3584680235e6d5cab14942f7f84b1..dac78ad24b31558aa53d917fb802865b6a122b61 100644 (file)
@@ -580,9 +580,6 @@ static u32 atombios_adjust_pll(struct drm_crtc *crtc,
                else
                        radeon_crtc->pll_flags |= RADEON_PLL_PREFER_LOW_REF_DIV;
 
-               /* if there is no audio, set MINM_OVER_MAXP  */
-               if (!drm_detect_monitor_audio(radeon_connector_edid(connector)))
-                       radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                if (rdev->family < CHIP_RV770)
                        radeon_crtc->pll_flags |= RADEON_PLL_PREFER_MINM_OVER_MAXP;
                /* use frac fb div on APUs */
index 3e3290c203c625d781f7dfacc13977c50c54d34b..b435c859dcbc3a76d7590f0cd1a6a785230037cf 100644 (file)
@@ -421,19 +421,21 @@ bool radeon_dp_getdpcd(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
        u8 msg[DP_DPCD_SIZE];
-       int ret;
+       int ret, i;
 
-       ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
-                              DP_DPCD_SIZE);
-       if (ret > 0) {
-               memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
+       for (i = 0; i < 7; i++) {
+               ret = drm_dp_dpcd_read(&radeon_connector->ddc_bus->aux, DP_DPCD_REV, msg,
+                                      DP_DPCD_SIZE);
+               if (ret == DP_DPCD_SIZE) {
+                       memcpy(dig_connector->dpcd, msg, DP_DPCD_SIZE);
 
-               DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
-                             dig_connector->dpcd);
+                       DRM_DEBUG_KMS("DPCD: %*ph\n", (int)sizeof(dig_connector->dpcd),
+                                     dig_connector->dpcd);
 
-               radeon_dp_probe_oui(radeon_connector);
+                       radeon_dp_probe_oui(radeon_connector);
 
-               return true;
+                       return true;
+               }
        }
        dig_connector->dpcd[0] = 0;
        return false;
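
Besides adding bounded retries, the hunk above tightens the success test from ret > 0 to ret == DP_DPCD_SIZE, so a short read no longer counts as a valid DPCD. A sketch of that shape, with a hypothetical read_dpcd() standing in for drm_dp_dpcd_read():

#include <linux/types.h>

int read_dpcd(u8 *buf, size_t size);	/* stand-in for drm_dp_dpcd_read() */

static bool get_dpcd_retry(u8 *dpcd, size_t size)
{
	int i;

	for (i = 0; i < 7; i++) {	/* bounded retries for flaky sinks */
		if (read_dpcd(dpcd, size) == (int)size)
			return true;	/* only a complete read counts */
	}
	return false;
}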
index 28faea9996f9e111d6b35e547587025aa93c3089..ba50f3c1c2e0332024959c46ebb99a877e7893b1 100644 (file)
@@ -5837,7 +5837,7 @@ static int cik_pcie_gart_enable(struct radeon_device *rdev)
        /* restore context1-15 */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
        for (i = 1; i < 16; i++) {
                if (i < 8)
                        WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
index f04205170b8a5942d73437ada72437bc18d028a8..cfa3a84a2af03c100741cb7e5b352781adf60b00 100644 (file)
@@ -173,7 +173,7 @@ void dce3_2_hdmi_update_acr(struct drm_encoder *encoder, long offset,
        struct drm_device *dev = encoder->dev;
        struct radeon_device *rdev = dev->dev_private;
 
-       WREG32(HDMI0_ACR_PACKET_CONTROL + offset,
+       WREG32(DCE3_HDMI0_ACR_PACKET_CONTROL + offset,
                HDMI0_ACR_SOURCE |              /* select SW CTS value */
                HDMI0_ACR_AUTO_SEND);   /* allow hw to sent ACR packets when required */
 
index 0926739c9fa7c40d17dd09bb318cc19c4ca194c1..9953356fe2637cfacdc2ba41e8ecd082d65213ff 100644 (file)
@@ -400,7 +400,7 @@ void evergreen_hdmi_enable(struct drm_encoder *encoder, bool enable)
        if (enable) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
 
-               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+               if (connector && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                        WREG32(HDMI_INFOFRAME_CONTROL0 + dig->afmt->offset,
                               HDMI_AVI_INFO_SEND | /* enable AVI info frames */
                               HDMI_AVI_INFO_CONT | /* required for audio info values to be updated */
@@ -438,7 +438,8 @@ void evergreen_dp_enable(struct drm_encoder *encoder, bool enable)
        if (!dig || !dig->afmt)
                return;
 
-       if (enable && drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+       if (enable && connector &&
+           drm_detect_monitor_audio(radeon_connector_edid(connector))) {
                struct drm_connector *connector = radeon_get_connector_for_encoder(encoder);
                struct radeon_connector *radeon_connector = to_radeon_connector(connector);
                struct radeon_connector_atom_dig *dig_connector;
index e8a496ff007ee680d30a2bd688f30d094b58461c..64d3a771920db8a57a04cd343ba210d8ee2fefc7 100644 (file)
@@ -1301,7 +1301,8 @@ static int cayman_pcie_gart_enable(struct radeon_device *rdev)
         */
        for (i = 1; i < 8; i++) {
                WREG32(VM_CONTEXT0_PAGE_TABLE_START_ADDR + (i << 2), 0);
-               WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2), rdev->vm_manager.max_pfn);
+               WREG32(VM_CONTEXT0_PAGE_TABLE_END_ADDR + (i << 2),
+                       rdev->vm_manager.max_pfn - 1);
                WREG32(VM_CONTEXT0_PAGE_TABLE_BASE_ADDR + (i << 2),
                       rdev->vm_manager.saved_table_addr[i]);
        }
index d2abe481954fc14a8146c0f9db0f0a6e73816900..46eb0fa75a614307286446a99d7c1c2037973ab4 100644 (file)
@@ -1673,7 +1673,6 @@ struct radeon_uvd {
        struct radeon_bo        *vcpu_bo;
        void                    *cpu_addr;
        uint64_t                gpu_addr;
-       void                    *saved_bo;
        atomic_t                handles[RADEON_MAX_UVD_HANDLES];
        struct drm_file         *filp[RADEON_MAX_UVD_HANDLES];
        unsigned                img_size[RADEON_MAX_UVD_HANDLES];
index fafd8ce4d58fc6a844b9615aa3b013cf793123c6..8dbf5083c4ff795498e619d798890bcaa4dca1e1 100644 (file)
@@ -1202,7 +1202,7 @@ static struct radeon_asic rs780_asic = {
 static struct radeon_asic_ring rv770_uvd_ring = {
        .ib_execute = &uvd_v1_0_ib_execute,
        .emit_fence = &uvd_v2_2_fence_emit,
-       .emit_semaphore = &uvd_v1_0_semaphore_emit,
+       .emit_semaphore = &uvd_v2_2_semaphore_emit,
        .cs_parse = &radeon_uvd_cs_parse,
        .ring_test = &uvd_v1_0_ring_test,
        .ib_test = &uvd_v1_0_ib_test,
index cf0a90bb61cab3a7bce074a931440a3d12cfa115..a3ca8cd305c5c21541bae20820dbe3366f416706 100644 (file)
@@ -949,6 +949,10 @@ void uvd_v1_0_ib_execute(struct radeon_device *rdev, struct radeon_ib *ib);
 int uvd_v2_2_resume(struct radeon_device *rdev);
 void uvd_v2_2_fence_emit(struct radeon_device *rdev,
                         struct radeon_fence *fence);
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+                            struct radeon_ring *ring,
+                            struct radeon_semaphore *semaphore,
+                            bool emit_wait);
 
 /* uvd v3.1 */
 bool uvd_v3_1_semaphore_emit(struct radeon_device *rdev,
index 8b82abb78df159d877f27c1ec67cd7502010634d..25191f126f3bb63dffc5f812edf0b9fec335600b 100644 (file)
@@ -460,33 +460,34 @@ void radeon_audio_detect(struct drm_connector *connector,
        if (!connector || !connector->encoder)
                return;
 
-       if (!radeon_encoder_is_digital(connector->encoder))
+       rdev = connector->encoder->dev->dev_private;
+
+       if (!radeon_audio_chipset_supported(rdev))
                return;
 
-       rdev = connector->encoder->dev->dev_private;
        radeon_encoder = to_radeon_encoder(connector->encoder);
        dig = radeon_encoder->enc_priv;
 
-       if (!dig->afmt)
-               return;
-
        if (status == connector_status_connected) {
-               struct radeon_connector *radeon_connector = to_radeon_connector(connector);
+               struct radeon_connector *radeon_connector;
+               int sink_type;
+
+               if (!drm_detect_monitor_audio(radeon_connector_edid(connector))) {
+                       radeon_encoder->audio = NULL;
+                       return;
+               }
+
+               radeon_connector = to_radeon_connector(connector);
+               sink_type = radeon_dp_getsinktype(radeon_connector);
 
                if (connector->connector_type == DRM_MODE_CONNECTOR_DisplayPort &&
-                   radeon_dp_getsinktype(radeon_connector) ==
-                   CONNECTOR_OBJECT_ID_DISPLAYPORT)
+                       sink_type == CONNECTOR_OBJECT_ID_DISPLAYPORT)
                        radeon_encoder->audio = rdev->audio.dp_funcs;
                else
                        radeon_encoder->audio = rdev->audio.hdmi_funcs;
 
                dig->afmt->pin = radeon_audio_get_pin(connector->encoder);
-               if (drm_detect_monitor_audio(radeon_connector_edid(connector))) {
-                       radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
-               } else {
-                       radeon_audio_enable(rdev, dig->afmt->pin, 0);
-                       dig->afmt->pin = NULL;
-               }
+               radeon_audio_enable(rdev, dig->afmt->pin, 0xf);
        } else {
                radeon_audio_enable(rdev, dig->afmt->pin, 0);
                dig->afmt->pin = NULL;
index d17d251dbd4fe5a1ac99a238fd7aebcf552682ba..cebb65e07e1d13f0bee01ee753f4c8a76e0e22d5 100644 (file)
@@ -1379,10 +1379,8 @@ out:
        /* updated in get modes as well since we need to know if it's analog or digital */
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0) {
-               radeon_connector_get_edid(connector);
+       if (radeon_audio != 0)
                radeon_audio_detect(connector, ret);
-       }
 
 exit:
        pm_runtime_mark_last_busy(connector->dev->dev);
@@ -1719,10 +1717,8 @@ radeon_dp_detect(struct drm_connector *connector, bool force)
 
        radeon_connector_update_scratch_regs(connector, ret);
 
-       if (radeon_audio != 0) {
-               radeon_connector_get_edid(connector);
+       if (radeon_audio != 0)
                radeon_audio_detect(connector, ret);
-       }
 
 out:
        pm_runtime_mark_last_busy(connector->dev->dev);
index b7ca4c51462120fab3ab146dd74f653e8bcb91cb..a7fdfa4f0857b3a416e67d79007a1da731455b80 100644 (file)
@@ -1463,6 +1463,21 @@ int radeon_device_init(struct radeon_device *rdev,
        if (r)
                DRM_ERROR("ib ring test failed (%d).\n", r);
 
+       /*
+        * Turks/Thames GPUs will freeze the whole laptop if DPM is not
+        * restarted after the CP ring has chewed on at least one packet.
+        * Hence we stop and restart DPM here, after radeon_ib_ring_tests().
+        */
+       if (rdev->pm.dpm_enabled &&
+           (rdev->pm.pm_method == PM_METHOD_DPM) &&
+           (rdev->family == CHIP_TURKS) &&
+           (rdev->flags & RADEON_IS_MOBILITY)) {
+               mutex_lock(&rdev->pm.mutex);
+               radeon_dpm_disable(rdev);
+               radeon_dpm_enable(rdev);
+               mutex_unlock(&rdev->pm.mutex);
+       }
+
        if ((radeon_testing & 1)) {
                if (rdev->accel_working)
                        radeon_test_moves(rdev);
index bf1fecc6cceb2cba743bce902f26598f7156d03a..fcbd60bb03495740d435b7a33521ff698b171c40 100644 (file)
@@ -30,8 +30,6 @@
                            AUX_SW_RX_HPD_DISCON |           \
                            AUX_SW_RX_PARTIAL_BYTE |         \
                            AUX_SW_NON_AUX_MODE |            \
-                           AUX_SW_RX_MIN_COUNT_VIOL |       \
-                           AUX_SW_RX_INVALID_STOP |         \
                            AUX_SW_RX_SYNC_INVALID_L |       \
                            AUX_SW_RX_SYNC_INVALID_H |       \
                            AUX_SW_RX_INVALID_START |        \
index 1017338a49d9f49aaad6dc594d6ec1bef8a5f111..257b10be5cda902861339d9fde17c38e4f238d06 100644 (file)
@@ -663,9 +663,17 @@ int
 radeon_dp_mst_probe(struct radeon_connector *radeon_connector)
 {
        struct radeon_connector_atom_dig *dig_connector = radeon_connector->con_priv;
+       struct drm_device *dev = radeon_connector->base.dev;
+       struct radeon_device *rdev = dev->dev_private;
        int ret;
        u8 msg[1];
 
+       if (!radeon_mst)
+               return 0;
+
+       if (!ASIC_IS_DCE5(rdev))
+               return 0;
+
        if (dig_connector->dpcd[DP_DPCD_REV] < 0x12)
                return 0;
 
index 7b2a7335cc5d557eafa6864d50cb6ebc9cdfb5ff..b0acf50d95581d9970cef89690be25b32324c7b3 100644 (file)
@@ -576,6 +576,9 @@ static int radeon_info_ioctl(struct drm_device *dev, void *data, struct drm_file
                if (radeon_get_allowed_info_register(rdev, *value, value))
                        return -EINVAL;
                break;
+       case RADEON_INFO_VA_UNMAP_WORKING:
+               *value = true;
+               break;
        default:
                DRM_DEBUG_KMS("Invalid request %d\n", info->request);
                return -EINVAL;
index 535bf404b725f8aea3f58ddcb5a082b992212329..eef006c4858499dd207c6eb70ad5fcbc05c53941 100644 (file)
@@ -142,6 +142,9 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
 
                list_for_each_entry(bo, &node->bos, mn_list) {
 
+                       if (!bo->tbo.ttm || bo->tbo.ttm->state != tt_bound)
+                               continue;
+
                        r = radeon_bo_reserve(bo, true);
                        if (r) {
                                DRM_ERROR("(%ld) failed to reserve user bo\n", r);
index b292aca0f342d53856ec3eaf982b71fd8b0a7fa8..edafd3c2b17028a73ff5128568c73adfaff0f85b 100644 (file)
@@ -591,8 +591,7 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
 {
        struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
        struct radeon_ttm_tt *gtt = (void *)ttm;
-       struct scatterlist *sg;
-       int i;
+       struct sg_page_iter sg_iter;
 
        int write = !(gtt->userflags & RADEON_GEM_USERPTR_READONLY);
        enum dma_data_direction direction = write ?
@@ -605,9 +604,8 @@ static void radeon_ttm_tt_unpin_userptr(struct ttm_tt *ttm)
        /* free the sg table and pages again */
        dma_unmap_sg(rdev->dev, ttm->sg->sgl, ttm->sg->nents, direction);
 
-       for_each_sg(ttm->sg->sgl, sg, ttm->sg->nents, i) {
-               struct page *page = sg_page(sg);
-
+       for_each_sg_page(ttm->sg->sgl, &sg_iter, ttm->sg->nents, 0) {
+               struct page *page = sg_page_iter_page(&sg_iter);
                if (!(gtt->userflags & RADEON_GEM_USERPTR_READONLY))
                        set_page_dirty(page);
 
index c10b2aec6450fa8ceb366a691ea0898aff14dcbc..6edcb54850922a87535a3440a24d0e316977cf05 100644 (file)
@@ -204,28 +204,32 @@ void radeon_uvd_fini(struct radeon_device *rdev)
 
 int radeon_uvd_suspend(struct radeon_device *rdev)
 {
-       unsigned size;
-       void *ptr;
-       int i;
+       int i, r;
 
        if (rdev->uvd.vcpu_bo == NULL)
                return 0;
 
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
-               if (atomic_read(&rdev->uvd.handles[i]))
-                       break;
+       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+               uint32_t handle = atomic_read(&rdev->uvd.handles[i]);
+               if (handle != 0) {
+                       struct radeon_fence *fence;
 
-       if (i == RADEON_MAX_UVD_HANDLES)
-               return 0;
+                       radeon_uvd_note_usage(rdev);
 
-       size = radeon_bo_size(rdev->uvd.vcpu_bo);
-       size -= rdev->uvd_fw->size;
+                       r = radeon_uvd_get_destroy_msg(rdev,
+                               R600_RING_TYPE_UVD_INDEX, handle, &fence);
+                       if (r) {
+                               DRM_ERROR("Error destroying UVD (%d)!\n", r);
+                               continue;
+                       }
 
-       ptr = rdev->uvd.cpu_addr;
-       ptr += rdev->uvd_fw->size;
+                       radeon_fence_wait(fence, false);
+                       radeon_fence_unref(&fence);
 
-       rdev->uvd.saved_bo = kmalloc(size, GFP_KERNEL);
-       memcpy(rdev->uvd.saved_bo, ptr, size);
+                       rdev->uvd.filp[i] = NULL;
+                       atomic_set(&rdev->uvd.handles[i], 0);
+               }
+       }
 
        return 0;
 }
@@ -246,12 +250,7 @@ int radeon_uvd_resume(struct radeon_device *rdev)
        ptr = rdev->uvd.cpu_addr;
        ptr += rdev->uvd_fw->size;
 
-       if (rdev->uvd.saved_bo != NULL) {
-               memcpy(ptr, rdev->uvd.saved_bo, size);
-               kfree(rdev->uvd.saved_bo);
-               rdev->uvd.saved_bo = NULL;
-       } else
-               memset(ptr, 0, size);
+       memset(ptr, 0, size);
 
        return 0;
 }
@@ -396,6 +395,29 @@ static int radeon_uvd_cs_msg_decode(uint32_t *msg, unsigned buf_sizes[])
        return 0;
 }
 
+static int radeon_uvd_validate_codec(struct radeon_cs_parser *p,
+                                    unsigned stream_type)
+{
+       switch (stream_type) {
+       case 0: /* H264 */
+       case 1: /* VC1 */
+               /* always supported */
+               return 0;
+
+       case 3: /* MPEG2 */
+       case 4: /* MPEG4 */
+               /* only since UVD 3 */
+               if (p->rdev->family >= CHIP_PALM)
+                       return 0;
+
+               /* fall through */
+       default:
+               DRM_ERROR("UVD codec not supported by hardware %d!\n",
+                         stream_type);
+               return -EINVAL;
+       }
+}
+
 static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                             unsigned offset, unsigned buf_sizes[])
 {
@@ -436,50 +458,70 @@ static int radeon_uvd_cs_msg(struct radeon_cs_parser *p, struct radeon_bo *bo,
                return -EINVAL;
        }
 
-       if (msg_type == 1) {
-               /* it's a decode msg, calc buffer sizes */
-               r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
-               /* calc image size (width * height) */
-               img_size = msg[6] * msg[7];
+       switch (msg_type) {
+       case 0:
+               /* it's a create msg, calc image size (width * height) */
+               img_size = msg[7] * msg[8];
+
+               r = radeon_uvd_validate_codec(p, msg[4]);
                radeon_bo_kunmap(bo);
                if (r)
                        return r;
 
-       } else if (msg_type == 2) {
+               /* try to alloc a new handle */
+               for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+                       if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+                               DRM_ERROR("Handle 0x%x already in use!\n", handle);
+                               return -EINVAL;
+                       }
+
+                       if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
+                               p->rdev->uvd.filp[i] = p->filp;
+                               p->rdev->uvd.img_size[i] = img_size;
+                               return 0;
+                       }
+               }
+
+               DRM_ERROR("No more free UVD handles!\n");
+               return -EINVAL;
+
+       case 1:
+               /* it's a decode msg, validate codec and calc buffer sizes */
+               r = radeon_uvd_validate_codec(p, msg[4]);
+               if (!r)
+                       r = radeon_uvd_cs_msg_decode(msg, buf_sizes);
+               radeon_bo_kunmap(bo);
+               if (r)
+                       return r;
+
+               /* validate the handle */
+               for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
+                       if (atomic_read(&p->rdev->uvd.handles[i]) == handle) {
+                               if (p->rdev->uvd.filp[i] != p->filp) {
+                                       DRM_ERROR("UVD handle collision detected!\n");
+                                       return -EINVAL;
+                               }
+                               return 0;
+                       }
+               }
+
+               DRM_ERROR("Invalid UVD handle 0x%x!\n", handle);
+               return -ENOENT;
+
+       case 2:
                /* it's a destroy msg, free the handle */
                for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->uvd.handles[i], handle, 0);
                radeon_bo_kunmap(bo);
                return 0;
-       } else {
-               /* it's a create msg, calc image size (width * height) */
-               img_size = msg[7] * msg[8];
-               radeon_bo_kunmap(bo);
 
-               if (msg_type != 0) {
-                       DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
-                       return -EINVAL;
-               }
-
-               /* it's a create msg, no special handling needed */
-       }
-
-       /* create or decode, validate the handle */
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->uvd.handles[i]) == handle)
-                       return 0;
-       }
+       default:
 
-       /* handle not found try to alloc a new one */
-       for (i = 0; i < RADEON_MAX_UVD_HANDLES; ++i) {
-               if (!atomic_cmpxchg(&p->rdev->uvd.handles[i], 0, handle)) {
-                       p->rdev->uvd.filp[i] = p->filp;
-                       p->rdev->uvd.img_size[i] = img_size;
-                       return 0;
-               }
+               DRM_ERROR("Illegal UVD message type (%d)!\n", msg_type);
+               return -EINVAL;
        }
 
-       DRM_ERROR("No more free UVD handles!\n");
+       BUG();
        return -EINVAL;
 }
 
index 24f849f888bbdf93c6518a271105f8bb0d302368..0de5711ac508842b2a1d70550c755f0f85cd6d71 100644 (file)
@@ -493,18 +493,27 @@ int radeon_vce_cs_reloc(struct radeon_cs_parser *p, int lo, int hi,
  *
  * @p: parser context
  * @handle: handle to validate
+ * @allocated: allocated a new handle?
  *
  * Validates the handle and returns the found session index, or -EINVAL
  * if we don't have another free session index.
  */
-int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
+static int radeon_vce_validate_handle(struct radeon_cs_parser *p,
+                                     uint32_t handle, bool *allocated)
 {
        unsigned i;
 
+       *allocated = false;
+
        /* validate the handle */
        for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i) {
-               if (atomic_read(&p->rdev->vce.handles[i]) == handle)
+               if (atomic_read(&p->rdev->vce.handles[i]) == handle) {
+                       if (p->rdev->vce.filp[i] != p->filp) {
+                               DRM_ERROR("VCE handle collision detected!\n");
+                               return -EINVAL;
+                       }
                        return i;
+               }
        }
 
        /* handle not found, try to alloc a new one */
@@ -512,6 +521,7 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
                if (!atomic_cmpxchg(&p->rdev->vce.handles[i], 0, handle)) {
                        p->rdev->vce.filp[i] = p->filp;
                        p->rdev->vce.img_size[i] = 0;
+                       *allocated = true;
                        return i;
                }
        }
@@ -529,10 +539,10 @@ int radeon_vce_validate_handle(struct radeon_cs_parser *p, uint32_t handle)
 int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 {
        int session_idx = -1;
-       bool destroyed = false;
+       bool destroyed = false, created = false, allocated = false;
        uint32_t tmp, handle = 0;
        uint32_t *size = &tmp;
-       int i, r;
+       int i, r = 0;
 
        while (p->idx < p->chunk_ib->length_dw) {
                uint32_t len = radeon_get_ib_value(p, p->idx);
@@ -540,18 +550,21 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
 
                if ((len < 8) || (len & 3)) {
                        DRM_ERROR("invalid VCE command length (%d)!\n", len);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                if (destroyed) {
                        DRM_ERROR("No other command allowed after destroy!\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                switch (cmd) {
                case 0x00000001: // session
                        handle = radeon_get_ib_value(p, p->idx + 2);
-                       session_idx = radeon_vce_validate_handle(p, handle);
+                       session_idx = radeon_vce_validate_handle(p, handle,
+                                                                &allocated);
                        if (session_idx < 0)
                                return session_idx;
                        size = &p->rdev->vce.img_size[session_idx];
@@ -561,6 +574,13 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        break;
 
                case 0x01000001: // create
+                       created = true;
+                       if (!allocated) {
+                               DRM_ERROR("Handle already in use!\n");
+                               r = -EINVAL;
+                               goto out;
+                       }
+
                        *size = radeon_get_ib_value(p, p->idx + 8) *
                                radeon_get_ib_value(p, p->idx + 10) *
                                8 * 3 / 2;
@@ -578,12 +598,12 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        r = radeon_vce_cs_reloc(p, p->idx + 10, p->idx + 9,
                                                *size);
                        if (r)
-                               return r;
+                               goto out;
 
                        r = radeon_vce_cs_reloc(p, p->idx + 12, p->idx + 11,
                                                *size / 3);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x02000001: // destroy
@@ -594,7 +614,7 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
                                                *size * 2);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x05000004: // video bitstream buffer
@@ -602,36 +622,47 @@ int radeon_vce_cs_parse(struct radeon_cs_parser *p)
                        r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
                                                tmp);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                case 0x05000005: // feedback buffer
                        r = radeon_vce_cs_reloc(p, p->idx + 3, p->idx + 2,
                                                4096);
                        if (r)
-                               return r;
+                               goto out;
                        break;
 
                default:
                        DRM_ERROR("invalid VCE command (0x%x)!\n", cmd);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                if (session_idx == -1) {
                        DRM_ERROR("no session command at start of IB\n");
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto out;
                }
 
                p->idx += len / 4;
        }
 
-       if (destroyed) {
-               /* IB contains a destroy msg, free the handle */
+       if (allocated && !created) {
+               DRM_ERROR("New session without create command!\n");
+               r = -ENOENT;
+       }
+
+out:
+       if ((!r && destroyed) || (r && allocated)) {
+               /*
+                * IB contains a destroy msg, or we allocated a handle and
+                * hit an error; either way, free the handle
+                */
                for (i = 0; i < RADEON_MAX_VCE_HANDLES; ++i)
                        atomic_cmpxchg(&p->rdev->vce.handles[i], handle, 0);
        }
 
-       return 0;
+       return r;
 }
 
 /**
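The radeon_vce_cs_parse() hunks above replace scattered early returns with a
single out: label, so a handle claimed earlier in the parse is released on
every failure path as well as on destroy. A minimal standalone C sketch of
that goto-cleanup idiom (all names illustrative, not taken from the driver):

	#include <errno.h>
	#include <stdio.h>

	/* Every failure path funnels through one label, so the claimed
	 * handle is released exactly once. */
	static int parse(const unsigned *cmds, unsigned n)
	{
		int allocated = 0, destroyed = 0;
		int r = 0;
		unsigned i;

		for (i = 0; i < n; i++) {
			switch (cmds[i]) {
			case 1:			/* "create": claim a handle */
				allocated = 1;
				break;
			case 2:			/* "destroy" */
				destroyed = 1;
				break;
			default:		/* invalid command */
				r = -EINVAL;
				goto out;	/* no early return */
			}
		}

	out:
		/* destroy requested, or error after claiming: free it */
		if ((!r && destroyed) || (r && allocated))
			allocated = 0;
		return r;
	}

	int main(void)
	{
		unsigned good[] = { 1, 2 }, bad[] = { 1, 99 };

		printf("good=%d bad=%d\n", parse(good, 2), parse(bad, 2));
		return 0;
	}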
index de42fc4a22b869296ff44c85c859678c6155ddd7..9c3377ca17b75ecd2092e4fd78a2238c126d88f1 100644 (file)
@@ -458,14 +458,16 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                /* make sure object fit at this offset */
                eoffset = soffset + size;
                if (soffset >= eoffset) {
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
 
                last_pfn = eoffset / RADEON_GPU_PAGE_SIZE;
                if (last_pfn > rdev->vm_manager.max_pfn) {
                        dev_err(rdev->dev, "va above limit (0x%08X > 0x%08X)\n",
                                last_pfn, rdev->vm_manager.max_pfn);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
 
        } else {
@@ -486,7 +488,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                                "(bo %p 0x%010lx 0x%010lx)\n", bo_va->bo,
                                soffset, tmp->bo, tmp->it.start, tmp->it.last);
                        mutex_unlock(&vm->mutex);
-                       return -EINVAL;
+                       r = -EINVAL;
+                       goto error_unreserve;
                }
        }
 
@@ -497,7 +500,8 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                        tmp = kzalloc(sizeof(struct radeon_bo_va), GFP_KERNEL);
                        if (!tmp) {
                                mutex_unlock(&vm->mutex);
-                               return -ENOMEM;
+                               r = -ENOMEM;
+                               goto error_unreserve;
                        }
                        tmp->it.start = bo_va->it.start;
                        tmp->it.last = bo_va->it.last;
@@ -555,7 +559,6 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
                r = radeon_vm_clear_bo(rdev, pt);
                if (r) {
                        radeon_bo_unref(&pt);
-                       radeon_bo_reserve(bo_va->bo, false);
                        return r;
                }
 
@@ -575,6 +578,10 @@ int radeon_vm_bo_set_addr(struct radeon_device *rdev,
 
        mutex_unlock(&vm->mutex);
        return 0;
+
+error_unreserve:
+       radeon_bo_unreserve(bo_va->bo);
+       return r;
 }
 
 /**
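In the radeon_vm_bo_set_addr() hunks above, each validation failure now jumps
to error_unreserve, which releases the buffer reservation before returning
instead of leaving it held. A simplified userspace sketch of funneling the
failures through one release label, with a single pthread mutex standing in
for both the VM mutex and the BO reservation (an assumed analogy, not the
driver's actual locking):

	#include <errno.h>
	#include <pthread.h>

	static pthread_mutex_t reservation = PTHREAD_MUTEX_INITIALIZER;

	static int set_addr(unsigned long soffset, unsigned long size,
			    unsigned long max_pfn)
	{
		unsigned long eoffset = soffset + size;
		int r = 0;

		pthread_mutex_lock(&reservation);	/* "reserve" */

		if (soffset >= eoffset) {	/* empty or wrapped range */
			r = -EINVAL;
			goto error_unreserve;
		}
		if (eoffset / 4096 > max_pfn) {	/* above the VM limit */
			r = -EINVAL;
			goto error_unreserve;
		}
		/* ... install the mapping ... */

		pthread_mutex_unlock(&reservation);
		return 0;

	error_unreserve:
		pthread_mutex_unlock(&reservation);	/* "unreserve" */
		return r;
	}

	int main(void)
	{
		return set_addr(0, 4096, 1024) ? 1 : 0;
	}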
index 3cf1e2921545f9a980569925088d720e347505d1..9ef2064b1c9cdfc0392c6154258366387a1bf95a 100644 (file)
                         ((n) & 0x3FFF) << 16)
 
 /* UVD */
+#define UVD_SEMA_ADDR_LOW                              0xef00
+#define UVD_SEMA_ADDR_HIGH                             0xef04
+#define UVD_SEMA_CMD                                   0xef08
 #define UVD_GPCOM_VCPU_CMD                             0xef0c
 #define UVD_GPCOM_VCPU_DATA0                           0xef10
 #define UVD_GPCOM_VCPU_DATA1                           0xef14
index b1d74bc375d82f665dbb4455db5aa84db0c8d8dc..4c679b802bc851db50450ee759c3d3f314d0bfdf 100644 (file)
@@ -4318,7 +4318,7 @@ static int si_pcie_gart_enable(struct radeon_device *rdev)
        /* empty context1-15 */
        /* set vm size, must be a multiple of 4 */
        WREG32(VM_CONTEXT1_PAGE_TABLE_START_ADDR, 0);
-       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn);
+       WREG32(VM_CONTEXT1_PAGE_TABLE_END_ADDR, rdev->vm_manager.max_pfn - 1);
        /* Assign the pt base to something valid for now; the pts used for
         * the VMs are determined by the application and setup and assigned
         * on the fly in the vm part of radeon_gart.c
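The si.c change above programs the context-1 page-table end address as
max_pfn - 1. Assuming the register holds an inclusive last page frame number,
which is the natural reading of this fix, the arithmetic is the usual
inclusive-range off-by-one:

	#include <assert.h>

	/* For an inclusive range register, the end value is the last
	 * valid unit, i.e. start + count - 1, not start + count. */
	static unsigned long end_inclusive(unsigned long start,
					   unsigned long count)
	{
		return start + count - 1;
	}

	int main(void)
	{
		/* A VM of 4 pages starting at pfn 0 covers pfns 0..3. */
		assert(end_inclusive(0, 4) == 3);
		return 0;
	}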
index e72b3cb593589876cc8999e8ab913cbf886b64f3..c6b1cbca47fc8e2b423a52fd4affe6d9532678af 100644 (file)
@@ -466,18 +466,8 @@ bool uvd_v1_0_semaphore_emit(struct radeon_device *rdev,
                             struct radeon_semaphore *semaphore,
                             bool emit_wait)
 {
-       uint64_t addr = semaphore->gpu_addr;
-
-       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
-       radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
-
-       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
-       radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
-
-       radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
-       radeon_ring_write(ring, emit_wait ? 1 : 0);
-
-       return true;
+       /* disable semaphores for UVD V1 hardware */
+       return false;
 }
 
 /**
index 89193519f8a1faf32fd7fe9013ddd8107cafd977..7ed778cec7c6400206674f9c8e2264734dba4b0a 100644 (file)
@@ -59,6 +59,35 @@ void uvd_v2_2_fence_emit(struct radeon_device *rdev,
        radeon_ring_write(ring, 2);
 }
 
+/**
+ * uvd_v2_2_semaphore_emit - emit semaphore command
+ *
+ * @rdev: radeon_device pointer
+ * @ring: radeon_ring pointer
+ * @semaphore: semaphore to emit commands for
+ * @emit_wait: true if we should emit a wait command
+ *
+ * Emit a semaphore command (either wait or signal) to the UVD ring.
+ */
+bool uvd_v2_2_semaphore_emit(struct radeon_device *rdev,
+                            struct radeon_ring *ring,
+                            struct radeon_semaphore *semaphore,
+                            bool emit_wait)
+{
+       uint64_t addr = semaphore->gpu_addr;
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_LOW, 0));
+       radeon_ring_write(ring, (addr >> 3) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_ADDR_HIGH, 0));
+       radeon_ring_write(ring, (addr >> 23) & 0x000FFFFF);
+
+       radeon_ring_write(ring, PACKET0(UVD_SEMA_CMD, 0));
+       radeon_ring_write(ring, emit_wait ? 1 : 0);
+
+       return true;
+}
+
 /**
  * uvd_v2_2_resume - memory controller programming
  *
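Taken together, the two UVD hunks above move semaphore emission out of the
v1.0 code, which now reports the feature unsupported by returning false, and
into a new v2.2 hook. A standalone sketch of that per-generation hook-table
pattern, with invented names:

	#include <stdbool.h>
	#include <stdio.h>

	struct ring_ops {
		bool (*semaphore_emit)(unsigned long gpu_addr, bool wait);
	};

	/* Older hardware opts out by returning false, telling the
	 * caller to fall back to a software wait. */
	static bool sema_v1(unsigned long addr, bool wait)
	{
		(void)addr;
		(void)wait;
		return false;
	}

	static bool sema_v2(unsigned long addr, bool wait)
	{
		printf("LOW=%05lx HIGH=%05lx CMD=%d\n",
		       (addr >> 3) & 0x000FFFFF,
		       (addr >> 23) & 0x000FFFFF, wait ? 1 : 0);
		return true;
	}

	int main(void)
	{
		struct ring_ops v1 = { sema_v1 }, v2 = { sema_v2 };

		if (!v1.semaphore_emit(0x1000, true))
			puts("v1: falling back");
		return v2.semaphore_emit(0x1000, false) ? 0 : 1;
	}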
index 1833abd7d3aafa38c796cc5acb7d3bb9a8356c86..bfad15a913a023b0f0f3272118fe822fc9097d0c 100644 (file)
@@ -173,7 +173,6 @@ static int tegra_drm_load(struct drm_device *drm, unsigned long flags)
        drm->irq_enabled = true;
 
        /* syncpoints are used for full 32-bit hardware VBLANK counters */
-       drm->vblank_disable_immediate = true;
        drm->max_vblank_count = 0xffffffff;
 
        err = drm_vblank_init(drm, drm->mode_config.num_crtc);
index 1055cb79096c6b508d568fd3c07255b05bf8b56c..3f4c7b8420287188288641e66b9f82addd5b21f3 100644 (file)
@@ -1,4 +1,4 @@
 ccflags-y := -Iinclude/drm
-vgem-y := vgem_drv.o vgem_dma_buf.o
+vgem-y := vgem_drv.o
 
 obj-$(CONFIG_DRM_VGEM) += vgem.o
diff --git a/drivers/gpu/drm/vgem/vgem_dma_buf.c b/drivers/gpu/drm/vgem/vgem_dma_buf.c
deleted file mode 100644 (file)
index 0254438..0000000
+++ /dev/null
@@ -1,94 +0,0 @@
-/*
- * Copyright © 2012 Intel Corporation
- * Copyright © 2014 The Chromium OS Authors
- *
- * Permission is hereby granted, free of charge, to any person obtaining a
- * copy of this software and associated documentation files (the "Software"),
- * to deal in the Software without restriction, including without limitation
- * the rights to use, copy, modify, merge, publish, distribute, sublicense,
- * and/or sell copies of the Software, and to permit persons to whom the
- * Software is furnished to do so, subject to the following conditions:
- *
- * The above copyright notice and this permission notice (including the next
- * paragraph) shall be included in all copies or substantial portions of the
- * Software.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
- * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
- * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
- * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
- * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
- * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
- * IN THE SOFTWARE.
- *
- * Authors:
- *    Ben Widawsky <ben@bwidawsk.net>
- *
- */
-
-#include <linux/dma-buf.h>
-#include "vgem_drv.h"
-
-struct sg_table *vgem_gem_prime_get_sg_table(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       BUG_ON(obj->pages == NULL);
-
-       return drm_prime_pages_to_sg(obj->pages, obj->base.size / PAGE_SIZE);
-}
-
-int vgem_gem_prime_pin(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       return vgem_gem_get_pages(obj);
-}
-
-void vgem_gem_prime_unpin(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       vgem_gem_put_pages(obj);
-}
-
-void *vgem_gem_prime_vmap(struct drm_gem_object *gobj)
-{
-       struct drm_vgem_gem_object *obj = to_vgem_bo(gobj);
-       BUG_ON(obj->pages == NULL);
-
-       return vmap(obj->pages, obj->base.size / PAGE_SIZE, 0, PAGE_KERNEL);
-}
-
-void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
-{
-       vunmap(vaddr);
-}
-
-struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
-                                            struct dma_buf *dma_buf)
-{
-       struct drm_vgem_gem_object *obj = NULL;
-       int ret;
-
-       obj = kzalloc(sizeof(*obj), GFP_KERNEL);
-       if (obj == NULL) {
-               ret = -ENOMEM;
-               goto fail;
-       }
-
-       ret = drm_gem_object_init(dev, &obj->base, dma_buf->size);
-       if (ret) {
-               ret = -ENOMEM;
-               goto fail_free;
-       }
-
-       get_dma_buf(dma_buf);
-
-       obj->base.dma_buf = dma_buf;
-       obj->use_dma_buf = true;
-
-       return &obj->base;
-
-fail_free:
-       kfree(obj);
-fail:
-       return ERR_PTR(ret);
-}
index cb3b43525b2de3e512c2000dec12b1e92c923e2c..7a207ca547be24011fc0b89284ed7e728ded26f8 100644 (file)
@@ -302,22 +302,13 @@ static const struct file_operations vgem_driver_fops = {
 };
 
 static struct drm_driver vgem_driver = {
-       .driver_features                = DRIVER_GEM | DRIVER_PRIME,
+       .driver_features                = DRIVER_GEM,
        .gem_free_object                = vgem_gem_free_object,
        .gem_vm_ops                     = &vgem_gem_vm_ops,
        .ioctls                         = vgem_ioctls,
        .fops                           = &vgem_driver_fops,
        .dumb_create                    = vgem_gem_dumb_create,
        .dumb_map_offset                = vgem_gem_dumb_map,
-       .prime_handle_to_fd             = drm_gem_prime_handle_to_fd,
-       .prime_fd_to_handle             = drm_gem_prime_fd_to_handle,
-       .gem_prime_export               = drm_gem_prime_export,
-       .gem_prime_import               = vgem_gem_prime_import,
-       .gem_prime_pin                  = vgem_gem_prime_pin,
-       .gem_prime_unpin                = vgem_gem_prime_unpin,
-       .gem_prime_get_sg_table         = vgem_gem_prime_get_sg_table,
-       .gem_prime_vmap                 = vgem_gem_prime_vmap,
-       .gem_prime_vunmap               = vgem_gem_prime_vunmap,
        .name   = DRIVER_NAME,
        .desc   = DRIVER_DESC,
        .date   = DRIVER_DATE,
index 57ab4d8f41f92b083299d786d62f0ebca00577a1..e9f92f7ee275cf791b8f81d8a53c1a8f3c45ba8e 100644 (file)
@@ -43,15 +43,4 @@ struct drm_vgem_gem_object {
 extern void vgem_gem_put_pages(struct drm_vgem_gem_object *obj);
 extern int vgem_gem_get_pages(struct drm_vgem_gem_object *obj);
 
-/* vgem_dma_buf.c */
-extern struct sg_table *vgem_gem_prime_get_sg_table(
-                       struct drm_gem_object *gobj);
-extern int vgem_gem_prime_pin(struct drm_gem_object *gobj);
-extern void vgem_gem_prime_unpin(struct drm_gem_object *gobj);
-extern void *vgem_gem_prime_vmap(struct drm_gem_object *gobj);
-extern void vgem_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
-extern struct drm_gem_object *vgem_gem_prime_import(struct drm_device *dev,
-                                                   struct dma_buf *dma_buf);
-
-
 #endif
index 41f167e4d75fdeec20d795b566fdeba1c642497f..7ce93d927f62d8d029c3f32cefd3dc3f3b2f36dd 100644 (file)
 #define USB_DEVICE_ID_ATEN_2PORTKVM    0x2204
 #define USB_DEVICE_ID_ATEN_4PORTKVM    0x2205
 #define USB_DEVICE_ID_ATEN_4PORTKVMC   0x2208
+#define USB_DEVICE_ID_ATEN_CS682       0x2213
 
 #define USB_VENDOR_ID_ATMEL            0x03eb
 #define USB_DEVICE_ID_ATMEL_MULTITOUCH 0x211c
index b3cf6fd4be96473ba62ebbbf92d1522c024a509e..5fd530acf747c50fcd5bfbd51d19d3271e5d716b 100644 (file)
@@ -44,7 +44,6 @@ MODULE_PARM_DESC(disable_raw_mode,
 /* bits 1..20 are reserved for classes */
 #define HIDPP_QUIRK_DELAYED_INIT               BIT(21)
 #define HIDPP_QUIRK_WTP_PHYSICAL_BUTTONS       BIT(22)
-#define HIDPP_QUIRK_MULTI_INPUT                        BIT(23)
 
 /*
  * There are two hidpp protocols in use, the first version hidpp10 is known
@@ -706,12 +705,6 @@ static int wtp_input_mapping(struct hid_device *hdev, struct hid_input *hi,
                struct hid_field *field, struct hid_usage *usage,
                unsigned long **bit, int *max)
 {
-       struct hidpp_device *hidpp = hid_get_drvdata(hdev);
-
-       if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) &&
-           (field->application == HID_GD_KEYBOARD))
-               return 0;
-
        return -1;
 }
 
@@ -720,10 +713,6 @@ static void wtp_populate_input(struct hidpp_device *hidpp,
 {
        struct wtp_data *wd = hidpp->private_data;
 
-       if ((hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT) && origin_is_hid_core)
-               /* this is the generic hid-input call */
-               return;
-
        __set_bit(EV_ABS, input_dev->evbit);
        __set_bit(EV_KEY, input_dev->evbit);
        __clear_bit(EV_REL, input_dev->evbit);
@@ -1245,10 +1234,6 @@ static int hidpp_probe(struct hid_device *hdev, const struct hid_device_id *id)
        if (hidpp->quirks & HIDPP_QUIRK_DELAYED_INIT)
                connect_mask &= ~HID_CONNECT_HIDINPUT;
 
-       /* Re-enable hidinput for multi-input devices */
-       if (hidpp->quirks & HIDPP_QUIRK_MULTI_INPUT)
-               connect_mask |= HID_CONNECT_HIDINPUT;
-
        ret = hid_hw_start(hdev, connect_mask);
        if (ret) {
                hid_err(hdev, "%s:hid_hw_start returned error\n", __func__);
@@ -1296,11 +1281,6 @@ static const struct hid_device_id hidpp_devices[] = {
          HID_BLUETOOTH_DEVICE(USB_VENDOR_ID_LOGITECH,
                USB_DEVICE_ID_LOGITECH_T651),
          .driver_data = HIDPP_QUIRK_CLASS_WTP },
-       { /* Keyboard TK820 */
-         HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
-               USB_VENDOR_ID_LOGITECH, 0x4102),
-         .driver_data = HIDPP_QUIRK_DELAYED_INIT | HIDPP_QUIRK_MULTI_INPUT |
-                        HIDPP_QUIRK_CLASS_WTP },
 
        { HID_DEVICE(BUS_USB, HID_GROUP_LOGITECH_DJ_DEVICE,
                USB_VENDOR_ID_LOGITECH, HID_ANY_ID)},
index c3f6f1e311ea0d98da6981669e292e085552a927..090a1ba0abb6fb1f6c937a7190980bbd8f318a0c 100644 (file)
@@ -294,7 +294,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
        if (!report)
                return -EINVAL;
 
-       mutex_lock(&hsdev->mutex);
+       mutex_lock(hsdev->mutex_ptr);
        if (flag == SENSOR_HUB_SYNC) {
                memset(&hsdev->pending, 0, sizeof(hsdev->pending));
                init_completion(&hsdev->pending.ready);
@@ -328,7 +328,7 @@ int sensor_hub_input_attr_get_raw_value(struct hid_sensor_hub_device *hsdev,
                kfree(hsdev->pending.raw_data);
                hsdev->pending.status = false;
        }
-       mutex_unlock(&hsdev->mutex);
+       mutex_unlock(hsdev->mutex_ptr);
 
        return ret_val;
 }
@@ -667,7 +667,14 @@ static int sensor_hub_probe(struct hid_device *hdev,
                        hsdev->vendor_id = hdev->vendor;
                        hsdev->product_id = hdev->product;
                        hsdev->usage = collection->usage;
-                       mutex_init(&hsdev->mutex);
+                       hsdev->mutex_ptr = devm_kzalloc(&hdev->dev,
+                                                       sizeof(struct mutex),
+                                                       GFP_KERNEL);
+                       if (!hsdev->mutex_ptr) {
+                               ret = -ENOMEM;
+                               goto err_stop_hw;
+                       }
+                       mutex_init(hsdev->mutex_ptr);
                        hsdev->start_collection_index = i;
                        if (last_hsdev)
                                last_hsdev->end_collection_index = i;
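The hid-sensor-hub change above moves the per-device mutex out of the hsdev
structure and into separately allocated storage obtained with devm_kzalloc().
One property of that arrangement: if the owning structure is ever duplicated,
every copy still refers to the same mutex object. A standalone sketch of
pointer-to-lock versus embedded lock (illustrative names):

	#include <pthread.h>
	#include <stdlib.h>

	/* A struct that embeds a lock duplicates the lock state when the
	 * struct is copied; a pointer to one allocated mutex does not. */
	struct sensor_dev {
		int id;
		pthread_mutex_t *lock;	/* shared, not embedded */
	};

	static int sensor_dev_init(struct sensor_dev *s, int id)
	{
		s->id = id;
		s->lock = malloc(sizeof(*s->lock));
		if (!s->lock)
			return -1;
		pthread_mutex_init(s->lock, NULL);
		return 0;
	}

	int main(void)
	{
		struct sensor_dev a, b;

		if (sensor_dev_init(&a, 0))
			return 1;
		b = a;			/* the copy shares a.lock */
		pthread_mutex_lock(b.lock);
		pthread_mutex_unlock(a.lock);	/* same mutex object */
		pthread_mutex_destroy(a.lock);
		free(a.lock);
		return 0;
	}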
index ab4dd952b6ba654d91d75523951f4f02cbfa1138..92d6cdf024607d848249e011d14f19e88d7c4a39 100644 (file)
@@ -862,6 +862,7 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        union acpi_object *obj;
        struct acpi_device *adev;
        acpi_handle handle;
+       int ret;
 
        handle = ACPI_HANDLE(&client->dev);
        if (!handle || acpi_bus_get_device(handle, &adev))
@@ -877,7 +878,9 @@ static int i2c_hid_acpi_pdata(struct i2c_client *client,
        pdata->hid_descriptor_address = obj->integer.value;
        ACPI_FREE(obj);
 
-       return acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
+       /* GPIOs are optional */
+       ret = acpi_dev_add_driver_gpios(adev, i2c_hid_acpi_gpios);
+       return ret < 0 && ret != -ENXIO ? ret : 0;
 }
 
 static const struct acpi_device_id i2c_hid_acpi_match[] = {
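In the i2c-hid hunk above, a failure from acpi_dev_add_driver_gpios() is
forwarded except for -ENXIO, which is treated as "no GPIO mapping described"
and therefore harmless. A standalone sketch of filtering one expected errno
out of an optional step (the helper here is invented):

	#include <errno.h>
	#include <stdio.h>

	/* Hypothetical helper: -ENXIO means "no GPIO mapping described",
	 * which the caller treats as feature-absent, not as failure. */
	static int add_driver_gpios(int have_gpios)
	{
		return have_gpios ? 0 : -ENXIO;
	}

	static int probe(int have_gpios)
	{
		int ret = add_driver_gpios(have_gpios);

		/* GPIOs are optional: swallow only the "not present" error */
		return ret < 0 && ret != -ENXIO ? ret : 0;
	}

	int main(void)
	{
		printf("with gpios: %d, without: %d\n", probe(1), probe(0));
		return 0;
	}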
index a775143e6265e337597e2dd15e6c9d682c04354f..4696895eb708316944dac63092e959e4b1bb1765 100644 (file)
@@ -61,6 +61,7 @@ static const struct hid_blacklist {
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_2PORTKVM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVM, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_4PORTKVMC, HID_QUIRK_NOGET },
+       { USB_VENDOR_ID_ATEN, USB_DEVICE_ID_ATEN_CS682, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FIGHTERSTICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_COMBATSTICK, HID_QUIRK_NOGET },
        { USB_VENDOR_ID_CH, USB_DEVICE_ID_CH_FLIGHT_SIM_ECLIPSE_YOKE, HID_QUIRK_NOGET },
index fa54d329065945bade5b9048df9cb5a98c0bf7f2..adf959dcfa5df9da7bca7414e2d4ac74a35e1457 100644 (file)
@@ -1072,6 +1072,9 @@ static int wacom_wac_finger_count_touches(struct wacom_wac *wacom)
        int count = 0;
        int i;
 
+       if (!touch_max)
+               return 0;
+
        /* non-HID_GENERIC single touch input doesn't call this routine */
        if ((touch_max == 1) && (wacom->features.type == HID_GENERIC))
                return wacom->hid_data.tipswitch &&
index ed303ba3a59393533a1f0d06b95abc53a5febcdd..3e03379e7c5d92c0191af13883c4e440dbc2cc9c 100644 (file)
@@ -63,7 +63,8 @@ MODULE_PARM_DESC(tjmax, "TjMax value in degrees Celsius");
 #define TO_ATTR_NO(cpu)                (TO_CORE_ID(cpu) + BASE_SYSFS_ATTR_NO)
 
 #ifdef CONFIG_SMP
-#define for_each_sibling(i, cpu)       for_each_cpu(i, cpu_sibling_mask(cpu))
+#define for_each_sibling(i, cpu) \
+       for_each_cpu(i, topology_sibling_cpumask(cpu))
 #else
 #define for_each_sibling(i, cpu)       for (i = 0; false; )
 #endif
index f3830db02d4637675cebbe7b6b5e185492571a1e..37f01702d08195b1a9bab1f82fe88434302556b9 100644 (file)
@@ -439,6 +439,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
                                a2 = &su->u.a2;
+                               sysfs_attr_init(&a2->dev_attr.attr);
                                a2->dev_attr.attr.name = su->name;
                                a2->nr = (*t)->u.s.nr + i;
                                a2->index = (*t)->u.s.index;
@@ -449,6 +450,7 @@ nct6683_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                *attrs = &a2->dev_attr.attr;
                        } else {
                                a = &su->u.a1;
+                               sysfs_attr_init(&a->dev_attr.attr);
                                a->dev_attr.attr.name = su->name;
                                a->index = (*t)->u.index + i;
                                a->dev_attr.attr.mode =
index 4fcb481032992f475e8d196dc3a9dbcfa2407b30..bd1c99deac71b73dadf15615c1e8442027bccee9 100644 (file)
@@ -995,6 +995,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                 (*t)->dev_attr.attr.name, tg->base + i);
                        if ((*t)->s2) {
                                a2 = &su->u.a2;
+                               sysfs_attr_init(&a2->dev_attr.attr);
                                a2->dev_attr.attr.name = su->name;
                                a2->nr = (*t)->u.s.nr + i;
                                a2->index = (*t)->u.s.index;
@@ -1005,6 +1006,7 @@ nct6775_create_attr_group(struct device *dev, struct sensor_template_group *tg,
                                *attrs = &a2->dev_attr.attr;
                        } else {
                                a = &su->u.a1;
+                               sysfs_attr_init(&a->dev_attr.attr);
                                a->dev_attr.attr.name = su->name;
                                a->index = (*t)->u.index + i;
                                a->dev_attr.attr.mode =
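Both nct6683 and nct6775 gain sysfs_attr_init() on attributes built at
runtime. Statically declared attributes get their lockdep class from the
__ATTR() initializers; dynamically allocated ones must be registered
explicitly or lockdep complains when the attribute file is created. A
kernel-context sketch of the pattern (not standalone, illustrative names):

	#include <linux/device.h>
	#include <linux/slab.h>
	#include <linux/sysfs.h>

	/* Attributes allocated at runtime miss the static lockdep key
	 * that __ATTR() provides, so initialize each one explicitly. */
	static struct device_attribute *mk_attr(struct device *dev,
						const char *name,
						umode_t mode)
	{
		struct device_attribute *a;

		a = devm_kzalloc(dev, sizeof(*a), GFP_KERNEL);
		if (!a)
			return NULL;

		sysfs_attr_init(&a->attr); /* required for dynamic attrs */
		a->attr.name = name;
		a->attr.mode = mode;
		return a;
	}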
index 112e4d45e4a0c31ff8234a36f008b629c71230a5..68800115876bf65867c187498902d0de6dcae687 100644 (file)
@@ -239,8 +239,10 @@ static struct ntc_thermistor_platform_data *
 ntc_thermistor_parse_dt(struct platform_device *pdev)
 {
        struct iio_channel *chan;
+       enum iio_chan_type type;
        struct device_node *np = pdev->dev.of_node;
        struct ntc_thermistor_platform_data *pdata;
+       int ret;
 
        if (!np)
                return NULL;
@@ -253,6 +255,13 @@ ntc_thermistor_parse_dt(struct platform_device *pdev)
        if (IS_ERR(chan))
                return ERR_CAST(chan);
 
+       ret = iio_get_channel_type(chan, &type);
+       if (ret < 0)
+               return ERR_PTR(ret);
+
+       if (type != IIO_VOLTAGE)
+               return ERR_PTR(-EINVAL);
+
        if (of_property_read_u32(np, "pullup-uv", &pdata->pullup_uv))
                return ERR_PTR(-ENODEV);
        if (of_property_read_u32(np, "pullup-ohm", &pdata->pullup_ohm))
index 99664ebc738d8003139135a89f07f97e3c6305e2..ccf4cffe0ee1dfac282b9afc6e28da5340aa51f3 100644 (file)
@@ -44,7 +44,7 @@
 #include <linux/sysfs.h>
 
 /* Addresses to scan */
-static const unsigned short normal_i2c[] = { 0x37, 0x48, 0x49, 0x4a, 0x4c, 0x4d,
+static const unsigned short normal_i2c[] = { 0x48, 0x49, 0x4a, 0x4c, 0x4d,
        0x4e, 0x4f, I2C_CLIENT_END };
 
 enum chips { tmp401, tmp411, tmp431, tmp432, tmp435 };
index 8fe78d08e01cf1551ea0eaf53f50d2185dfff809..7c6966434ee7b9a2707da849e56128af41c0baf5 100644 (file)
@@ -554,4 +554,4 @@ module_platform_driver(hix5hd2_i2c_driver);
 MODULE_DESCRIPTION("Hix5hd2 I2C Bus driver");
 MODULE_AUTHOR("Wei Yan <sledge.yanwei@huawei.com>");
 MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:i2c-hix5hd2");
+MODULE_ALIAS("platform:hix5hd2-i2c");
index 958c8db4ec30740e2d9aae00a7835256700d3424..297e9c9ac9432f5e645e06cf932710cd93c7f924 100644 (file)
@@ -1143,6 +1143,7 @@ static int s3c24xx_i2c_probe(struct platform_device *pdev)
                return -ENOMEM;
 
        i2c->quirks = s3c24xx_get_device_quirks(pdev);
+       i2c->sysreg = ERR_PTR(-ENOENT);
        if (pdata)
                memcpy(i2c->pdata, pdata, sizeof(*pdata));
        else
index a04c49f2a0118a887e22c16b9716656af6e87723..39ea67f9b066989ff901674cea38d2b1bb0ff2b3 100644 (file)
@@ -643,15 +643,6 @@ config BLK_DEV_TC86C001
        help
        This driver adds support for Toshiba TC86C001 GOKU-S chip.
 
-config BLK_DEV_CELLEB
-       tristate "Toshiba's Cell Reference Set IDE support"
-       depends on PPC_CELLEB
-       select BLK_DEV_IDEDMA_PCI
-       help
-         This driver provides support for the on-board IDE controller on
-         Toshiba Cell Reference Board.
-         If unsure, say Y.
-
 endif
 
 # TODO: BLK_DEV_IDEDMA_PCI -> BLK_DEV_IDEDMA_SFF
index a04ee82f1c8f5bf5bd712f90dd73744a37762e2a..2a8c417d4081087daa50d154844bcb65f6cb55c0 100644 (file)
@@ -38,7 +38,6 @@ obj-$(CONFIG_BLK_DEV_AEC62XX)         += aec62xx.o
 obj-$(CONFIG_BLK_DEV_ALI15X3)          += alim15x3.o
 obj-$(CONFIG_BLK_DEV_AMD74XX)          += amd74xx.o
 obj-$(CONFIG_BLK_DEV_ATIIXP)           += atiixp.o
-obj-$(CONFIG_BLK_DEV_CELLEB)           += scc_pata.o
 obj-$(CONFIG_BLK_DEV_CMD64X)           += cmd64x.o
 obj-$(CONFIG_BLK_DEV_CS5520)           += cs5520.o
 obj-$(CONFIG_BLK_DEV_CS5530)           += cs5530.o
diff --git a/drivers/ide/scc_pata.c b/drivers/ide/scc_pata.c
deleted file mode 100644 (file)
index 2a2d188..0000000
+++ /dev/null
@@ -1,887 +0,0 @@
-/*
- * Support for IDE interfaces on Celleb platform
- *
- * (C) Copyright 2006 TOSHIBA CORPORATION
- *
- * This code is based on drivers/ide/pci/siimage.c:
- * Copyright (C) 2001-2002     Andre Hedrick <andre@linux-ide.org>
- * Copyright (C) 2003          Red Hat
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
- */
-
-#include <linux/types.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/delay.h>
-#include <linux/ide.h>
-#include <linux/init.h>
-
-#define PCI_DEVICE_ID_TOSHIBA_SCC_ATA            0x01b4
-
-#define SCC_PATA_NAME           "scc IDE"
-
-#define TDVHSEL_MASTER          0x00000001
-#define TDVHSEL_SLAVE           0x00000004
-
-#define MODE_JCUSFEN            0x00000080
-
-#define CCKCTRL_ATARESET        0x00040000
-#define CCKCTRL_BUFCNT          0x00020000
-#define CCKCTRL_CRST            0x00010000
-#define CCKCTRL_OCLKEN          0x00000100
-#define CCKCTRL_ATACLKOEN       0x00000002
-#define CCKCTRL_LCLKEN          0x00000001
-
-#define QCHCD_IOS_SS           0x00000001
-
-#define QCHSD_STPDIAG          0x00020000
-
-#define INTMASK_MSK             0xD1000012
-#define INTSTS_SERROR          0x80000000
-#define INTSTS_PRERR           0x40000000
-#define INTSTS_RERR            0x10000000
-#define INTSTS_ICERR           0x01000000
-#define INTSTS_BMSINT          0x00000010
-#define INTSTS_BMHE            0x00000008
-#define INTSTS_IOIRQS           0x00000004
-#define INTSTS_INTRQ            0x00000002
-#define INTSTS_ACTEINT          0x00000001
-
-#define ECMODE_VALUE 0x01
-
-static struct scc_ports {
-       unsigned long ctl, dma;
-       struct ide_host *host;  /* for removing port from system */
-} scc_ports[MAX_HWIFS];
-
-/* PIO transfer mode  table */
-/* JCHST */
-static unsigned long JCHSTtbl[2][7] = {
-       {0x0E, 0x05, 0x02, 0x03, 0x02, 0x00, 0x00},   /* 100MHz */
-       {0x13, 0x07, 0x04, 0x04, 0x03, 0x00, 0x00}    /* 133MHz */
-};
-
-/* JCHHT */
-static unsigned long JCHHTtbl[2][7] = {
-       {0x0E, 0x02, 0x02, 0x02, 0x02, 0x00, 0x00},   /* 100MHz */
-       {0x13, 0x03, 0x03, 0x03, 0x03, 0x00, 0x00}    /* 133MHz */
-};
-
-/* JCHCT */
-static unsigned long JCHCTtbl[2][7] = {
-       {0x1D, 0x1D, 0x1C, 0x0B, 0x06, 0x00, 0x00},   /* 100MHz */
-       {0x27, 0x26, 0x26, 0x0E, 0x09, 0x00, 0x00}    /* 133MHz */
-};
-
-
-/* DMA transfer mode  table */
-/* JCHDCTM/JCHDCTS */
-static unsigned long JCHDCTxtbl[2][7] = {
-       {0x0A, 0x06, 0x04, 0x03, 0x01, 0x00, 0x00},   /* 100MHz */
-       {0x0E, 0x09, 0x06, 0x04, 0x02, 0x01, 0x00}    /* 133MHz */
-};
-
-/* JCSTWTM/JCSTWTS  */
-static unsigned long JCSTWTxtbl[2][7] = {
-       {0x06, 0x04, 0x03, 0x02, 0x02, 0x02, 0x00},   /* 100MHz */
-       {0x09, 0x06, 0x04, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
-};
-
-/* JCTSS */
-static unsigned long JCTSStbl[2][7] = {
-       {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x00},   /* 100MHz */
-       {0x05, 0x05, 0x05, 0x05, 0x05, 0x05, 0x05}    /* 133MHz */
-};
-
-/* JCENVT */
-static unsigned long JCENVTtbl[2][7] = {
-       {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00},   /* 100MHz */
-       {0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02}    /* 133MHz */
-};
-
-/* JCACTSELS/JCACTSELM */
-static unsigned long JCACTSELtbl[2][7] = {
-       {0x00, 0x00, 0x00, 0x00, 0x01, 0x01, 0x00},   /* 100MHz */
-       {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01}    /* 133MHz */
-};
-
-
-static u8 scc_ide_inb(unsigned long port)
-{
-       u32 data = in_be32((void*)port);
-       return (u8)data;
-}
-
-static void scc_exec_command(ide_hwif_t *hwif, u8 cmd)
-{
-       out_be32((void *)hwif->io_ports.command_addr, cmd);
-       eieio();
-       in_be32((void *)(hwif->dma_base + 0x01c));
-       eieio();
-}
-
-static u8 scc_read_status(ide_hwif_t *hwif)
-{
-       return (u8)in_be32((void *)hwif->io_ports.status_addr);
-}
-
-static u8 scc_read_altstatus(ide_hwif_t *hwif)
-{
-       return (u8)in_be32((void *)hwif->io_ports.ctl_addr);
-}
-
-static u8 scc_dma_sff_read_status(ide_hwif_t *hwif)
-{
-       return (u8)in_be32((void *)(hwif->dma_base + 4));
-}
-
-static void scc_write_devctl(ide_hwif_t *hwif, u8 ctl)
-{
-       out_be32((void *)hwif->io_ports.ctl_addr, ctl);
-       eieio();
-       in_be32((void *)(hwif->dma_base + 0x01c));
-       eieio();
-}
-
-static void scc_ide_insw(unsigned long port, void *addr, u32 count)
-{
-       u16 *ptr = (u16 *)addr;
-       while (count--) {
-               *ptr++ = le16_to_cpu(in_be32((void*)port));
-       }
-}
-
-static void scc_ide_insl(unsigned long port, void *addr, u32 count)
-{
-       u16 *ptr = (u16 *)addr;
-       while (count--) {
-               *ptr++ = le16_to_cpu(in_be32((void*)port));
-               *ptr++ = le16_to_cpu(in_be32((void*)port));
-       }
-}
-
-static void scc_ide_outb(u8 addr, unsigned long port)
-{
-       out_be32((void*)port, addr);
-}
-
-static void
-scc_ide_outsw(unsigned long port, void *addr, u32 count)
-{
-       u16 *ptr = (u16 *)addr;
-       while (count--) {
-               out_be32((void*)port, cpu_to_le16(*ptr++));
-       }
-}
-
-static void
-scc_ide_outsl(unsigned long port, void *addr, u32 count)
-{
-       u16 *ptr = (u16 *)addr;
-       while (count--) {
-               out_be32((void*)port, cpu_to_le16(*ptr++));
-               out_be32((void*)port, cpu_to_le16(*ptr++));
-       }
-}
-
-/**
- *     scc_set_pio_mode        -       set host controller for PIO mode
- *     @hwif: port
- *     @drive: drive
- *
- *     Load the timing settings for this device mode into the
- *     controller.
- */
-
-static void scc_set_pio_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-       struct scc_ports *ports = ide_get_hwifdata(hwif);
-       unsigned long ctl_base = ports->ctl;
-       unsigned long cckctrl_port = ctl_base + 0xff0;
-       unsigned long piosht_port = ctl_base + 0x000;
-       unsigned long pioct_port = ctl_base + 0x004;
-       unsigned long reg;
-       int offset;
-       const u8 pio = drive->pio_mode - XFER_PIO_0;
-
-       reg = in_be32((void __iomem *)cckctrl_port);
-       if (reg & CCKCTRL_ATACLKOEN) {
-               offset = 1; /* 133MHz */
-       } else {
-               offset = 0; /* 100MHz */
-       }
-       reg = JCHSTtbl[offset][pio] << 16 | JCHHTtbl[offset][pio];
-       out_be32((void __iomem *)piosht_port, reg);
-       reg = JCHCTtbl[offset][pio];
-       out_be32((void __iomem *)pioct_port, reg);
-}
-
-/**
- *     scc_set_dma_mode        -       set host controller for DMA mode
- *     @hwif: port
- *     @drive: drive
- *
- *     Load the timing settings for this device mode into the
- *     controller.
- */
-
-static void scc_set_dma_mode(ide_hwif_t *hwif, ide_drive_t *drive)
-{
-       struct scc_ports *ports = ide_get_hwifdata(hwif);
-       unsigned long ctl_base = ports->ctl;
-       unsigned long cckctrl_port = ctl_base + 0xff0;
-       unsigned long mdmact_port = ctl_base + 0x008;
-       unsigned long mcrcst_port = ctl_base + 0x00c;
-       unsigned long sdmact_port = ctl_base + 0x010;
-       unsigned long scrcst_port = ctl_base + 0x014;
-       unsigned long udenvt_port = ctl_base + 0x018;
-       unsigned long tdvhsel_port   = ctl_base + 0x020;
-       int is_slave = drive->dn & 1;
-       int offset, idx;
-       unsigned long reg;
-       unsigned long jcactsel;
-       const u8 speed = drive->dma_mode;
-
-       reg = in_be32((void __iomem *)cckctrl_port);
-       if (reg & CCKCTRL_ATACLKOEN) {
-               offset = 1; /* 133MHz */
-       } else {
-               offset = 0; /* 100MHz */
-       }
-
-       idx = speed - XFER_UDMA_0;
-
-       jcactsel = JCACTSELtbl[offset][idx];
-       if (is_slave) {
-               out_be32((void __iomem *)sdmact_port, JCHDCTxtbl[offset][idx]);
-               out_be32((void __iomem *)scrcst_port, JCSTWTxtbl[offset][idx]);
-               jcactsel = jcactsel << 2;
-               out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_SLAVE) | jcactsel);
-       } else {
-               out_be32((void __iomem *)mdmact_port, JCHDCTxtbl[offset][idx]);
-               out_be32((void __iomem *)mcrcst_port, JCSTWTxtbl[offset][idx]);
-               out_be32((void __iomem *)tdvhsel_port, (in_be32((void __iomem *)tdvhsel_port) & ~TDVHSEL_MASTER) | jcactsel);
-       }
-       reg = JCTSStbl[offset][idx] << 16 | JCENVTtbl[offset][idx];
-       out_be32((void __iomem *)udenvt_port, reg);
-}
-
-static void scc_dma_host_set(ide_drive_t *drive, int on)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       u8 unit = drive->dn & 1;
-       u8 dma_stat = scc_dma_sff_read_status(hwif);
-
-       if (on)
-               dma_stat |= (1 << (5 + unit));
-       else
-               dma_stat &= ~(1 << (5 + unit));
-
-       scc_ide_outb(dma_stat, hwif->dma_base + 4);
-}
-
-/**
- *     scc_dma_setup   -       begin a DMA phase
- *     @drive: target device
- *     @cmd: command
- *
- *     Build an IDE DMA PRD (IDE speak for scatter gather table)
- *     and then set up the DMA transfer registers.
- *
- *     Returns 0 on success. If a PIO fallback is required then 1
- *     is returned.
- */
-
-static int scc_dma_setup(ide_drive_t *drive, struct ide_cmd *cmd)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       u32 rw = (cmd->tf_flags & IDE_TFLAG_WRITE) ? 0 : ATA_DMA_WR;
-       u8 dma_stat;
-
-       /* fall back to pio! */
-       if (ide_build_dmatable(drive, cmd) == 0)
-               return 1;
-
-       /* PRD table */
-       out_be32((void __iomem *)(hwif->dma_base + 8), hwif->dmatable_dma);
-
-       /* specify r/w */
-       out_be32((void __iomem *)hwif->dma_base, rw);
-
-       /* read DMA status for INTR & ERROR flags */
-       dma_stat = scc_dma_sff_read_status(hwif);
-
-       /* clear INTR & ERROR flags */
-       out_be32((void __iomem *)(hwif->dma_base + 4), dma_stat | 6);
-
-       return 0;
-}
-
-static void scc_dma_start(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       u8 dma_cmd = scc_ide_inb(hwif->dma_base);
-
-       /* start DMA */
-       scc_ide_outb(dma_cmd | 1, hwif->dma_base);
-}
-
-static int __scc_dma_end(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       u8 dma_stat, dma_cmd;
-
-       /* get DMA command mode */
-       dma_cmd = scc_ide_inb(hwif->dma_base);
-       /* stop DMA */
-       scc_ide_outb(dma_cmd & ~1, hwif->dma_base);
-       /* get DMA status */
-       dma_stat = scc_dma_sff_read_status(hwif);
-       /* clear the INTR & ERROR bits */
-       scc_ide_outb(dma_stat | 6, hwif->dma_base + 4);
-       /* verify good DMA status */
-       return (dma_stat & 7) != 4 ? (0x10 | dma_stat) : 0;
-}
-
-/**
- *     scc_dma_end     -       Stop DMA
- *     @drive: IDE drive
- *
- *     Check and clear INT Status register.
- *     Then call __scc_dma_end().
- */
-
-static int scc_dma_end(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       void __iomem *dma_base = (void __iomem *)hwif->dma_base;
-       unsigned long intsts_port = hwif->dma_base + 0x014;
-       u32 reg;
-       int dma_stat, data_loss = 0;
-       static int retry = 0;
-
-       /* errata A308 workaround: Step5 (check data loss) */
-       /* We don't check non ide_disk because it is limited to UDMA4 */
-       if (!(in_be32((void __iomem *)hwif->io_ports.ctl_addr)
-             & ATA_ERR) &&
-           drive->media == ide_disk && drive->current_speed > XFER_UDMA_4) {
-               reg = in_be32((void __iomem *)intsts_port);
-               if (!(reg & INTSTS_ACTEINT)) {
-                       printk(KERN_WARNING "%s: operation failed (transfer data loss)\n",
-                              drive->name);
-                       data_loss = 1;
-                       if (retry++) {
-                               struct request *rq = hwif->rq;
-                               ide_drive_t *drive;
-                               int i;
-
-                               /* ERROR_RESET and drive->crc_count are needed
-                                * to reduce DMA transfer mode in retry process.
-                                */
-                               if (rq)
-                                       rq->errors |= ERROR_RESET;
-
-                               ide_port_for_each_dev(i, drive, hwif)
-                                       drive->crc_count++;
-                       }
-               }
-       }
-
-       while (1) {
-               reg = in_be32((void __iomem *)intsts_port);
-
-               if (reg & INTSTS_SERROR) {
-                       printk(KERN_WARNING "%s: SERROR\n", SCC_PATA_NAME);
-                       out_be32((void __iomem *)intsts_port, INTSTS_SERROR|INTSTS_BMSINT);
-
-                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
-                       continue;
-               }
-
-               if (reg & INTSTS_PRERR) {
-                       u32 maea0, maec0;
-                       unsigned long ctl_base = hwif->config_data;
-
-                       maea0 = in_be32((void __iomem *)(ctl_base + 0xF50));
-                       maec0 = in_be32((void __iomem *)(ctl_base + 0xF54));
-
-                       printk(KERN_WARNING "%s: PRERR [addr:%x cmd:%x]\n", SCC_PATA_NAME, maea0, maec0);
-
-                       out_be32((void __iomem *)intsts_port, INTSTS_PRERR|INTSTS_BMSINT);
-
-                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
-                       continue;
-               }
-
-               if (reg & INTSTS_RERR) {
-                       printk(KERN_WARNING "%s: Response Error\n", SCC_PATA_NAME);
-                       out_be32((void __iomem *)intsts_port, INTSTS_RERR|INTSTS_BMSINT);
-
-                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
-                       continue;
-               }
-
-               if (reg & INTSTS_ICERR) {
-                       out_be32(dma_base, in_be32(dma_base) & ~QCHCD_IOS_SS);
-
-                       printk(KERN_WARNING "%s: Illegal Configuration\n", SCC_PATA_NAME);
-                       out_be32((void __iomem *)intsts_port, INTSTS_ICERR|INTSTS_BMSINT);
-                       continue;
-               }
-
-               if (reg & INTSTS_BMSINT) {
-                       printk(KERN_WARNING "%s: Internal Bus Error\n", SCC_PATA_NAME);
-                       out_be32((void __iomem *)intsts_port, INTSTS_BMSINT);
-
-                       ide_do_reset(drive);
-                       continue;
-               }
-
-               if (reg & INTSTS_BMHE) {
-                       out_be32((void __iomem *)intsts_port, INTSTS_BMHE);
-                       continue;
-               }
-
-               if (reg & INTSTS_ACTEINT) {
-                       out_be32((void __iomem *)intsts_port, INTSTS_ACTEINT);
-                       continue;
-               }
-
-               if (reg & INTSTS_IOIRQS) {
-                       out_be32((void __iomem *)intsts_port, INTSTS_IOIRQS);
-                       continue;
-               }
-               break;
-       }
-
-       dma_stat = __scc_dma_end(drive);
-       if (data_loss)
-               dma_stat |= 2; /* emulate DMA error (to retry command) */
-       return dma_stat;
-}
-
-/* returns 1 if dma irq issued, 0 otherwise */
-static int scc_dma_test_irq(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       u32 int_stat = in_be32((void __iomem *)hwif->dma_base + 0x014);
-
-       /* SCC errata A252,A308 workaround: Step4 */
-       if ((in_be32((void __iomem *)hwif->io_ports.ctl_addr)
-            & ATA_ERR) &&
-           (int_stat & INTSTS_INTRQ))
-               return 1;
-
-       /* SCC errata A308 workaround: Step5 (polling IOIRQS) */
-       if (int_stat & INTSTS_IOIRQS)
-               return 1;
-
-       return 0;
-}
-
-static u8 scc_udma_filter(ide_drive_t *drive)
-{
-       ide_hwif_t *hwif = drive->hwif;
-       u8 mask = hwif->ultra_mask;
-
-       /* errata A308 workaround: limit non ide_disk drive to UDMA4 */
-       if ((drive->media != ide_disk) && (mask & 0xE0)) {
-               printk(KERN_INFO "%s: limit %s to UDMA4\n",
-                      SCC_PATA_NAME, drive->name);
-               mask = ATA_UDMA4;
-       }
-
-       return mask;
-}
-
-/**
- *     setup_mmio_scc  -       map CTRL/BMID region
- *     @dev: PCI device we are configuring
- *     @name: device name
- *
- */
-
-static int setup_mmio_scc (struct pci_dev *dev, const char *name)
-{
-       void __iomem *ctl_addr;
-       void __iomem *dma_addr;
-       int i, ret;
-
-       for (i = 0; i < MAX_HWIFS; i++) {
-               if (scc_ports[i].ctl == 0)
-                       break;
-       }
-       if (i >= MAX_HWIFS)
-               return -ENOMEM;
-
-       ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
-       if (ret < 0) {
-               printk(KERN_ERR "%s: can't reserve resources\n", name);
-               return ret;
-       }
-
-       ctl_addr = pci_ioremap_bar(dev, 0);
-       if (!ctl_addr)
-               goto fail_0;
-
-       dma_addr = pci_ioremap_bar(dev, 1);
-       if (!dma_addr)
-               goto fail_1;
-
-       pci_set_master(dev);
-       scc_ports[i].ctl = (unsigned long)ctl_addr;
-       scc_ports[i].dma = (unsigned long)dma_addr;
-       pci_set_drvdata(dev, (void *) &scc_ports[i]);
-
-       return 1;
-
- fail_1:
-       iounmap(ctl_addr);
- fail_0:
-       return -ENOMEM;
-}
-
-static int scc_ide_setup_pci_device(struct pci_dev *dev,
-                                   const struct ide_port_info *d)
-{
-       struct scc_ports *ports = pci_get_drvdata(dev);
-       struct ide_host *host;
-       struct ide_hw hw, *hws[] = { &hw };
-       int i, rc;
-
-       memset(&hw, 0, sizeof(hw));
-       for (i = 0; i <= 8; i++)
-               hw.io_ports_array[i] = ports->dma + 0x20 + i * 4;
-       hw.irq = dev->irq;
-       hw.dev = &dev->dev;
-
-       rc = ide_host_add(d, hws, 1, &host);
-       if (rc)
-               return rc;
-
-       ports->host = host;
-
-       return 0;
-}
-
-/**
- *     init_setup_scc  -       set up an SCC PATA Controller
- *     @dev: PCI device
- *     @d: IDE port info
- *
- *     Perform the initial set up for this device.
- */
-
-static int init_setup_scc(struct pci_dev *dev, const struct ide_port_info *d)
-{
-       unsigned long ctl_base;
-       unsigned long dma_base;
-       unsigned long cckctrl_port;
-       unsigned long intmask_port;
-       unsigned long mode_port;
-       unsigned long ecmode_port;
-       u32 reg = 0;
-       struct scc_ports *ports;
-       int rc;
-
-       rc = pci_enable_device(dev);
-       if (rc)
-               goto end;
-
-       rc = setup_mmio_scc(dev, d->name);
-       if (rc < 0)
-               goto end;
-
-       ports = pci_get_drvdata(dev);
-       ctl_base = ports->ctl;
-       dma_base = ports->dma;
-       cckctrl_port = ctl_base + 0xff0;
-       intmask_port = dma_base + 0x010;
-       mode_port = ctl_base + 0x024;
-       ecmode_port = ctl_base + 0xf00;
-
-       /* controller initialization */
-       reg = 0;
-       out_be32((void*)cckctrl_port, reg);
-       reg |= CCKCTRL_ATACLKOEN;
-       out_be32((void*)cckctrl_port, reg);
-       reg |= CCKCTRL_LCLKEN | CCKCTRL_OCLKEN;
-       out_be32((void*)cckctrl_port, reg);
-       reg |= CCKCTRL_CRST;
-       out_be32((void*)cckctrl_port, reg);
-
-       for (;;) {
-               reg = in_be32((void*)cckctrl_port);
-               if (reg & CCKCTRL_CRST)
-                       break;
-               udelay(5000);
-       }
-
-       reg |= CCKCTRL_ATARESET;
-       out_be32((void*)cckctrl_port, reg);
-
-       out_be32((void*)ecmode_port, ECMODE_VALUE);
-       out_be32((void*)mode_port, MODE_JCUSFEN);
-       out_be32((void*)intmask_port, INTMASK_MSK);
-
-       rc = scc_ide_setup_pci_device(dev, d);
-
- end:
-       return rc;
-}
-
-static void scc_tf_load(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
-       struct ide_io_ports *io_ports = &drive->hwif->io_ports;
-
-       if (valid & IDE_VALID_FEATURE)
-               scc_ide_outb(tf->feature, io_ports->feature_addr);
-       if (valid & IDE_VALID_NSECT)
-               scc_ide_outb(tf->nsect, io_ports->nsect_addr);
-       if (valid & IDE_VALID_LBAL)
-               scc_ide_outb(tf->lbal, io_ports->lbal_addr);
-       if (valid & IDE_VALID_LBAM)
-               scc_ide_outb(tf->lbam, io_ports->lbam_addr);
-       if (valid & IDE_VALID_LBAH)
-               scc_ide_outb(tf->lbah, io_ports->lbah_addr);
-       if (valid & IDE_VALID_DEVICE)
-               scc_ide_outb(tf->device, io_ports->device_addr);
-}
-
-static void scc_tf_read(ide_drive_t *drive, struct ide_taskfile *tf, u8 valid)
-{
-       struct ide_io_ports *io_ports = &drive->hwif->io_ports;
-
-       if (valid & IDE_VALID_ERROR)
-               tf->error  = scc_ide_inb(io_ports->feature_addr);
-       if (valid & IDE_VALID_NSECT)
-               tf->nsect  = scc_ide_inb(io_ports->nsect_addr);
-       if (valid & IDE_VALID_LBAL)
-               tf->lbal   = scc_ide_inb(io_ports->lbal_addr);
-       if (valid & IDE_VALID_LBAM)
-               tf->lbam   = scc_ide_inb(io_ports->lbam_addr);
-       if (valid & IDE_VALID_LBAH)
-               tf->lbah   = scc_ide_inb(io_ports->lbah_addr);
-       if (valid & IDE_VALID_DEVICE)
-               tf->device = scc_ide_inb(io_ports->device_addr);
-}
-
-static void scc_input_data(ide_drive_t *drive, struct ide_cmd *cmd,
-                          void *buf, unsigned int len)
-{
-       unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
-       len++;
-
-       if (drive->io_32bit) {
-               scc_ide_insl(data_addr, buf, len / 4);
-
-               if ((len & 3) >= 2)
-                       scc_ide_insw(data_addr, (u8 *)buf + (len & ~3), 1);
-       } else
-               scc_ide_insw(data_addr, buf, len / 2);
-}
-
-static void scc_output_data(ide_drive_t *drive,  struct ide_cmd *cmd,
-                           void *buf, unsigned int len)
-{
-       unsigned long data_addr = drive->hwif->io_ports.data_addr;
-
-       len++;
-
-       if (drive->io_32bit) {
-               scc_ide_outsl(data_addr, buf, len / 4);
-
-               if ((len & 3) >= 2)
-                       scc_ide_outsw(data_addr, (u8 *)buf + (len & ~3), 1);
-       } else
-               scc_ide_outsw(data_addr, buf, len / 2);
-}
-
-/**
- *     init_mmio_iops_scc      -       set up the iops for MMIO
- *     @hwif: interface to set up
- *
- */
-
-static void init_mmio_iops_scc(ide_hwif_t *hwif)
-{
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-       struct scc_ports *ports = pci_get_drvdata(dev);
-       unsigned long dma_base = ports->dma;
-
-       ide_set_hwifdata(hwif, ports);
-
-       hwif->dma_base = dma_base;
-       hwif->config_data = ports->ctl;
-}
-
-/**
- *     init_iops_scc   -       set up iops
- *     @hwif: interface to set up
- *
- *     Do the basic setup for the SCC hardware interface
- *     and then do the MMIO setup.
- */
-
-static void init_iops_scc(ide_hwif_t *hwif)
-{
-       struct pci_dev *dev = to_pci_dev(hwif->dev);
-
-       hwif->hwif_data = NULL;
-       if (pci_get_drvdata(dev) == NULL)
-               return;
-       init_mmio_iops_scc(hwif);
-}
-
-static int scc_init_dma(ide_hwif_t *hwif, const struct ide_port_info *d)
-{
-       return ide_allocate_dma_engine(hwif);
-}
-
-static u8 scc_cable_detect(ide_hwif_t *hwif)
-{
-       return ATA_CBL_PATA80;
-}
-
-/**
- *     init_hwif_scc   -       set up hwif
- *     @hwif: interface to set up
- *
- *     We do the basic set up of the interface structure. The SCC
- *     requires several custom handlers so we override the default
- *     ide DMA handlers appropriately.
- */
-
-static void init_hwif_scc(ide_hwif_t *hwif)
-{
-       /* PTERADD */
-       out_be32((void __iomem *)(hwif->dma_base + 0x018), hwif->dmatable_dma);
-
-       if (in_be32((void __iomem *)(hwif->config_data + 0xff0)) & CCKCTRL_ATACLKOEN)
-               hwif->ultra_mask = ATA_UDMA6; /* 133MHz */
-       else
-               hwif->ultra_mask = ATA_UDMA5; /* 100MHz */
-}
-
-static const struct ide_tp_ops scc_tp_ops = {
-       .exec_command           = scc_exec_command,
-       .read_status            = scc_read_status,
-       .read_altstatus         = scc_read_altstatus,
-       .write_devctl           = scc_write_devctl,
-
-       .dev_select             = ide_dev_select,
-       .tf_load                = scc_tf_load,
-       .tf_read                = scc_tf_read,
-
-       .input_data             = scc_input_data,
-       .output_data            = scc_output_data,
-};
-
-static const struct ide_port_ops scc_port_ops = {
-       .set_pio_mode           = scc_set_pio_mode,
-       .set_dma_mode           = scc_set_dma_mode,
-       .udma_filter            = scc_udma_filter,
-       .cable_detect           = scc_cable_detect,
-};
-
-static const struct ide_dma_ops scc_dma_ops = {
-       .dma_host_set           = scc_dma_host_set,
-       .dma_setup              = scc_dma_setup,
-       .dma_start              = scc_dma_start,
-       .dma_end                = scc_dma_end,
-       .dma_test_irq           = scc_dma_test_irq,
-       .dma_lost_irq           = ide_dma_lost_irq,
-       .dma_timer_expiry       = ide_dma_sff_timer_expiry,
-       .dma_sff_read_status    = scc_dma_sff_read_status,
-};
-
-static const struct ide_port_info scc_chipset = {
-       .name           = "sccIDE",
-       .init_iops      = init_iops_scc,
-       .init_dma       = scc_init_dma,
-       .init_hwif      = init_hwif_scc,
-       .tp_ops         = &scc_tp_ops,
-       .port_ops       = &scc_port_ops,
-       .dma_ops        = &scc_dma_ops,
-       .host_flags     = IDE_HFLAG_SINGLE,
-       .irq_flags      = IRQF_SHARED,
-       .pio_mask       = ATA_PIO4,
-       .chipset        = ide_pci,
-};
-
-/**
- *     scc_init_one    -       pci layer discovery entry
- *     @dev: PCI device
- *     @id: ident table entry
- *
- *     Called by the PCI code when it finds an SCC PATA controller.
- *     We then use the IDE PCI generic helper to do most of the work.
- */
-
-static int scc_init_one(struct pci_dev *dev, const struct pci_device_id *id)
-{
-       return init_setup_scc(dev, &scc_chipset);
-}
-
-/**
- *     scc_remove      -       pci layer remove entry
- *     @dev: PCI device
- *
- *     Called by the PCI code when it removes an SCC PATA controller.
- */
-
-static void scc_remove(struct pci_dev *dev)
-{
-       struct scc_ports *ports = pci_get_drvdata(dev);
-       struct ide_host *host = ports->host;
-
-       ide_host_remove(host);
-
-       iounmap((void*)ports->dma);
-       iounmap((void*)ports->ctl);
-       pci_release_selected_regions(dev, (1 << 2) - 1);
-       memset(ports, 0, sizeof(*ports));
-}
-
-static const struct pci_device_id scc_pci_tbl[] = {
-       { PCI_VDEVICE(TOSHIBA_2, PCI_DEVICE_ID_TOSHIBA_SCC_ATA), 0 },
-       { 0, },
-};
-MODULE_DEVICE_TABLE(pci, scc_pci_tbl);
-
-static struct pci_driver scc_pci_driver = {
-       .name = "SCC IDE",
-       .id_table = scc_pci_tbl,
-       .probe = scc_init_one,
-       .remove = scc_remove,
-};
-
-static int __init scc_ide_init(void)
-{
-       return ide_pci_register_driver(&scc_pci_driver);
-}
-
-static void __exit scc_ide_exit(void)
-{
-       pci_unregister_driver(&scc_pci_driver);
-}
-
-module_init(scc_ide_init);
-module_exit(scc_ide_exit);
-
-MODULE_DESCRIPTION("PCI driver module for Toshiba SCC IDE");
-MODULE_LICENSE("GPL");
index 7f55a6d7cd035d5e7d2fbad512ce60f3465508e2..c6d5a3a40b609c9044fb57846fca951eabfe199b 100644 (file)
@@ -389,7 +389,12 @@ int mma9551_read_config_words(struct i2c_client *client, u8 app_id,
 {
        int ret, i;
        int len_words = len / sizeof(u16);
-       __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+       __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
+
+       if (len_words > ARRAY_SIZE(be_buf)) {
+               dev_err(&client->dev, "Invalid buffer size %d\n", len);
+               return -EINVAL;
+       }
 
        ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_CONFIG,
                               reg, NULL, 0, (u8 *) be_buf, len);
@@ -424,7 +429,12 @@ int mma9551_read_status_words(struct i2c_client *client, u8 app_id,
 {
        int ret, i;
        int len_words = len / sizeof(u16);
-       __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+       __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS / 2];
+
+       if (len_words > ARRAY_SIZE(be_buf)) {
+               dev_err(&client->dev, "Invalid buffer size %d\n", len);
+               return -EINVAL;
+       }
 
        ret = mma9551_transfer(client, app_id, MMA9551_CMD_READ_STATUS,
                               reg, NULL, 0, (u8 *) be_buf, len);
@@ -459,7 +469,12 @@ int mma9551_write_config_words(struct i2c_client *client, u8 app_id,
 {
        int i;
        int len_words = len / sizeof(u16);
-       __be16 be_buf[MMA9551_MAX_MAILBOX_DATA_REGS];
+       __be16 be_buf[(MMA9551_MAX_MAILBOX_DATA_REGS - 1) / 2];
+
+       if (len_words > ARRAY_SIZE(be_buf)) {
+               dev_err(&client->dev, "Invalid buffer size %d\n", len);
+               return -EINVAL;
+       }
 
        for (i = 0; i < len_words; i++)
                be_buf[i] = cpu_to_be16(buf[i]);
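The mma9551 hunks above shrink the on-stack word buffer to half the mailbox
register count and reject any request that would not fit before touching the
buffer. A standalone sketch of that ARRAY_SIZE() guard (the register count
here is an invented value):

	#include <errno.h>
	#include <stdint.h>
	#include <stdio.h>

	#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))
	#define MAX_MAILBOX_DATA_REGS 10	/* illustrative value */

	/* Reject lengths that would overflow the on-stack word buffer
	 * before any data is copied into it. */
	static int read_words(uint16_t *out, size_t len)
	{
		uint16_t be_buf[MAX_MAILBOX_DATA_REGS / 2] = { 0 };
		size_t len_words = len / sizeof(uint16_t);
		size_t i;

		if (len_words > ARRAY_SIZE(be_buf)) {
			fprintf(stderr, "invalid buffer size %zu\n", len);
			return -EINVAL;
		}
		/* ... fill be_buf from the device, swap into out ... */
		for (i = 0; i < len_words; i++)
			out[i] = be_buf[i];
		return 0;
	}

	int main(void)
	{
		uint16_t out[16];

		printf("ok=%d too_big=%d\n", read_words(out, 8),
		       read_words(out, 64));
		return 0;
	}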
index 2df1af7d43fc6df34b87f80373e413152e33a739..365a109aaaefe8f5367c8de6a1f0e2ccfc9bf463 100644 (file)
@@ -54,6 +54,7 @@
 #define MMA9553_MASK_CONF_STEPCOALESCE         GENMASK(7, 0)
 
 #define MMA9553_REG_CONF_ACTTHD                        0x0E
+#define MMA9553_MAX_ACTTHD                     GENMASK(15, 0)
 
 /* Pedometer status registers (R-only) */
 #define MMA9553_REG_STATUS                     0x00
@@ -316,22 +317,19 @@ static int mma9553_set_config(struct mma9553_data *data, u16 reg,
 static int mma9553_read_activity_stepcnt(struct mma9553_data *data,
                                         u8 *activity, u16 *stepcnt)
 {
-       u32 status_stepcnt;
-       u16 status;
+       u16 buf[2];
        int ret;
 
        ret = mma9551_read_status_words(data->client, MMA9551_APPID_PEDOMETER,
-                                       MMA9553_REG_STATUS, sizeof(u32),
-                                       (u16 *) &status_stepcnt);
+                                       MMA9553_REG_STATUS, sizeof(u32), buf);
        if (ret < 0) {
                dev_err(&data->client->dev,
                        "error reading status and stepcnt\n");
                return ret;
        }
 
-       status = status_stepcnt & MMA9553_MASK_CONF_WORD;
-       *activity = mma9553_get_bits(status, MMA9553_MASK_STATUS_ACTIVITY);
-       *stepcnt = status_stepcnt >> 16;
+       *activity = mma9553_get_bits(buf[0], MMA9553_MASK_STATUS_ACTIVITY);
+       *stepcnt = buf[1];
 
        return 0;
 }
@@ -872,6 +870,9 @@ static int mma9553_write_event_value(struct iio_dev *indio_dev,
        case IIO_EV_INFO_PERIOD:
                switch (chan->type) {
                case IIO_ACTIVITY:
+                       if (val < 0 || val > MMA9553_ACTIVITY_THD_TO_SEC(
+                           MMA9553_MAX_ACTTHD))
+                               return -EINVAL;
                        mutex_lock(&data->mutex);
                        ret = mma9553_set_config(data, MMA9553_REG_CONF_ACTTHD,
                                                 &data->conf.actthd,
@@ -971,7 +972,8 @@ static const struct iio_chan_spec_ext_info mma9553_ext_info[] = {
        .modified = 1,                                                  \
        .channel2 = _chan2,                                             \
        .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),             \
-       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT),     \
+       .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_CALIBHEIGHT) |    \
+                                   BIT(IIO_CHAN_INFO_ENABLE),          \
        .event_spec = mma9553_activity_events,                          \
        .num_event_specs = ARRAY_SIZE(mma9553_activity_events),         \
        .ext_info = mma9553_ext_info,                                   \
index 58d1d13d552ae8cf336061f93603e4dd07b222ac..211b13271c61566c5d3f3d4e9990bcd3c7470823 100644 (file)
@@ -546,6 +546,7 @@ int st_accel_common_probe(struct iio_dev *indio_dev)
 
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &accel_info;
+       mutex_init(&adata->tb.buf_lock);
 
        st_sensors_power_enable(indio_dev);
 
index 08bcfb061ca5617505a47d9fa96b5f762b867f63..56008a86b78f854229943769307ce2fd2c328201 100644 (file)
@@ -53,39 +53,42 @@ static const struct iio_chan_spec const axp288_adc_channels[] = {
                .channel = 0,
                .address = AXP288_TS_ADC_H,
                .datasheet_name = "TS_PIN",
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
        }, {
                .indexed = 1,
                .type = IIO_TEMP,
                .channel = 1,
                .address = AXP288_PMIC_ADC_H,
                .datasheet_name = "PMIC_TEMP",
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
        }, {
                .indexed = 1,
                .type = IIO_TEMP,
                .channel = 2,
                .address = AXP288_GP_ADC_H,
                .datasheet_name = "GPADC",
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
        }, {
                .indexed = 1,
                .type = IIO_CURRENT,
                .channel = 3,
                .address = AXP20X_BATT_CHRG_I_H,
                .datasheet_name = "BATT_CHG_I",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
        }, {
                .indexed = 1,
                .type = IIO_CURRENT,
                .channel = 4,
                .address = AXP20X_BATT_DISCHRG_I_H,
                .datasheet_name = "BATT_DISCHRG_I",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
        }, {
                .indexed = 1,
                .type = IIO_VOLTAGE,
                .channel = 5,
                .address = AXP20X_BATT_V_H,
                .datasheet_name = "BATT_V",
-               .info_mask_separate = BIT(IIO_CHAN_INFO_PROCESSED),
+               .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
        },
 };
 
@@ -151,9 +154,6 @@ static int axp288_adc_read_raw(struct iio_dev *indio_dev,
                                                chan->address))
                        dev_err(&indio_dev->dev, "TS pin restore\n");
                break;
-       case IIO_CHAN_INFO_PROCESSED:
-               ret = axp288_adc_read_channel(val, chan->address, info->regmap);
-               break;
        default:
                ret = -EINVAL;
        }
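With the PROCESSED case gone, every channel reports device counts through IIO_CHAN_INFO_RAW and consumers apply scaling themselves. A minimal read_raw() shape for such a driver, assuming a hypothetical read_channel_counts() helper:

static int adc_read_raw(struct iio_dev *indio_dev,
			struct iio_chan_spec const *chan,
			int *val, int *val2, long mask)
{
	int ret;

	switch (mask) {
	case IIO_CHAN_INFO_RAW:
		ret = read_channel_counts(chan->address, val);
		return ret < 0 ? ret : IIO_VAL_INT;
	default:
		return -EINVAL;
	}
}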
index 51e2a83c9404ca3ddc0a24d64ffa47f6090500ba..115f6e99a7fa7c603ca9655d04759545c48e5932 100644 (file)
@@ -35,8 +35,9 @@
 #define CC10001_ADC_EOC_SET            BIT(0)
 
 #define CC10001_ADC_CHSEL_SAMPLED      0x0c
-#define CC10001_ADC_POWER_UP           0x10
-#define CC10001_ADC_POWER_UP_SET       BIT(0)
+#define CC10001_ADC_POWER_DOWN         0x10
+#define CC10001_ADC_POWER_DOWN_SET     BIT(0)
+
 #define CC10001_ADC_DEBUG              0x14
 #define CC10001_ADC_DATA_COUNT         0x20
 
@@ -62,7 +63,6 @@ struct cc10001_adc_device {
        u16 *buf;
 
        struct mutex lock;
-       unsigned long channel_map;
        unsigned int start_delay_ns;
        unsigned int eoc_delay_ns;
 };
@@ -79,6 +79,18 @@ static inline u32 cc10001_adc_read_reg(struct cc10001_adc_device *adc_dev,
        return readl(adc_dev->reg_base + reg);
 }
 
+static void cc10001_adc_power_up(struct cc10001_adc_device *adc_dev)
+{
+       cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN, 0);
+       ndelay(adc_dev->start_delay_ns);
+}
+
+static void cc10001_adc_power_down(struct cc10001_adc_device *adc_dev)
+{
+       cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_DOWN,
+                             CC10001_ADC_POWER_DOWN_SET);
+}
+
 static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
                              unsigned int channel)
 {
@@ -88,6 +100,7 @@ static void cc10001_adc_start(struct cc10001_adc_device *adc_dev,
        val = (channel & CC10001_ADC_CH_MASK) | CC10001_ADC_MODE_SINGLE_CONV;
        cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
 
+       udelay(1);
        val = cc10001_adc_read_reg(adc_dev, CC10001_ADC_CONFIG);
        val = val | CC10001_ADC_START_CONV;
        cc10001_adc_write_reg(adc_dev, CC10001_ADC_CONFIG, val);
@@ -129,6 +142,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
        struct iio_dev *indio_dev;
        unsigned int delay_ns;
        unsigned int channel;
+       unsigned int scan_idx;
        bool sample_invalid;
        u16 *data;
        int i;
@@ -139,20 +153,17 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
 
        mutex_lock(&adc_dev->lock);
 
-       cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
-                             CC10001_ADC_POWER_UP_SET);
-
-       /* Wait for 8 (6+2) clock cycles before activating START */
-       ndelay(adc_dev->start_delay_ns);
+       cc10001_adc_power_up(adc_dev);
 
        /* Calculate delay step for eoc and sampled data */
        delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
 
        i = 0;
        sample_invalid = false;
-       for_each_set_bit(channel, indio_dev->active_scan_mask,
+       for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
                                  indio_dev->masklength) {
 
+               channel = indio_dev->channels[scan_idx].channel;
                cc10001_adc_start(adc_dev, channel);
 
                data[i] = cc10001_adc_poll_done(indio_dev, channel, delay_ns);
@@ -166,7 +177,7 @@ static irqreturn_t cc10001_adc_trigger_h(int irq, void *p)
        }
 
 done:
-       cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
+       cc10001_adc_power_down(adc_dev);
 
        mutex_unlock(&adc_dev->lock);
 
@@ -185,11 +196,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
        unsigned int delay_ns;
        u16 val;
 
-       cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP,
-                             CC10001_ADC_POWER_UP_SET);
-
-       /* Wait for 8 (6+2) clock cycles before activating START */
-       ndelay(adc_dev->start_delay_ns);
+       cc10001_adc_power_up(adc_dev);
 
        /* Calculate delay step for eoc and sampled data */
        delay_ns = adc_dev->eoc_delay_ns / CC10001_MAX_POLL_COUNT;
@@ -198,7 +205,7 @@ static u16 cc10001_adc_read_raw_voltage(struct iio_dev *indio_dev,
 
        val = cc10001_adc_poll_done(indio_dev, chan->channel, delay_ns);
 
-       cc10001_adc_write_reg(adc_dev, CC10001_ADC_POWER_UP, 0);
+       cc10001_adc_power_down(adc_dev);
 
        return val;
 }
@@ -224,7 +231,7 @@ static int cc10001_adc_read_raw(struct iio_dev *indio_dev,
 
        case IIO_CHAN_INFO_SCALE:
                ret = regulator_get_voltage(adc_dev->reg);
-               if (ret)
+               if (ret < 0)
                        return ret;
 
                *val = ret / 1000;
@@ -255,22 +262,22 @@ static const struct iio_info cc10001_adc_info = {
        .update_scan_mode = &cc10001_update_scan_mode,
 };
 
-static int cc10001_adc_channel_init(struct iio_dev *indio_dev)
+static int cc10001_adc_channel_init(struct iio_dev *indio_dev,
+                                   unsigned long channel_map)
 {
-       struct cc10001_adc_device *adc_dev = iio_priv(indio_dev);
        struct iio_chan_spec *chan_array, *timestamp;
        unsigned int bit, idx = 0;
 
-       indio_dev->num_channels = bitmap_weight(&adc_dev->channel_map,
-                                               CC10001_ADC_NUM_CHANNELS);
+       indio_dev->num_channels = bitmap_weight(&channel_map,
+                                               CC10001_ADC_NUM_CHANNELS) + 1;
 
-       chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels + 1,
+       chan_array = devm_kcalloc(&indio_dev->dev, indio_dev->num_channels,
                                  sizeof(struct iio_chan_spec),
                                  GFP_KERNEL);
        if (!chan_array)
                return -ENOMEM;
 
-       for_each_set_bit(bit, &adc_dev->channel_map, CC10001_ADC_NUM_CHANNELS) {
+       for_each_set_bit(bit, &channel_map, CC10001_ADC_NUM_CHANNELS) {
                struct iio_chan_spec *chan = &chan_array[idx];
 
                chan->type = IIO_VOLTAGE;
@@ -305,6 +312,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
        unsigned long adc_clk_rate;
        struct resource *res;
        struct iio_dev *indio_dev;
+       unsigned long channel_map;
        int ret;
 
        indio_dev = devm_iio_device_alloc(&pdev->dev, sizeof(*adc_dev));
@@ -313,9 +321,9 @@ static int cc10001_adc_probe(struct platform_device *pdev)
 
        adc_dev = iio_priv(indio_dev);
 
-       adc_dev->channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
+       channel_map = GENMASK(CC10001_ADC_NUM_CHANNELS - 1, 0);
        if (!of_property_read_u32(node, "adc-reserved-channels", &ret))
-               adc_dev->channel_map &= ~ret;
+               channel_map &= ~ret;
 
        adc_dev->reg = devm_regulator_get(&pdev->dev, "vref");
        if (IS_ERR(adc_dev->reg))
@@ -361,7 +369,7 @@ static int cc10001_adc_probe(struct platform_device *pdev)
        adc_dev->start_delay_ns = adc_dev->eoc_delay_ns * CC10001_WAIT_CYCLES;
 
        /* Setup the ADC channels available on the device */
-       ret = cc10001_adc_channel_init(indio_dev);
+       ret = cc10001_adc_channel_init(indio_dev, channel_map);
        if (ret < 0)
                goto err_disable_clk;
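Once reserved channels are dropped from the channel array, a set bit in active_scan_mask is an index into that array, not a hardware channel number; the fix above looks up .channel per entry. The pattern, as a sketch:

	unsigned int scan_idx, channel;

	for_each_set_bit(scan_idx, indio_dev->active_scan_mask,
			 indio_dev->masklength) {
		/* scan_idx counts registered channels; the wired-up
		 * ADC input sits in the channel spec itself. */
		channel = indio_dev->channels[scan_idx].channel;
		/* ... start a single conversion on 'channel' ... */
	}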
 
index efbfd12a4bfdd9f764732d976a2650719408552c..8d9c9b9215ddc1ef5530df512d475a618cb71d8f 100644 (file)
@@ -60,12 +60,12 @@ struct mcp320x {
        struct spi_message msg;
        struct spi_transfer transfer[2];
 
-       u8 tx_buf;
-       u8 rx_buf[2];
-
        struct regulator *reg;
        struct mutex lock;
        const struct mcp320x_chip_info *chip_info;
+
+       u8 tx_buf ____cacheline_aligned;
+       u8 rx_buf[2];
 };
 
 static int mcp320x_channel_to_tx_data(int device_index,
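The SPI core may DMA straight out of these buffers, so they must not share a cache line with CPU-updated fields. Placing them last in the private struct behind ____cacheline_aligned is the usual idiom; a sketch:

struct my_spi_adc {
	struct spi_device *spi;
	struct mutex lock;		/* CPU-touched state first */

	/*
	 * DMA-safe tail: ____cacheline_aligned starts a fresh cache
	 * line, and nothing follows, so cache maintenance during DMA
	 * cannot corrupt the fields above.
	 */
	u8 tx_buf ____cacheline_aligned;
	u8 rx_buf[2];
};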
index 3211729bcb0bd2fa3a53629c4e78c2c9c37579a2..0c4618b4d51549cb7bfe8fead1e3ce61c1882366 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/iio/iio.h>
 #include <linux/interrupt.h>
 #include <linux/kernel.h>
+#include <linux/math64.h>
 #include <linux/module.h>
 #include <linux/of.h>
 #include <linux/platform_device.h>
@@ -471,11 +472,11 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
                          const struct vadc_channel_prop *prop, u16 adc_code)
 {
        const struct vadc_prescale_ratio *prescale;
-       s32 voltage;
+       s64 voltage;
 
        voltage = adc_code - vadc->graph[prop->calibration].gnd;
        voltage *= vadc->graph[prop->calibration].dx;
-       voltage = voltage / vadc->graph[prop->calibration].dy;
+       voltage = div64_s64(voltage, vadc->graph[prop->calibration].dy);
 
        if (prop->calibration == VADC_CALIB_ABSOLUTE)
                voltage += vadc->graph[prop->calibration].dx;
@@ -487,7 +488,7 @@ static s32 vadc_calibrate(struct vadc_priv *vadc,
 
        voltage = voltage * prescale->den;
 
-       return voltage / prescale->num;
+       return div64_s64(voltage, prescale->num);
 }
 
 static int vadc_decimation_from_dt(u32 value)
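The intermediate product can exceed 32 bits, and a plain s64 division does not compile on 32-bit kernels (it would pull in libgcc), hence the switch to s64 plus div64_s64(). A minimal sketch of the helper's use:

#include <linux/math64.h>

static s64 apply_ratio(s64 value, s64 num, s64 den)
{
	/* 64-by-64 signed division that works on 32-bit builds too. */
	return div64_s64(value * num, den);
}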
index 89d8aa1d2818502f974c92f7925ea4440df3d97d..df12c57e6ce07a700d211b81c9b5d3c9c15ff2d3 100644 (file)
@@ -1001,7 +1001,7 @@ static struct platform_driver twl6030_gpadc_driver = {
 
 module_platform_driver(twl6030_gpadc_driver);
 
-MODULE_ALIAS("platform: " DRIVER_NAME);
+MODULE_ALIAS("platform:" DRIVER_NAME);
 MODULE_AUTHOR("Balaji T K <balajitk@ti.com>");
 MODULE_AUTHOR("Graeme Gregory <gg@slimlogic.co.uk>");
 MODULE_AUTHOR("Oleksandr Kozaruk <oleksandr.kozaruk@ti.com");
index a221f7329b7914449492f7a287f6524d4770a4c3..ce93bd8e3f68b82fec81b31f8f1885b908ef0b45 100644 (file)
@@ -856,6 +856,7 @@ static int xadc_read_raw(struct iio_dev *indio_dev,
                        switch (chan->address) {
                        case XADC_REG_VCCINT:
                        case XADC_REG_VCCAUX:
+                       case XADC_REG_VREFP:
                        case XADC_REG_VCCBRAM:
                        case XADC_REG_VCCPINT:
                        case XADC_REG_VCCPAUX:
@@ -996,7 +997,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
        .num_event_specs = (_alarm) ? ARRAY_SIZE(xadc_voltage_events) : 0, \
        .scan_index = (_scan_index), \
        .scan_type = { \
-               .sign = 'u', \
+               .sign = ((_addr) == XADC_REG_VREFN) ? 's' : 'u', \
                .realbits = 12, \
                .storagebits = 16, \
                .shift = 4, \
@@ -1008,7 +1009,7 @@ static const struct iio_event_spec xadc_voltage_events[] = {
 static const struct iio_chan_spec xadc_channels[] = {
        XADC_CHAN_TEMP(0, 8, XADC_REG_TEMP),
        XADC_CHAN_VOLTAGE(0, 9, XADC_REG_VCCINT, "vccint", true),
-       XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCINT, "vccaux", true),
+       XADC_CHAN_VOLTAGE(1, 10, XADC_REG_VCCAUX, "vccaux", true),
        XADC_CHAN_VOLTAGE(2, 14, XADC_REG_VCCBRAM, "vccbram", true),
        XADC_CHAN_VOLTAGE(3, 5, XADC_REG_VCCPINT, "vccpint", true),
        XADC_CHAN_VOLTAGE(4, 6, XADC_REG_VCCPAUX, "vccpaux", true),
index c7487e8d7f809f24b5616365408c5301fea196ad..54adc5087210bb173afd62bfa36712e4a014eafb 100644 (file)
@@ -145,9 +145,9 @@ static inline int xadc_write_adc_reg(struct xadc *xadc, unsigned int reg,
 #define XADC_REG_MAX_VCCPINT   0x28
 #define XADC_REG_MAX_VCCPAUX   0x29
 #define XADC_REG_MAX_VCCO_DDR  0x2a
-#define XADC_REG_MIN_VCCPINT   0x2b
-#define XADC_REG_MIN_VCCPAUX   0x2c
-#define XADC_REG_MIN_VCCO_DDR  0x2d
+#define XADC_REG_MIN_VCCPINT   0x2c
+#define XADC_REG_MIN_VCCPAUX   0x2d
+#define XADC_REG_MIN_VCCO_DDR  0x2e
 
 #define XADC_REG_CONF0         0x40
 #define XADC_REG_CONF1         0x41
index edd13d2b4121f7834c2e90eaca02e7a4f70c51b7..8dd0477e201c192bed10a8c5384f012bfe083ff8 100644 (file)
@@ -304,8 +304,6 @@ int st_sensors_init_sensor(struct iio_dev *indio_dev,
        struct st_sensors_platform_data *of_pdata;
        int err = 0;
 
-       mutex_init(&sdata->tb.buf_lock);
-
        /* If OF/DT pdata exists, it will take precedence of anything else */
        of_pdata = st_sensors_of_probe(indio_dev->dev.parent, pdata);
        if (of_pdata)
index 21395f26d2276548567dfd118f37c22087a0d1ce..ffe96642b6d049569d73356a1df541782657a912 100644 (file)
@@ -400,6 +400,7 @@ int st_gyro_common_probe(struct iio_dev *indio_dev)
 
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &gyro_info;
+       mutex_init(&gdata->tb.buf_lock);
 
        st_sensors_power_enable(indio_dev);
 
index 0916bf6b6c311c503931f26387712c6677b17645..73b189c1c0fb0fdcc73d64b7118ab4f79a12d41b 100644 (file)
 #define ADIS16400_NO_BURST             BIT(1)
 #define ADIS16400_HAS_SLOW_MODE                BIT(2)
 #define ADIS16400_HAS_SERIAL_NUMBER    BIT(3)
+#define ADIS16400_BURST_DIAG_STAT      BIT(4)
 
 struct adis16400_state;
 
@@ -165,6 +166,7 @@ struct adis16400_state {
        int                             filt_int;
 
        struct adis adis;
+       unsigned long avail_scan_mask[2];
 };
 
 /* At the moment triggers are only used for ring buffer
index 6e727ffe52621f43bb40f31466730705477961ef..90c24a23c679b8001e31cdff48b098cacb872682 100644 (file)
@@ -18,7 +18,8 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
 {
        struct adis16400_state *st = iio_priv(indio_dev);
        struct adis *adis = &st->adis;
-       uint16_t *tx;
+       unsigned int burst_length;
+       u8 *tx;
 
        if (st->variant->flags & ADIS16400_NO_BURST)
                return adis_update_scan_mode(indio_dev, scan_mask);
@@ -26,26 +27,29 @@ int adis16400_update_scan_mode(struct iio_dev *indio_dev,
        kfree(adis->xfer);
        kfree(adis->buffer);
 
+       /* All but the timestamp channel */
+       burst_length = (indio_dev->num_channels - 1) * sizeof(u16);
+       if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+               burst_length += sizeof(u16);
+
        adis->xfer = kcalloc(2, sizeof(*adis->xfer), GFP_KERNEL);
        if (!adis->xfer)
                return -ENOMEM;
 
-       adis->buffer = kzalloc(indio_dev->scan_bytes + sizeof(u16),
-               GFP_KERNEL);
+       adis->buffer = kzalloc(burst_length + sizeof(u16), GFP_KERNEL);
        if (!adis->buffer)
                return -ENOMEM;
 
-       tx = adis->buffer + indio_dev->scan_bytes;
-
+       tx = adis->buffer + burst_length;
        tx[0] = ADIS_READ_REG(ADIS16400_GLOB_CMD);
        tx[1] = 0;
 
        adis->xfer[0].tx_buf = tx;
        adis->xfer[0].bits_per_word = 8;
        adis->xfer[0].len = 2;
-       adis->xfer[1].tx_buf = tx;
+       adis->xfer[1].rx_buf = adis->buffer;
        adis->xfer[1].bits_per_word = 8;
-       adis->xfer[1].len = indio_dev->scan_bytes;
+       adis->xfer[1].len = burst_length;
 
        spi_message_init(&adis->msg);
        spi_message_add_tail(&adis->xfer[0], &adis->msg);
@@ -61,6 +65,7 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
        struct adis16400_state *st = iio_priv(indio_dev);
        struct adis *adis = &st->adis;
        u32 old_speed_hz = st->adis.spi->max_speed_hz;
+       void *buffer;
        int ret;
 
        if (!adis->buffer)
@@ -81,7 +86,12 @@ irqreturn_t adis16400_trigger_handler(int irq, void *p)
                spi_setup(st->adis.spi);
        }
 
-       iio_push_to_buffers_with_timestamp(indio_dev, adis->buffer,
+       if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
+               buffer = adis->buffer + sizeof(u16);
+       else
+               buffer = adis->buffer;
+
+       iio_push_to_buffers_with_timestamp(indio_dev, buffer,
                pf->timestamp);
 
        iio_trigger_notify_done(indio_dev->trig);
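On parts whose burst frame leads with DIAG_STAT, the sample payload begins one 16-bit word into the RX buffer, so the push into the IIO buffer has to skip that word. A sketch of the offset logic from the hunk above:

	/*
	 * Burst frame on BURST_DIAG_STAT parts:
	 *   [DIAG_STAT][chan 0][chan 1]...   <- adis->buffer
	 * Channel data therefore starts sizeof(u16) in.
	 */
	void *buffer = adis->buffer;

	if (st->variant->flags & ADIS16400_BURST_DIAG_STAT)
		buffer += sizeof(u16);

	iio_push_to_buffers_with_timestamp(indio_dev, buffer, pf->timestamp);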
index fa795dcd5f75ec0a1e8de143bc0122ef36bf9409..2fd68f2219a7d422a604b91ce90138f1050528cd 100644 (file)
@@ -405,6 +405,11 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
                        *val = st->variant->temp_scale_nano / 1000000;
                        *val2 = (st->variant->temp_scale_nano % 1000000);
                        return IIO_VAL_INT_PLUS_MICRO;
+               case IIO_PRESSURE:
+                       /* 20 uBar = 0.002 kPa */
+                       *val = 0;
+                       *val2 = 2000;
+                       return IIO_VAL_INT_PLUS_MICRO;
                default:
                        return -EINVAL;
                }
@@ -454,10 +459,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
        }
 }
 
-#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si) { \
+#define ADIS16400_VOLTAGE_CHAN(addr, bits, name, si, chn) { \
        .type = IIO_VOLTAGE, \
        .indexed = 1, \
-       .channel = 0, \
+       .channel = chn, \
        .extend_name = name, \
        .info_mask_separate = BIT(IIO_CHAN_INFO_RAW) | \
                BIT(IIO_CHAN_INFO_SCALE), \
@@ -474,10 +479,10 @@ static int adis16400_read_raw(struct iio_dev *indio_dev,
 }
 
 #define ADIS16400_SUPPLY_CHAN(addr, bits) \
-       ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY)
+       ADIS16400_VOLTAGE_CHAN(addr, bits, "supply", ADIS16400_SCAN_SUPPLY, 0)
 
 #define ADIS16400_AUX_ADC_CHAN(addr, bits) \
-       ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC)
+       ADIS16400_VOLTAGE_CHAN(addr, bits, NULL, ADIS16400_SCAN_ADC, 1)
 
 #define ADIS16400_GYRO_CHAN(mod, addr, bits) { \
        .type = IIO_ANGL_VEL, \
@@ -773,7 +778,8 @@ static struct adis16400_chip_info adis16400_chips[] = {
                .channels = adis16448_channels,
                .num_channels = ARRAY_SIZE(adis16448_channels),
                .flags = ADIS16400_HAS_PROD_ID |
-                               ADIS16400_HAS_SERIAL_NUMBER,
+                               ADIS16400_HAS_SERIAL_NUMBER |
+                               ADIS16400_BURST_DIAG_STAT,
                .gyro_scale_micro = IIO_DEGREE_TO_RAD(10000), /* 0.01 deg/s */
                .accel_scale_micro = IIO_G_TO_M_S_2(833), /* 1/1200 g */
                .temp_scale_nano = 73860000, /* 0.07386 C */
@@ -791,11 +797,6 @@ static const struct iio_info adis16400_info = {
        .debugfs_reg_access = adis_debugfs_reg_access,
 };
 
-static const unsigned long adis16400_burst_scan_mask[] = {
-       ~0UL,
-       0,
-};
-
 static const char * const adis16400_status_error_msgs[] = {
        [ADIS16400_DIAG_STAT_ZACCL_FAIL] = "Z-axis accelerometer self-test failure",
        [ADIS16400_DIAG_STAT_YACCL_FAIL] = "Y-axis accelerometer self-test failure",
@@ -843,6 +844,20 @@ static const struct adis_data adis16400_data = {
                BIT(ADIS16400_DIAG_STAT_POWER_LOW),
 };
 
+static void adis16400_setup_chan_mask(struct adis16400_state *st)
+{
+       const struct adis16400_chip_info *chip_info = st->variant;
+       unsigned i;
+
+       for (i = 0; i < chip_info->num_channels; i++) {
+               const struct iio_chan_spec *ch = &chip_info->channels[i];
+
+               if (ch->scan_index >= 0 &&
+                   ch->scan_index != ADIS16400_SCAN_TIMESTAMP)
+                       st->avail_scan_mask[0] |= BIT(ch->scan_index);
+       }
+}
+
 static int adis16400_probe(struct spi_device *spi)
 {
        struct adis16400_state *st;
@@ -866,8 +881,10 @@ static int adis16400_probe(struct spi_device *spi)
        indio_dev->info = &adis16400_info;
        indio_dev->modes = INDIO_DIRECT_MODE;
 
-       if (!(st->variant->flags & ADIS16400_NO_BURST))
-               indio_dev->available_scan_masks = adis16400_burst_scan_mask;
+       if (!(st->variant->flags & ADIS16400_NO_BURST)) {
+               adis16400_setup_chan_mask(st);
+               indio_dev->available_scan_masks = st->avail_scan_mask;
+       }
 
        ret = adis_init(&st->adis, indio_dev, spi, &adis16400_data);
        if (ret)
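Burst mode always transfers every channel, so only one scan mask is valid; it is now derived from the registered channels instead of the old blanket ~0UL, which wrongly claimed nonexistent scan indices. A sketch of building such a one-entry, zero-terminated mask list:

	/* In the driver this array lives in device state, not on the
	 * stack; available_scan_masks must stay valid after probe. */
	unsigned long avail_scan_mask[2] = { 0, 0 };
	int i;

	for (i = 0; i < chip_info->num_channels; i++) {
		int si = chip_info->channels[i].scan_index;

		if (si >= 0 && si != ADIS16400_SCAN_TIMESTAMP)
			avail_scan_mask[0] |= BIT(si);
	}
	indio_dev->available_scan_masks = avail_scan_mask;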
index 847ca561afe014e83707d04eaef02054f9b2bbfa..55c267bbfd2f9293340fa2851bcf5cb245b22cc9 100644 (file)
@@ -38,7 +38,8 @@ static int iio_request_update_kfifo(struct iio_buffer *r)
                kfifo_free(&buf->kf);
                ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
                                   buf->buffer.length);
-               buf->update_needed = false;
+               if (ret >= 0)
+                       buf->update_needed = false;
        } else {
                kfifo_reset_out(&buf->kf);
        }
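The flag may only be cleared once the replacement FIFO actually exists; otherwise a failed allocation would be mistaken for a completed update and never retried. The rule, in brief:

	ret = __iio_allocate_kfifo(buf, buf->buffer.bytes_per_datum,
				   buf->buffer.length);
	if (ret >= 0)		/* success: the resize really happened */
		buf->update_needed = false;
	/* on failure the flag stays set and the next request retries */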
index 91ecc46ffeaa0b9e5ecf25d3575d8ab364e4abe8..ef60bae738e344c510d529578051f7767eba7916 100644 (file)
@@ -43,8 +43,6 @@ struct prox_state {
 static const struct iio_chan_spec prox_channels[] = {
        {
                .type = IIO_PROXIMITY,
-               .modified = 1,
-               .channel2 = IIO_NO_MOD,
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
                BIT(IIO_CHAN_INFO_SCALE) |
@@ -253,7 +251,6 @@ static int hid_prox_probe(struct platform_device *pdev)
        struct iio_dev *indio_dev;
        struct prox_state *prox_state;
        struct hid_sensor_hub_device *hsdev = pdev->dev.platform_data;
-       struct iio_chan_spec *channels;
 
        indio_dev = devm_iio_device_alloc(&pdev->dev,
                                sizeof(struct prox_state));
@@ -272,20 +269,21 @@ static int hid_prox_probe(struct platform_device *pdev)
                return ret;
        }
 
-       channels = kmemdup(prox_channels, sizeof(prox_channels), GFP_KERNEL);
-       if (!channels) {
+       indio_dev->channels = kmemdup(prox_channels, sizeof(prox_channels),
+                                     GFP_KERNEL);
+       if (!indio_dev->channels) {
                dev_err(&pdev->dev, "failed to duplicate channels\n");
                return -ENOMEM;
        }
 
-       ret = prox_parse_report(pdev, hsdev, channels,
+       ret = prox_parse_report(pdev, hsdev,
+                               (struct iio_chan_spec *)indio_dev->channels,
                                HID_USAGE_SENSOR_PROX, prox_state);
        if (ret) {
                dev_err(&pdev->dev, "failed to setup attributes\n");
                goto error_free_dev_mem;
        }
 
-       indio_dev->channels = channels;
        indio_dev->num_channels =
                                ARRAY_SIZE(prox_channels);
        indio_dev->dev.parent = &pdev->dev;
index 8ade473f99fee3ab35cb224eab0f69afebe2bf70..2e56f812a644d67a73f0e9bdccd0992c39777a25 100644 (file)
@@ -369,6 +369,7 @@ int st_magn_common_probe(struct iio_dev *indio_dev)
 
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &magn_info;
+       mutex_init(&mdata->tb.buf_lock);
 
        st_sensors_power_enable(indio_dev);
 
index 7c623e2bd6336c0968254e73547de8945efe4a42..a2602d8dd6d5ceea974cb1fdada67ddde7a2f129 100644 (file)
@@ -172,6 +172,7 @@ static s32 bmp280_compensate_temp(struct bmp280_data *data,
        var2 = (((((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1]))) *
                  ((adc_temp >> 4) - ((s32)le16_to_cpu(buf[T1])))) >> 12) *
                ((s32)(s16)le16_to_cpu(buf[T3]))) >> 14;
+       data->t_fine = var1 + var2;
 
        return (data->t_fine * 5 + 128) >> 8;
 }
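The one-line fix stores t_fine, which the datasheet's compensation defines as var1 + var2 and which the pressure path reuses; without it the temperature was always computed from a zeroed t_fine. The Bosch BMP280 fixed-point formula, sketched with the datasheet's dig_T* calibration names:

	s32 var1, var2, t_fine, temp;

	var1 = (((adc_temp >> 3) - ((s32)dig_T1 << 1)) * (s32)dig_T2) >> 11;
	var2 = (((((adc_temp >> 4) - (s32)dig_T1) *
		  ((adc_temp >> 4) - (s32)dig_T1)) >> 12) * (s32)dig_T3) >> 14;
	t_fine = var1 + var2;		/* the line the fix adds */
	temp = (t_fine * 5 + 128) >> 8;	/* centi-degrees: 5123 = 51.23 C */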
index 7bb8d4c1f7df4922279dd328ddfc83fba1b113c4..3cf0bd67d24ca8001224f960513bbd93b45be1e5 100644 (file)
@@ -47,8 +47,6 @@ struct press_state {
 static const struct iio_chan_spec press_channels[] = {
        {
                .type = IIO_PRESSURE,
-               .modified = 1,
-               .channel2 = IIO_NO_MOD,
                .info_mask_separate = BIT(IIO_CHAN_INFO_RAW),
                .info_mask_shared_by_type = BIT(IIO_CHAN_INFO_OFFSET) |
                BIT(IIO_CHAN_INFO_SCALE) |
index 97baf40d424bd599687b124062081364f94b487e..e881fa6291e9228a56cc0a7f3d6cf3ee51621855 100644 (file)
@@ -417,6 +417,7 @@ int st_press_common_probe(struct iio_dev *indio_dev)
 
        indio_dev->modes = INDIO_DIRECT_MODE;
        indio_dev->info = &press_info;
+       mutex_init(&press_data->tb.buf_lock);
 
        st_sensors_power_enable(indio_dev);
 
index f80da50d84a5b6585a10656b369bcc33aedd0507..38339d220d7f52c402c08f581f20cf4ad8acea49 100644 (file)
@@ -472,13 +472,8 @@ int rdma_addr_find_dmac_by_grh(union ib_gid *sgid, union ib_gid *dgid, u8 *dmac,
        } sgid_addr, dgid_addr;
 
 
-       ret = rdma_gid2ip(&sgid_addr._sockaddr, sgid);
-       if (ret)
-               return ret;
-
-       ret = rdma_gid2ip(&dgid_addr._sockaddr, dgid);
-       if (ret)
-               return ret;
+       rdma_gid2ip(&sgid_addr._sockaddr, sgid);
+       rdma_gid2ip(&dgid_addr._sockaddr, dgid);
 
        memset(&dev_addr, 0, sizeof(dev_addr));
 
@@ -512,10 +507,8 @@ int rdma_addr_find_smac_by_sgid(union ib_gid *sgid, u8 *smac, u16 *vlan_id)
                struct sockaddr_in6 _sockaddr_in6;
        } gid_addr;
 
-       ret = rdma_gid2ip(&gid_addr._sockaddr, sgid);
+       rdma_gid2ip(&gid_addr._sockaddr, sgid);
 
-       if (ret)
-               return ret;
        memset(&dev_addr, 0, sizeof(dev_addr));
        ret = rdma_translate_ip(&gid_addr._sockaddr, &dev_addr, vlan_id);
        if (ret)
index e28a494e2a3a0f72b41af479b269262c6472cb77..0271608a51c40ff2ade721f94b710997ffbe5c2d 100644 (file)
@@ -437,39 +437,38 @@ static struct cm_id_private * cm_acquire_id(__be32 local_id, __be32 remote_id)
        return cm_id_priv;
 }
 
-static void cm_mask_copy(u8 *dst, u8 *src, u8 *mask)
+static void cm_mask_copy(u32 *dst, const u32 *src, const u32 *mask)
 {
        int i;
 
-       for (i = 0; i < IB_CM_COMPARE_SIZE / sizeof(unsigned long); i++)
-               ((unsigned long *) dst)[i] = ((unsigned long *) src)[i] &
-                                            ((unsigned long *) mask)[i];
+       for (i = 0; i < IB_CM_COMPARE_SIZE; i++)
+               dst[i] = src[i] & mask[i];
 }
 
 static int cm_compare_data(struct ib_cm_compare_data *src_data,
                           struct ib_cm_compare_data *dst_data)
 {
-       u8 src[IB_CM_COMPARE_SIZE];
-       u8 dst[IB_CM_COMPARE_SIZE];
+       u32 src[IB_CM_COMPARE_SIZE];
+       u32 dst[IB_CM_COMPARE_SIZE];
 
        if (!src_data || !dst_data)
                return 0;
 
        cm_mask_copy(src, src_data->data, dst_data->mask);
        cm_mask_copy(dst, dst_data->data, src_data->mask);
-       return memcmp(src, dst, IB_CM_COMPARE_SIZE);
+       return memcmp(src, dst, sizeof(src));
 }
 
-static int cm_compare_private_data(u8 *private_data,
+static int cm_compare_private_data(u32 *private_data,
                                   struct ib_cm_compare_data *dst_data)
 {
-       u8 src[IB_CM_COMPARE_SIZE];
+       u32 src[IB_CM_COMPARE_SIZE];
 
        if (!dst_data)
                return 0;
 
        cm_mask_copy(src, private_data, dst_data->mask);
-       return memcmp(src, dst_data->data, IB_CM_COMPARE_SIZE);
+       return memcmp(src, dst_data->data, sizeof(src));
 }
 
 /*
@@ -538,7 +537,7 @@ static struct cm_id_private * cm_insert_listen(struct cm_id_private *cm_id_priv)
 
 static struct cm_id_private * cm_find_listen(struct ib_device *device,
                                             __be64 service_id,
-                                            u8 *private_data)
+                                            u32 *private_data)
 {
        struct rb_node *node = cm.listen_service_table.rb_node;
        struct cm_id_private *cm_id_priv;
@@ -862,6 +861,7 @@ retest:
                cm_reject_sidr_req(cm_id_priv, IB_SIDR_REJECT);
                break;
        case IB_CM_REQ_SENT:
+       case IB_CM_MRA_REQ_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
                spin_unlock_irq(&cm_id_priv->lock);
                ib_send_cm_rej(cm_id, IB_CM_REJ_TIMEOUT,
@@ -880,7 +880,6 @@ retest:
                                       NULL, 0, NULL, 0);
                }
                break;
-       case IB_CM_MRA_REQ_RCVD:
        case IB_CM_REP_SENT:
        case IB_CM_MRA_REP_RCVD:
                ib_cancel_mad(cm_id_priv->av.port->mad_agent, cm_id_priv->msg);
@@ -953,7 +952,7 @@ int ib_cm_listen(struct ib_cm_id *cm_id, __be64 service_id, __be64 service_mask,
                cm_mask_copy(cm_id_priv->compare_data->data,
                             compare_data->data, compare_data->mask);
                memcpy(cm_id_priv->compare_data->mask, compare_data->mask,
-                      IB_CM_COMPARE_SIZE);
+                      sizeof(compare_data->mask));
        }
 
        cm_id->state = IB_CM_LISTEN;
index be068f47e47e86b79065356e62bf1d193c1da36f..8b76f0ef965e88d7171e8cebcbaf2a0c9a29f962 100644 (file)
@@ -103,7 +103,7 @@ struct cm_req_msg {
        /* local ACK timeout:5, rsvd:3 */
        u8 alt_offset139;
 
-       u8 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE];
+       u32 private_data[IB_CM_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 
 } __attribute__ ((packed));
 
@@ -801,7 +801,7 @@ struct cm_sidr_req_msg {
        __be16 rsvd;
        __be64 service_id;
 
-       u8 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE];
+       u32 private_data[IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE / sizeof(u32)];
 } __attribute__ ((packed));
 
 struct cm_sidr_rep_msg {
index d570030d899c0c662d2b208ce30dd2cd20bc3f78..38ffe098150351aef9ff2ac650726ae7926f6856 100644 (file)
@@ -845,33 +845,49 @@ static void cma_save_ib_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
        listen_ib = (struct sockaddr_ib *) &listen_id->route.addr.src_addr;
        ib = (struct sockaddr_ib *) &id->route.addr.src_addr;
        ib->sib_family = listen_ib->sib_family;
-       ib->sib_pkey = path->pkey;
-       ib->sib_flowinfo = path->flow_label;
-       memcpy(&ib->sib_addr, &path->sgid, 16);
+       if (path) {
+               ib->sib_pkey = path->pkey;
+               ib->sib_flowinfo = path->flow_label;
+               memcpy(&ib->sib_addr, &path->sgid, 16);
+       } else {
+               ib->sib_pkey = listen_ib->sib_pkey;
+               ib->sib_flowinfo = listen_ib->sib_flowinfo;
+               ib->sib_addr = listen_ib->sib_addr;
+       }
        ib->sib_sid = listen_ib->sib_sid;
        ib->sib_sid_mask = cpu_to_be64(0xffffffffffffffffULL);
        ib->sib_scope_id = listen_ib->sib_scope_id;
 
-       ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
-       ib->sib_family = listen_ib->sib_family;
-       ib->sib_pkey = path->pkey;
-       ib->sib_flowinfo = path->flow_label;
-       memcpy(&ib->sib_addr, &path->dgid, 16);
+       if (path) {
+               ib = (struct sockaddr_ib *) &id->route.addr.dst_addr;
+               ib->sib_family = listen_ib->sib_family;
+               ib->sib_pkey = path->pkey;
+               ib->sib_flowinfo = path->flow_label;
+               memcpy(&ib->sib_addr, &path->dgid, 16);
+       }
+}
+
+static __be16 ss_get_port(const struct sockaddr_storage *ss)
+{
+       if (ss->ss_family == AF_INET)
+               return ((struct sockaddr_in *)ss)->sin_port;
+       else if (ss->ss_family == AF_INET6)
+               return ((struct sockaddr_in6 *)ss)->sin6_port;
+       BUG();
 }
 
 static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
                              struct cma_hdr *hdr)
 {
-       struct sockaddr_in *listen4, *ip4;
+       struct sockaddr_in *ip4;
 
-       listen4 = (struct sockaddr_in *) &listen_id->route.addr.src_addr;
        ip4 = (struct sockaddr_in *) &id->route.addr.src_addr;
-       ip4->sin_family = listen4->sin_family;
+       ip4->sin_family = AF_INET;
        ip4->sin_addr.s_addr = hdr->dst_addr.ip4.addr;
-       ip4->sin_port = listen4->sin_port;
+       ip4->sin_port = ss_get_port(&listen_id->route.addr.src_addr);
 
        ip4 = (struct sockaddr_in *) &id->route.addr.dst_addr;
-       ip4->sin_family = listen4->sin_family;
+       ip4->sin_family = AF_INET;
        ip4->sin_addr.s_addr = hdr->src_addr.ip4.addr;
        ip4->sin_port = hdr->port;
 }
@@ -879,16 +895,15 @@ static void cma_save_ip4_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_i
 static void cma_save_ip6_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id,
                              struct cma_hdr *hdr)
 {
-       struct sockaddr_in6 *listen6, *ip6;
+       struct sockaddr_in6 *ip6;
 
-       listen6 = (struct sockaddr_in6 *) &listen_id->route.addr.src_addr;
        ip6 = (struct sockaddr_in6 *) &id->route.addr.src_addr;
-       ip6->sin6_family = listen6->sin6_family;
+       ip6->sin6_family = AF_INET6;
        ip6->sin6_addr = hdr->dst_addr.ip6;
-       ip6->sin6_port = listen6->sin6_port;
+       ip6->sin6_port = ss_get_port(&listen_id->route.addr.src_addr);
 
        ip6 = (struct sockaddr_in6 *) &id->route.addr.dst_addr;
-       ip6->sin6_family = listen6->sin6_family;
+       ip6->sin6_family = AF_INET6;
        ip6->sin6_addr = hdr->src_addr.ip6;
        ip6->sin6_port = hdr->port;
 }
@@ -898,9 +913,11 @@ static int cma_save_net_info(struct rdma_cm_id *id, struct rdma_cm_id *listen_id
 {
        struct cma_hdr *hdr;
 
-       if ((listen_id->route.addr.src_addr.ss_family == AF_IB) &&
-           (ib_event->event == IB_CM_REQ_RECEIVED)) {
-               cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
+       if (listen_id->route.addr.src_addr.ss_family == AF_IB) {
+               if (ib_event->event == IB_CM_REQ_RECEIVED)
+                       cma_save_ib_info(id, listen_id, ib_event->param.req_rcvd.primary_path);
+               else if (ib_event->event == IB_CM_SIDR_REQ_RECEIVED)
+                       cma_save_ib_info(id, listen_id, NULL);
                return 0;
        }
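For SIDR requests there is no path record, so the addresses are copied from the listener instead, and the port now comes from a small family dispatcher rather than a blindly-cast sockaddr_in. The dispatch pattern, sketched without the kernel helper's BUG() on unknown families:

static __be16 get_port(const struct sockaddr_storage *ss)
{
	switch (ss->ss_family) {
	case AF_INET:
		return ((const struct sockaddr_in *)ss)->sin_port;
	case AF_INET6:
		return ((const struct sockaddr_in6 *)ss)->sin6_port;
	default:
		return 0;	/* sketch: fail soft instead of BUG() */
	}
}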
 
index b85ddbc979e069e909cee12ac37cd31be244652d..e6ffa2e66c1ac54b7a2645f59bbb28c00bc91cc3 100644 (file)
@@ -33,7 +33,7 @@
 
 #include "iwpm_util.h"
 
-static const char iwpm_ulib_name[] = "iWarpPortMapperUser";
+static const char iwpm_ulib_name[IWPM_ULIBNAME_SIZE] = "iWarpPortMapperUser";
 static int iwpm_ulib_version = 3;
 static int iwpm_user_pid = IWPM_PID_UNDEFINED;
 static atomic_t echo_nlmsg_seq;
@@ -468,7 +468,8 @@ add_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_mapping_cb);
 
-/* netlink attribute policy for the response to add and query mapping request */
+/* netlink attribute policy for the response to add and query mapping
+ * requests, and for the response carrying remote address info */
 static const struct nla_policy resp_query_policy[IWPM_NLA_RQUERY_MAPPING_MAX] = {
        [IWPM_NLA_QUERY_MAPPING_SEQ]      = { .type = NLA_U32 },
        [IWPM_NLA_QUERY_LOCAL_ADDR]       = { .len = sizeof(struct sockaddr_storage) },
@@ -559,6 +560,76 @@ query_mapping_response_exit:
 }
 EXPORT_SYMBOL(iwpm_add_and_query_mapping_cb);
 
+/*
+ * iwpm_remote_info_cb - Process a port mapper message containing
+ *                       the address info of the remote connecting peer
+ */
+int iwpm_remote_info_cb(struct sk_buff *skb, struct netlink_callback *cb)
+{
+       struct nlattr *nltb[IWPM_NLA_RQUERY_MAPPING_MAX];
+       struct sockaddr_storage *local_sockaddr, *remote_sockaddr;
+       struct sockaddr_storage *mapped_loc_sockaddr, *mapped_rem_sockaddr;
+       struct iwpm_remote_info *rem_info;
+       const char *msg_type;
+       u8 nl_client;
+       int ret = -EINVAL;
+
+       msg_type = "Remote Mapping info";
+       if (iwpm_parse_nlmsg(cb, IWPM_NLA_RQUERY_MAPPING_MAX,
+                               resp_query_policy, nltb, msg_type))
+               return ret;
+
+       nl_client = RDMA_NL_GET_CLIENT(cb->nlh->nlmsg_type);
+       if (!iwpm_valid_client(nl_client)) {
+               pr_info("%s: Invalid port mapper client = %d\n",
+                               __func__, nl_client);
+               return ret;
+       }
+       atomic_set(&echo_nlmsg_seq, cb->nlh->nlmsg_seq);
+
+       local_sockaddr = (struct sockaddr_storage *)
+                       nla_data(nltb[IWPM_NLA_QUERY_LOCAL_ADDR]);
+       remote_sockaddr = (struct sockaddr_storage *)
+                       nla_data(nltb[IWPM_NLA_QUERY_REMOTE_ADDR]);
+       mapped_loc_sockaddr = (struct sockaddr_storage *)
+                       nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_LOC_ADDR]);
+       mapped_rem_sockaddr = (struct sockaddr_storage *)
+                       nla_data(nltb[IWPM_NLA_RQUERY_MAPPED_REM_ADDR]);
+
+       if (mapped_loc_sockaddr->ss_family != local_sockaddr->ss_family ||
+               mapped_rem_sockaddr->ss_family != remote_sockaddr->ss_family) {
+               pr_info("%s: Sockaddr family doesn't match the requested one\n",
+                               __func__);
+               return ret;
+       }
+       rem_info = kzalloc(sizeof(struct iwpm_remote_info), GFP_ATOMIC);
+       if (!rem_info) {
+               pr_err("%s: Unable to allocate a remote info\n", __func__);
+               ret = -ENOMEM;
+               return ret;
+       }
+       memcpy(&rem_info->mapped_loc_sockaddr, mapped_loc_sockaddr,
+              sizeof(struct sockaddr_storage));
+       memcpy(&rem_info->remote_sockaddr, remote_sockaddr,
+              sizeof(struct sockaddr_storage));
+       memcpy(&rem_info->mapped_rem_sockaddr, mapped_rem_sockaddr,
+              sizeof(struct sockaddr_storage));
+       rem_info->nl_client = nl_client;
+
+       iwpm_add_remote_info(rem_info);
+
+       iwpm_print_sockaddr(local_sockaddr,
+                       "remote_info: Local sockaddr:");
+       iwpm_print_sockaddr(mapped_loc_sockaddr,
+                       "remote_info: Mapped local sockaddr:");
+       iwpm_print_sockaddr(remote_sockaddr,
+                       "remote_info: Remote sockaddr:");
+       iwpm_print_sockaddr(mapped_rem_sockaddr,
+                       "remote_info: Mapped remote sockaddr:");
+       return ret;
+}
+EXPORT_SYMBOL(iwpm_remote_info_cb);
+
 /* netlink attribute policy for the received request for mapping info */
 static const struct nla_policy resp_mapinfo_policy[IWPM_NLA_MAPINFO_REQ_MAX] = {
        [IWPM_NLA_MAPINFO_ULIB_NAME] = { .type = NLA_STRING,
index 69e9f84c16056246bd5d45ca08293ce5b272ec84..a626795bf9c71f43f7d526d07ae3a490399fdb79 100644 (file)
 
 #include "iwpm_util.h"
 
-#define IWPM_HASH_BUCKET_SIZE  512
-#define IWPM_HASH_BUCKET_MASK  (IWPM_HASH_BUCKET_SIZE - 1)
+#define IWPM_MAPINFO_HASH_SIZE 512
+#define IWPM_MAPINFO_HASH_MASK (IWPM_MAPINFO_HASH_SIZE - 1)
+#define IWPM_REMINFO_HASH_SIZE 64
+#define IWPM_REMINFO_HASH_MASK (IWPM_REMINFO_HASH_SIZE - 1)
 
 static LIST_HEAD(iwpm_nlmsg_req_list);
 static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
@@ -42,31 +44,49 @@ static DEFINE_SPINLOCK(iwpm_nlmsg_req_lock);
 static struct hlist_head *iwpm_hash_bucket;
 static DEFINE_SPINLOCK(iwpm_mapinfo_lock);
 
+static struct hlist_head *iwpm_reminfo_bucket;
+static DEFINE_SPINLOCK(iwpm_reminfo_lock);
+
 static DEFINE_MUTEX(iwpm_admin_lock);
 static struct iwpm_admin_data iwpm_admin;
 
 int iwpm_init(u8 nl_client)
 {
+       int ret = 0;
        if (iwpm_valid_client(nl_client))
                return -EINVAL;
        mutex_lock(&iwpm_admin_lock);
        if (atomic_read(&iwpm_admin.refcount) == 0) {
-               iwpm_hash_bucket = kzalloc(IWPM_HASH_BUCKET_SIZE *
+               iwpm_hash_bucket = kzalloc(IWPM_MAPINFO_HASH_SIZE *
                                        sizeof(struct hlist_head), GFP_KERNEL);
                if (!iwpm_hash_bucket) {
-                       mutex_unlock(&iwpm_admin_lock);
+                       ret = -ENOMEM;
                        pr_err("%s Unable to create mapinfo hash table\n", __func__);
-                       return -ENOMEM;
+                       goto init_exit;
+               }
+               iwpm_reminfo_bucket = kzalloc(IWPM_REMINFO_HASH_SIZE *
+                                       sizeof(struct hlist_head), GFP_KERNEL);
+               if (!iwpm_reminfo_bucket) {
+                       kfree(iwpm_hash_bucket);
+                       ret = -ENOMEM;
+                       pr_err("%s Unable to create reminfo hash table\n", __func__);
+                       goto init_exit;
                }
        }
        atomic_inc(&iwpm_admin.refcount);
+init_exit:
        mutex_unlock(&iwpm_admin_lock);
-       iwpm_set_valid(nl_client, 1);
-       return 0;
+       if (!ret) {
+               iwpm_set_valid(nl_client, 1);
+               pr_debug("%s: Mapinfo and reminfo tables are created\n",
+                               __func__);
+       }
+       return ret;
 }
 EXPORT_SYMBOL(iwpm_init);
 
 static void free_hash_bucket(void);
+static void free_reminfo_bucket(void);
 
 int iwpm_exit(u8 nl_client)
 {
@@ -81,7 +101,8 @@ int iwpm_exit(u8 nl_client)
        }
        if (atomic_dec_and_test(&iwpm_admin.refcount)) {
                free_hash_bucket();
-               pr_debug("%s: Mapinfo hash table is destroyed\n", __func__);
+               free_reminfo_bucket();
+               pr_debug("%s: Resources are destroyed\n", __func__);
        }
        mutex_unlock(&iwpm_admin_lock);
        iwpm_set_valid(nl_client, 0);
@@ -89,7 +110,7 @@ int iwpm_exit(u8 nl_client)
 }
 EXPORT_SYMBOL(iwpm_exit);
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage *,
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage *,
                                               struct sockaddr_storage *);
 
 int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
@@ -99,9 +120,10 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
        struct hlist_head *hash_bucket_head;
        struct iwpm_mapping_info *map_info;
        unsigned long flags;
+       int ret = -EINVAL;
 
        if (!iwpm_valid_client(nl_client))
-               return -EINVAL;
+               return ret;
        map_info = kzalloc(sizeof(struct iwpm_mapping_info), GFP_KERNEL);
        if (!map_info) {
                pr_err("%s: Unable to allocate a mapping info\n", __func__);
@@ -115,13 +137,16 @@ int iwpm_create_mapinfo(struct sockaddr_storage *local_sockaddr,
 
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        if (iwpm_hash_bucket) {
-               hash_bucket_head = get_hash_bucket_head(
+               hash_bucket_head = get_mapinfo_hash_bucket(
                                        &map_info->local_sockaddr,
                                        &map_info->mapped_sockaddr);
-               hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+               if (hash_bucket_head) {
+                       hlist_add_head(&map_info->hlist_node, hash_bucket_head);
+                       ret = 0;
+               }
        }
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
-       return 0;
+       return ret;
 }
 EXPORT_SYMBOL(iwpm_create_mapinfo);
 
@@ -136,9 +161,12 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
 
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        if (iwpm_hash_bucket) {
-               hash_bucket_head = get_hash_bucket_head(
+               hash_bucket_head = get_mapinfo_hash_bucket(
                                        local_sockaddr,
                                        mapped_local_addr);
+               if (!hash_bucket_head)
+                       goto remove_mapinfo_exit;
+
                hlist_for_each_entry_safe(map_info, tmp_hlist_node,
                                        hash_bucket_head, hlist_node) {
 
@@ -152,6 +180,7 @@ int iwpm_remove_mapinfo(struct sockaddr_storage *local_sockaddr,
                        }
                }
        }
+remove_mapinfo_exit:
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
        return ret;
 }
@@ -166,7 +195,7 @@ static void free_hash_bucket(void)
 
        /* remove all the mapinfo data from the list */
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-       for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+       for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
                hlist_for_each_entry_safe(map_info, tmp_hlist_node,
                        &iwpm_hash_bucket[i], hlist_node) {
 
@@ -180,6 +209,96 @@ static void free_hash_bucket(void)
        spin_unlock_irqrestore(&iwpm_mapinfo_lock, flags);
 }
 
+static void free_reminfo_bucket(void)
+{
+       struct hlist_node *tmp_hlist_node;
+       struct iwpm_remote_info *rem_info;
+       unsigned long flags;
+       int i;
+
+       /* remove all the remote info from the list */
+       spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+       for (i = 0; i < IWPM_REMINFO_HASH_SIZE; i++) {
+               hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+                       &iwpm_reminfo_bucket[i], hlist_node) {
+
+                               hlist_del_init(&rem_info->hlist_node);
+                               kfree(rem_info);
+                       }
+       }
+       /* free the hash list */
+       kfree(iwpm_reminfo_bucket);
+       iwpm_reminfo_bucket = NULL;
+       spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage *,
+                                               struct sockaddr_storage *);
+
+void iwpm_add_remote_info(struct iwpm_remote_info *rem_info)
+{
+       struct hlist_head *hash_bucket_head;
+       unsigned long flags;
+
+       spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+       if (iwpm_reminfo_bucket) {
+               hash_bucket_head = get_reminfo_hash_bucket(
+                                       &rem_info->mapped_loc_sockaddr,
+                                       &rem_info->mapped_rem_sockaddr);
+               if (hash_bucket_head)
+                       hlist_add_head(&rem_info->hlist_node, hash_bucket_head);
+       }
+       spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+}
+
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+                               struct sockaddr_storage *mapped_rem_addr,
+                               struct sockaddr_storage *remote_addr,
+                               u8 nl_client)
+{
+       struct hlist_node *tmp_hlist_node;
+       struct hlist_head *hash_bucket_head;
+       struct iwpm_remote_info *rem_info = NULL;
+       unsigned long flags;
+       int ret = -EINVAL;
+
+       if (!iwpm_valid_client(nl_client)) {
+               pr_info("%s: Invalid client = %d\n", __func__, nl_client);
+               return ret;
+       }
+       spin_lock_irqsave(&iwpm_reminfo_lock, flags);
+       if (iwpm_reminfo_bucket) {
+               hash_bucket_head = get_reminfo_hash_bucket(
+                                       mapped_loc_addr,
+                                       mapped_rem_addr);
+               if (!hash_bucket_head)
+                       goto get_remote_info_exit;
+               hlist_for_each_entry_safe(rem_info, tmp_hlist_node,
+                                       hash_bucket_head, hlist_node) {
+
+                       if (!iwpm_compare_sockaddr(&rem_info->mapped_loc_sockaddr,
+                               mapped_loc_addr) &&
+                               !iwpm_compare_sockaddr(&rem_info->mapped_rem_sockaddr,
+                               mapped_rem_addr)) {
+
+                               memcpy(remote_addr, &rem_info->remote_sockaddr,
+                                       sizeof(struct sockaddr_storage));
+                               iwpm_print_sockaddr(remote_addr,
+                                               "get_remote_info: Remote sockaddr:");
+
+                               hlist_del_init(&rem_info->hlist_node);
+                               kfree(rem_info);
+                               ret = 0;
+                               break;
+                       }
+               }
+       }
+get_remote_info_exit:
+       spin_unlock_irqrestore(&iwpm_reminfo_lock, flags);
+       return ret;
+}
+EXPORT_SYMBOL(iwpm_get_remote_info);
+
 struct iwpm_nlmsg_request *iwpm_get_nlmsg_request(__u32 nlmsg_seq,
                                        u8 nl_client, gfp_t gfp)
 {
@@ -409,31 +528,54 @@ static u32 iwpm_ipv4_jhash(struct sockaddr_in *ipv4_sockaddr)
        return hash;
 }
 
-static struct hlist_head *get_hash_bucket_head(struct sockaddr_storage
-                                              *local_sockaddr,
-                                              struct sockaddr_storage
-                                              *mapped_sockaddr)
+static int get_hash_bucket(struct sockaddr_storage *a_sockaddr,
+                               struct sockaddr_storage *b_sockaddr, u32 *hash)
 {
-       u32 local_hash, mapped_hash, hash;
+       u32 a_hash, b_hash;
 
-       if (local_sockaddr->ss_family == AF_INET) {
-               local_hash = iwpm_ipv4_jhash((struct sockaddr_in *) local_sockaddr);
-               mapped_hash = iwpm_ipv4_jhash((struct sockaddr_in *) mapped_sockaddr);
+       if (a_sockaddr->ss_family == AF_INET) {
+               a_hash = iwpm_ipv4_jhash((struct sockaddr_in *) a_sockaddr);
+               b_hash = iwpm_ipv4_jhash((struct sockaddr_in *) b_sockaddr);
 
-       } else if (local_sockaddr->ss_family == AF_INET6) {
-               local_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) local_sockaddr);
-               mapped_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) mapped_sockaddr);
+       } else if (a_sockaddr->ss_family == AF_INET6) {
+               a_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) a_sockaddr);
+               b_hash = iwpm_ipv6_jhash((struct sockaddr_in6 *) b_sockaddr);
        } else {
                pr_err("%s: Invalid sockaddr family\n", __func__);
-               return NULL;
+               return -EINVAL;
        }
 
-       if (local_hash == mapped_hash) /* if port mapper isn't available */
-               hash = local_hash;
+       if (a_hash == b_hash) /* if port mapper isn't available */
+               *hash = a_hash;
        else
-               hash = jhash_2words(local_hash, mapped_hash, 0);
+               *hash = jhash_2words(a_hash, b_hash, 0);
+       return 0;
+}
+
+static struct hlist_head *get_mapinfo_hash_bucket(struct sockaddr_storage
+                               *local_sockaddr, struct sockaddr_storage
+                               *mapped_sockaddr)
+{
+       u32 hash;
+       int ret;
 
-       return &iwpm_hash_bucket[hash & IWPM_HASH_BUCKET_MASK];
+       ret = get_hash_bucket(local_sockaddr, mapped_sockaddr, &hash);
+       if (ret)
+               return NULL;
+       return &iwpm_hash_bucket[hash & IWPM_MAPINFO_HASH_MASK];
+}
+
+static struct hlist_head *get_reminfo_hash_bucket(struct sockaddr_storage
+                               *mapped_loc_sockaddr, struct sockaddr_storage
+                               *mapped_rem_sockaddr)
+{
+       u32 hash;
+       int ret;
+
+       ret = get_hash_bucket(mapped_loc_sockaddr, mapped_rem_sockaddr, &hash);
+       if (ret)
+               return NULL;
+       return &iwpm_reminfo_bucket[hash & IWPM_REMINFO_HASH_MASK];
 }
 
 static int send_mapinfo_num(u32 mapping_num, u8 nl_client, int iwpm_pid)
@@ -512,7 +654,7 @@ int iwpm_send_mapinfo(u8 nl_client, int iwpm_pid)
        }
        skb_num++;
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
-       for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+       for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
                hlist_for_each_entry(map_info, &iwpm_hash_bucket[i],
                                     hlist_node) {
                        if (map_info->nl_client != nl_client)
@@ -595,7 +737,7 @@ int iwpm_mapinfo_available(void)
 
        spin_lock_irqsave(&iwpm_mapinfo_lock, flags);
        if (iwpm_hash_bucket) {
-               for (i = 0; i < IWPM_HASH_BUCKET_SIZE; i++) {
+               for (i = 0; i < IWPM_MAPINFO_HASH_SIZE; i++) {
                        if (!hlist_empty(&iwpm_hash_bucket[i])) {
                                full_bucket = 1;
                                break;
index 9777c869a1405d54ab47f5132595bd3868953383..ee2d9ff095be2d68d14c9c48eb551f9647ca562f 100644 (file)
@@ -76,6 +76,14 @@ struct iwpm_mapping_info {
        u8     nl_client;
 };
 
+struct iwpm_remote_info {
+       struct hlist_node hlist_node;
+       struct sockaddr_storage remote_sockaddr;
+       struct sockaddr_storage mapped_loc_sockaddr;
+       struct sockaddr_storage mapped_rem_sockaddr;
+       u8     nl_client;
+};
+
 struct iwpm_admin_data {
        atomic_t refcount;
        atomic_t nlmsg_seq;
@@ -127,6 +135,13 @@ int iwpm_wait_complete_req(struct iwpm_nlmsg_request *nlmsg_request);
  */
 int iwpm_get_nlmsg_seq(void);
 
+/**
+ * iwpm_add_remote_info - Add remote address info of the connecting peer
+ *                    to the remote info hash table
+ * @reminfo: The remote info to be added
+ */
+void iwpm_add_remote_info(struct iwpm_remote_info *reminfo);
+
 /**
  * iwpm_valid_client - Check if the port mapper client is valid
  * @nl_client: The index of the netlink client
index 8b8cc6fa0ab0c1ebf966b2ace4932807d9181bf1..40becdb3196e07b97c94ade818af5755bfaae4db 100644 (file)
@@ -446,7 +446,6 @@ static int ib_umem_odp_map_dma_single_page(
        int remove_existing_mapping = 0;
        int ret = 0;
 
-       mutex_lock(&umem->odp_data->umem_mutex);
        /*
         * Note: we avoid writing if seq is different from the initial seq, to
         * handle case of a racing notifier. This check also allows us to bail
@@ -479,8 +478,6 @@ static int ib_umem_odp_map_dma_single_page(
        }
 
 out:
-       mutex_unlock(&umem->odp_data->umem_mutex);
-
        /* On Demand Paging - avoid pinning the page */
        if (umem->context->invalidate_range || !stored_page)
                put_page(page);
@@ -586,6 +583,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
 
                bcnt -= min_t(size_t, npages << PAGE_SHIFT, bcnt);
                user_virt += npages << PAGE_SHIFT;
+               mutex_lock(&umem->odp_data->umem_mutex);
                for (j = 0; j < npages; ++j) {
                        ret = ib_umem_odp_map_dma_single_page(
                                umem, k, base_virt_addr, local_page_list[j],
@@ -594,6 +592,7 @@ int ib_umem_odp_map_dma_pages(struct ib_umem *umem, u64 user_virt, u64 bcnt,
                                break;
                        k++;
                }
+               mutex_unlock(&umem->odp_data->umem_mutex);
 
                if (ret < 0) {
                        /* Release left over pages when handling errors. */
@@ -633,12 +632,11 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
         * faults from completion. We might be racing with other
         * invalidations, so we must make sure we free each page only
         * once. */
+       mutex_lock(&umem->odp_data->umem_mutex);
        for (addr = virt; addr < bound; addr += (u64)umem->page_size) {
                idx = (addr - ib_umem_start(umem)) / PAGE_SIZE;
-               mutex_lock(&umem->odp_data->umem_mutex);
                if (umem->odp_data->page_list[idx]) {
                        struct page *page = umem->odp_data->page_list[idx];
-                       struct page *head_page = compound_head(page);
                        dma_addr_t dma = umem->odp_data->dma_list[idx];
                        dma_addr_t dma_addr = dma & ODP_DMA_ADDR_MASK;
 
@@ -646,7 +644,8 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
 
                        ib_dma_unmap_page(dev, dma_addr, PAGE_SIZE,
                                          DMA_BIDIRECTIONAL);
-                       if (dma & ODP_WRITE_ALLOWED_BIT)
+                       if (dma & ODP_WRITE_ALLOWED_BIT) {
+                               struct page *head_page = compound_head(page);
                                /*
                                 * set_page_dirty prefers being called with
                                 * the page lock. However, MMU notifiers are
@@ -657,13 +656,14 @@ void ib_umem_odp_unmap_dma_pages(struct ib_umem *umem, u64 virt,
                                 * be removed.
                                 */
                                set_page_dirty(head_page);
+                       }
                        /* on demand pinning support */
                        if (!umem->context->invalidate_range)
                                put_page(page);
                        umem->odp_data->page_list[idx] = NULL;
                        umem->odp_data->dma_list[idx] = 0;
                }
-               mutex_unlock(&umem->odp_data->umem_mutex);
        }
+       mutex_unlock(&umem->odp_data->umem_mutex);
 }
 EXPORT_SYMBOL(ib_umem_odp_unmap_dma_pages);
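
    The hunks above convert ib_umem_odp_{map,unmap}_dma_pages() from
    per-page locking to one umem_mutex critical section around each batch,
    so a racing MMU-notifier invalidation sees whole ranges rather than
    interleaving page by page. The shape of the change, as a sketch
    (unmap_one_page() is a stand-in for the loop body above, not a real
    helper):

        static void odp_unmap_range_sketch(struct ib_umem *umem,
                                           u64 virt, u64 bound)
        {
                u64 addr;

                mutex_lock(&umem->odp_data->umem_mutex);   /* once per range */
                for (addr = virt; addr < bound; addr += (u64)umem->page_size)
                        unmap_one_page(umem, addr);        /* body shown above */
                mutex_unlock(&umem->odp_data->umem_mutex);
        }
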
index 57176ddd4c50ff677da5c711b8b7d14e9a020d0d..3ad8dc798f52c9101261882ac19138efb3905b94 100644 (file)
@@ -583,6 +583,22 @@ static void c4iw_record_pm_msg(struct c4iw_ep *ep,
                sizeof(ep->com.mapped_remote_addr));
 }
 
+static int get_remote_addr(struct c4iw_ep *parent_ep, struct c4iw_ep *child_ep)
+{
+       int ret;
+
+       print_addr(&parent_ep->com, __func__, "get_remote_addr parent_ep ");
+       print_addr(&child_ep->com, __func__, "get_remote_addr child_ep ");
+
+       ret = iwpm_get_remote_info(&parent_ep->com.mapped_local_addr,
+                                  &child_ep->com.mapped_remote_addr,
+                                  &child_ep->com.remote_addr, RDMA_NL_C4IW);
+       if (ret)
+               PDBG("Unable to find remote peer addr info - err %d\n", ret);
+
+       return ret;
+}
+
 static void best_mtu(const unsigned short *mtus, unsigned short mtu,
                     unsigned int *idx, int use_ts, int ipv6)
 {
@@ -675,7 +691,7 @@ static int send_connect(struct c4iw_ep *ep)
        if (is_t5(ep->com.dev->rdev.lldi.adapter_type)) {
                opt2 |= T5_OPT_2_VALID_F;
                opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-               opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+               opt2 |= T5_ISS_F;
        }
        t4_set_arp_err_handler(skb, ep, act_open_req_arp_failure);
 
@@ -2042,9 +2058,12 @@ static int act_open_rpl(struct c4iw_dev *dev, struct sk_buff *skb)
             status, status2errno(status));
 
        if (is_neg_adv(status)) {
-               dev_warn(&dev->rdev.lldi.pdev->dev,
-                        "Connection problems for atid %u status %u (%s)\n",
-                        atid, status, neg_adv_str(status));
+               PDBG("%s Connection problems for atid %u status %u (%s)\n",
+                    __func__, atid, status, neg_adv_str(status));
+               ep->stats.connect_neg_adv++;
+               mutex_lock(&dev->rdev.stats.lock);
+               dev->rdev.stats.neg_adv++;
+               mutex_unlock(&dev->rdev.stats.lock);
                return 0;
        }
 
@@ -2214,7 +2233,7 @@ static void accept_cr(struct c4iw_ep *ep, struct sk_buff *skb,
                u32 isn = (prandom_u32() & ~7UL) - 1;
                opt2 |= T5_OPT_2_VALID_F;
                opt2 |= CONG_CNTRL_V(CONG_ALG_TAHOE);
-               opt2 |= CONG_CNTRL_VALID; /* OPT_2_ISS for T5 */
+               opt2 |= T5_ISS_F;
                rpl5 = (void *)rpl;
                memset(&rpl5->iss, 0, roundup(sizeof(*rpl5)-sizeof(*rpl), 16));
                if (peer2peer)
@@ -2352,27 +2371,57 @@ static int pass_accept_req(struct c4iw_dev *dev, struct sk_buff *skb)
        state_set(&child_ep->com, CONNECTING);
        child_ep->com.dev = dev;
        child_ep->com.cm_id = NULL;
+
+       /*
+        * The mapped_local and mapped_remote addresses get set up with
+        * the actual 4-tuple.  The local address is based on the actual
+        * local address of the connection but uses the port number of
+        * the parent listening endpoint.  The remote address is set up
+        * from a query to the IWPM, since we don't know what it
+        * originally was before mapping.  If no mapping was done, then
+        * mapped_remote == remote, and mapped_local == local.
+        */
        if (iptype == 4) {
                struct sockaddr_in *sin = (struct sockaddr_in *)
-                       &child_ep->com.local_addr;
+                       &child_ep->com.mapped_local_addr;
+
                sin->sin_family = PF_INET;
                sin->sin_port = local_port;
                sin->sin_addr.s_addr = *(__be32 *)local_ip;
-               sin = (struct sockaddr_in *)&child_ep->com.remote_addr;
+
+               sin = (struct sockaddr_in *)&child_ep->com.local_addr;
+               sin->sin_family = PF_INET;
+               sin->sin_port = ((struct sockaddr_in *)
+                                &parent_ep->com.local_addr)->sin_port;
+               sin->sin_addr.s_addr = *(__be32 *)local_ip;
+
+               sin = (struct sockaddr_in *)&child_ep->com.mapped_remote_addr;
                sin->sin_family = PF_INET;
                sin->sin_port = peer_port;
                sin->sin_addr.s_addr = *(__be32 *)peer_ip;
        } else {
                struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *)
-                       &child_ep->com.local_addr;
+                       &child_ep->com.mapped_local_addr;
+
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = local_port;
                memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
-               sin6 = (struct sockaddr_in6 *)&child_ep->com.remote_addr;
+
+               sin6 = (struct sockaddr_in6 *)&child_ep->com.local_addr;
+               sin6->sin6_family = PF_INET6;
+               sin6->sin6_port = ((struct sockaddr_in6 *)
+                                  &parent_ep->com.local_addr)->sin6_port;
+               memcpy(sin6->sin6_addr.s6_addr, local_ip, 16);
+
+               sin6 = (struct sockaddr_in6 *)&child_ep->com.mapped_remote_addr;
                sin6->sin6_family = PF_INET6;
                sin6->sin6_port = peer_port;
                memcpy(sin6->sin6_addr.s6_addr, peer_ip, 16);
        }
+       memcpy(&child_ep->com.remote_addr, &child_ep->com.mapped_remote_addr,
+              sizeof(child_ep->com.remote_addr));
+       get_remote_addr(parent_ep, child_ep);
+
        c4iw_get_ep(&parent_ep->com);
        child_ep->parent_ep = parent_ep;
        child_ep->tos = PASS_OPEN_TOS_G(ntohl(req->tos_stid));
@@ -2520,9 +2569,13 @@ static int peer_abort(struct c4iw_dev *dev, struct sk_buff *skb)
 
        ep = lookup_tid(t, tid);
        if (is_neg_adv(req->status)) {
-               dev_warn(&dev->rdev.lldi.pdev->dev,
-                        "Negative advice on abort - tid %u status %d (%s)\n",
-                        ep->hwtid, req->status, neg_adv_str(req->status));
+               PDBG("%s Negative advice on abort - tid %u status %d (%s)\n",
+                    __func__, ep->hwtid, req->status,
+                    neg_adv_str(req->status));
+               ep->stats.abort_neg_adv++;
+               mutex_lock(&dev->rdev.stats.lock);
+               dev->rdev.stats.neg_adv++;
+               mutex_unlock(&dev->rdev.stats.lock);
                return 0;
        }
        PDBG("%s ep %p tid %u state %u\n", __func__, ep, ep->hwtid,
@@ -3571,7 +3624,7 @@ static void send_fw_pass_open_req(struct c4iw_dev *dev, struct sk_buff *skb,
         * TP will ignore any value > 0 for MSS index.
         */
        req->tcb.opt0 = cpu_to_be64(MSS_IDX_V(0xF));
-       req->cookie = (unsigned long)skb;
+       req->cookie = (uintptr_t)skb;
 
        set_wr_txq(req_skb, CPL_PRIORITY_CONTROL, port_id);
        ret = cxgb4_ofld_send(dev->rdev.lldi.ports[0], req_skb);
@@ -3931,9 +3984,11 @@ static int peer_abort_intr(struct c4iw_dev *dev, struct sk_buff *skb)
                return 0;
        }
        if (is_neg_adv(req->status)) {
-               dev_warn(&dev->rdev.lldi.pdev->dev,
-                        "Negative advice on abort - tid %u status %d (%s)\n",
-                        ep->hwtid, req->status, neg_adv_str(req->status));
+               PDBG("%s Negative advice on abort - tid %u status %d (%s)\n",
+                    __func__, ep->hwtid, req->status,
+                    neg_adv_str(req->status));
+               ep->stats.abort_neg_adv++;
+               dev->rdev.stats.neg_adv++;
                kfree_skb(skb);
                return 0;
        }
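
    Both abort paths above count negative-advice events twice: a
    per-endpoint counter that needs no locking because each ep is handled
    on one context, and a device-wide rdev.stats.neg_adv counter shared
    across endpoints. act_open_rpl() and peer_abort() take rdev.stats.lock
    (a mutex) around the shared increment; peer_abort_intr() bumps it bare,
    presumably because it runs on the interrupt path where a mutex cannot
    be taken, trading a possible lost increment for safety. The pattern:

        ep->stats.abort_neg_adv++;              /* private to this ep */
        mutex_lock(&dev->rdev.stats.lock);      /* process context only */
        dev->rdev.stats.neg_adv++;              /* shared, device-wide */
        mutex_unlock(&dev->rdev.stats.lock);
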
index ab7692ac2044b0a351c10681f08f63759f1934e8..68ddb37102152ec5382a7bbee7e9bab89e681b94 100644 (file)
@@ -55,7 +55,7 @@ static int destroy_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-       res_wr->cookie = (unsigned long) &wr_wait;
+       res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_RESET;
@@ -125,7 +125,7 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                        FW_RI_RES_WR_NRES_V(1) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-       res_wr->cookie = (unsigned long) &wr_wait;
+       res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.cq.restype = FW_RI_RES_TYPE_CQ;
        res->u.cq.op = FW_RI_RES_OP_WRITE;
@@ -156,12 +156,19 @@ static int create_cq(struct c4iw_rdev *rdev, struct t4_cq *cq,
                goto err4;
 
        cq->gen = 1;
-       cq->gts = rdev->lldi.gts_reg;
        cq->rdev = rdev;
        if (user) {
-               cq->ugts = (u64)pci_resource_start(rdev->lldi.pdev, 2) +
-                                       (cq->cqid << rdev->cqshift);
-               cq->ugts &= PAGE_MASK;
+               u32 off = (cq->cqid << rdev->cqshift) & PAGE_MASK;
+
+               cq->ugts = (u64)rdev->bar2_pa + off;
+       } else if (is_t4(rdev->lldi.adapter_type)) {
+               cq->gts = rdev->lldi.gts_reg;
+               cq->qid_mask = -1U;
+       } else {
+               u32 off = ((cq->cqid << rdev->cqshift) & PAGE_MASK) + 12;
+
+               cq->gts = rdev->bar2_kva + off;
+               cq->qid_mask = rdev->qpmask;
        }
        return 0;
 err4:
@@ -970,8 +977,7 @@ struct ib_cq *c4iw_create_cq(struct ib_device *ibdev, int entries,
        }
        PDBG("%s cqid 0x%0x chp %p size %u memsize %zu, dma_addr 0x%0llx\n",
             __func__, chp->cq.cqid, chp, chp->cq.size,
-            chp->cq.memsize,
-            (unsigned long long) chp->cq.dma_addr);
+            chp->cq.memsize, (unsigned long long) chp->cq.dma_addr);
        return &chp->ibcq;
 err5:
        kfree(mm2);
index 8fb295e4a9ab7199a385dc7778ee6ae1e409a5c1..7e895d714b19e35a49ddd69f5a3432af5bcf1f71 100644 (file)
@@ -93,6 +93,7 @@ static struct ibnl_client_cbs c4iw_nl_cb_table[] = {
        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
        [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
+       [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
        [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
        [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
 };
@@ -151,7 +152,7 @@ static int wr_log_show(struct seq_file *seq, void *v)
        int prev_ts_set = 0;
        int idx, end;
 
-#define ts2ns(ts) div64_ul((ts) * dev->rdev.lldi.cclk_ps, 1000)
+#define ts2ns(ts) div64_u64((ts) * dev->rdev.lldi.cclk_ps, 1000)
 
        idx = atomic_read(&dev->rdev.wr_log_idx) &
                (dev->rdev.wr_log_size - 1);
@@ -489,6 +490,7 @@ static int stats_show(struct seq_file *seq, void *v)
                   dev->rdev.stats.act_ofld_conn_fails);
        seq_printf(seq, "PAS_OFLD_CONN_FAILS: %10llu\n",
                   dev->rdev.stats.pas_ofld_conn_fails);
+       seq_printf(seq, "NEG_ADV_RCVD: %10llu\n", dev->rdev.stats.neg_adv);
        seq_printf(seq, "AVAILABLE IRD: %10u\n", dev->avail_ird);
        return 0;
 }
@@ -560,10 +562,13 @@ static int dump_ep(int id, void *p, void *data)
                cc = snprintf(epd->buf + epd->pos, space,
                              "ep %p cm_id %p qp %p state %d flags 0x%lx "
                              "history 0x%lx hwtid %d atid %d "
+                             "conn_na %u abort_na %u "
                              "%pI4:%d/%d <-> %pI4:%d/%d\n",
                              ep, ep->com.cm_id, ep->com.qp,
                              (int)ep->com.state, ep->com.flags,
                              ep->com.history, ep->hwtid, ep->atid,
+                             ep->stats.connect_neg_adv,
+                             ep->stats.abort_neg_adv,
                              &lsin->sin_addr, ntohs(lsin->sin_port),
                              ntohs(mapped_lsin->sin_port),
                              &rsin->sin_addr, ntohs(rsin->sin_port),
@@ -581,10 +586,13 @@ static int dump_ep(int id, void *p, void *data)
                cc = snprintf(epd->buf + epd->pos, space,
                              "ep %p cm_id %p qp %p state %d flags 0x%lx "
                              "history 0x%lx hwtid %d atid %d "
+                             "conn_na %u abort_na %u "
                              "%pI6:%d/%d <-> %pI6:%d/%d\n",
                              ep, ep->com.cm_id, ep->com.qp,
                              (int)ep->com.state, ep->com.flags,
                              ep->com.history, ep->hwtid, ep->atid,
+                             ep->stats.connect_neg_adv,
+                             ep->stats.abort_neg_adv,
                              &lsin6->sin6_addr, ntohs(lsin6->sin6_port),
                              ntohs(mapped_lsin6->sin6_port),
                              &rsin6->sin6_addr, ntohs(rsin6->sin6_port),
@@ -764,6 +772,29 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
 
        c4iw_init_dev_ucontext(rdev, &rdev->uctx);
 
+       /*
+        * This implementation assumes udb_density == ucq_density!  Eventually
+        * we might need to support differing densities, but for now fail
+        * the open.  The cqid and qpid ranges must also match for now.
+        */
+       if (rdev->lldi.udb_density != rdev->lldi.ucq_density) {
+               pr_err(MOD "%s: unsupported udb/ucq densities %u/%u\n",
+                      pci_name(rdev->lldi.pdev), rdev->lldi.udb_density,
+                      rdev->lldi.ucq_density);
+               err = -EINVAL;
+               goto err1;
+       }
+       if (rdev->lldi.vr->qp.start != rdev->lldi.vr->cq.start ||
+           rdev->lldi.vr->qp.size != rdev->lldi.vr->cq.size) {
+               pr_err(MOD "%s: unsupported qp and cq id ranges "
+                      "qp start %u size %u cq start %u size %u\n",
+                      pci_name(rdev->lldi.pdev), rdev->lldi.vr->qp.start,
+                      rdev->lldi.vr->qp.size, rdev->lldi.vr->cq.start,
+                      rdev->lldi.vr->cq.size);
+               err = -EINVAL;
+               goto err1;
+       }
+
        /*
         * qpshift is the number of bits to shift the qpid left in order
         * to get the correct address of the doorbell for that qp.
@@ -784,10 +815,10 @@ static int c4iw_rdev_open(struct c4iw_rdev *rdev)
             rdev->lldi.vr->qp.size,
             rdev->lldi.vr->cq.start,
             rdev->lldi.vr->cq.size);
-       PDBG("udb len 0x%x udb base %llx db_reg %p gts_reg %p qpshift %lu "
+       PDBG("udb len 0x%x udb base %p db_reg %p gts_reg %p qpshift %lu "
             "qpmask 0x%x cqshift %lu cqmask 0x%x\n",
             (unsigned)pci_resource_len(rdev->lldi.pdev, 2),
-            (u64)pci_resource_start(rdev->lldi.pdev, 2),
+            (void *)pci_resource_start(rdev->lldi.pdev, 2),
             rdev->lldi.db_reg,
             rdev->lldi.gts_reg,
             rdev->qpshift, rdev->qpmask,
@@ -1355,7 +1386,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
                                          t4_sq_host_wq_pidx(&qp->wq),
                                          t4_sq_wq_size(&qp->wq));
                if (ret) {
-                       pr_err(KERN_ERR MOD "%s: Fatal error - "
+                       pr_err(MOD "%s: Fatal error - "
                               "DB overflow recovery failed - "
                               "error syncing SQ qid %u\n",
                               pci_name(ctx->lldi.pdev), qp->wq.sq.qid);
@@ -1371,7 +1402,7 @@ static void recover_lost_dbs(struct uld_ctx *ctx, struct qp_list *qp_list)
                                          t4_rq_wq_size(&qp->wq));
 
                if (ret) {
-                       pr_err(KERN_ERR MOD "%s: Fatal error - "
+                       pr_err(MOD "%s: Fatal error - "
                               "DB overflow recovery failed - "
                               "error syncing RQ qid %u\n",
                               pci_name(ctx->lldi.pdev), qp->wq.rq.qid);
index d87e1650f6437835f3660c21d3a59ec920fa8f7c..97bb5550a6cf64bd77eb3d429b43b6c9d3e10b7e 100644 (file)
@@ -137,6 +137,7 @@ struct c4iw_stats {
        u64  tcam_full;
        u64  act_ofld_conn_fails;
        u64  pas_ofld_conn_fails;
+       u64  neg_adv;
 };
 
 struct c4iw_hw_queue {
@@ -814,6 +815,11 @@ struct c4iw_listen_ep {
        int backlog;
 };
 
+struct c4iw_ep_stats {
+       unsigned connect_neg_adv;
+       unsigned abort_neg_adv;
+};
+
 struct c4iw_ep {
        struct c4iw_ep_common com;
        struct c4iw_ep *parent_ep;
@@ -846,6 +852,7 @@ struct c4iw_ep {
        unsigned int retry_count;
        int snd_win;
        int rcv_win;
+       struct c4iw_ep_stats stats;
 };
 
 static inline void print_addr(struct c4iw_ep_common *epc, const char *func,
index 3ef0cf9f5c4403863310ffc5a7d92cd6bfc970d9..cff815b9170716a01bca790c586b7a9b14c586e7 100644 (file)
@@ -144,7 +144,7 @@ static int _c4iw_write_mem_inline(struct c4iw_rdev *rdev, u32 addr, u32 len,
                if (i == (num_wqe-1)) {
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR) |
                                                    FW_WR_COMPL_F);
-                       req->wr.wr_lo = (__force __be64)(unsigned long) &wr_wait;
+                       req->wr.wr_lo = (__force __be64)&wr_wait;
                } else
                        req->wr.wr_hi = cpu_to_be32(FW_WR_OP_V(FW_ULPTX_WR));
                req->wr.wr_mid = cpu_to_be32(
@@ -676,12 +676,12 @@ struct ib_mr *c4iw_get_dma_mr(struct ib_pd *pd, int acc)
        mhp->attr.zbva = 0;
        mhp->attr.va_fbo = 0;
        mhp->attr.page_size = 0;
-       mhp->attr.len = ~0UL;
+       mhp->attr.len = ~0ULL;
        mhp->attr.pbl_size = 0;
 
        ret = write_tpt_entry(&rhp->rdev, 0, &stag, 1, php->pdid,
                              FW_RI_STAG_NSMR, mhp->attr.perms,
-                             mhp->attr.mw_bind_enable, 0, 0, ~0UL, 0, 0, 0);
+                             mhp->attr.mw_bind_enable, 0, 0, ~0ULL, 0, 0, 0);
        if (ret)
                goto err1;
 
index 15cae5a3101851ed1a0cb049ff43987651a8955c..389ced335bc5cc528f7ef4ba1e4121c5ec79295c 100644 (file)
@@ -275,7 +275,7 @@ static int create_qp(struct c4iw_rdev *rdev, struct t4_wq *wq,
                        FW_RI_RES_WR_NRES_V(2) |
                        FW_WR_COMPL_F);
        res_wr->len16_pkd = cpu_to_be32(DIV_ROUND_UP(wr_len, 16));
-       res_wr->cookie = (unsigned long) &wr_wait;
+       res_wr->cookie = (uintptr_t)&wr_wait;
        res = res_wr->res;
        res->u.sqrq.restype = FW_RI_RES_TYPE_SQ;
        res->u.sqrq.op = FW_RI_RES_OP_WRITE;
@@ -1209,7 +1209,7 @@ static int rdma_fini(struct c4iw_dev *rhp, struct c4iw_qp *qhp,
        wqe->flowid_len16 = cpu_to_be32(
                FW_WR_FLOWID_V(ep->hwtid) |
                FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
-       wqe->cookie = (unsigned long) &ep->com.wr_wait;
+       wqe->cookie = (uintptr_t)&ep->com.wr_wait;
 
        wqe->u.fini.type = FW_RI_TYPE_FINI;
        ret = c4iw_ofld_send(&rhp->rdev, skb);
@@ -1279,7 +1279,7 @@ static int rdma_init(struct c4iw_dev *rhp, struct c4iw_qp *qhp)
                FW_WR_FLOWID_V(qhp->ep->hwtid) |
                FW_WR_LEN16_V(DIV_ROUND_UP(sizeof(*wqe), 16)));
 
-       wqe->cookie = (unsigned long) &qhp->ep->com.wr_wait;
+       wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;
 
        wqe->u.init.type = FW_RI_TYPE_INIT;
        wqe->u.init.mpareqbit_p2ptype =
@@ -1766,11 +1766,11 @@ struct ib_qp *c4iw_create_qp(struct ib_pd *pd, struct ib_qp_init_attr *attrs,
                mm2->len = PAGE_ALIGN(qhp->wq.rq.memsize);
                insert_mmap(ucontext, mm2);
                mm3->key = uresp.sq_db_gts_key;
-               mm3->addr = (__force unsigned long) qhp->wq.sq.udb;
+               mm3->addr = (__force unsigned long)qhp->wq.sq.udb;
                mm3->len = PAGE_SIZE;
                insert_mmap(ucontext, mm3);
                mm4->key = uresp.rq_db_gts_key;
-               mm4->addr = (__force unsigned long) qhp->wq.rq.udb;
+               mm4->addr = (__force unsigned long)qhp->wq.rq.udb;
                mm4->len = PAGE_SIZE;
                insert_mmap(ucontext, mm4);
                if (mm5) {
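
    The (uintptr_t) casts introduced in this file, cq.c, and mem.c all
    serve one round-trip: a c4iw_wr_wait pointer is stashed in a 64-bit
    firmware cookie and recovered when the completion arrives. uintptr_t
    is the integer type guaranteed to hold a pointer, where (unsigned long)
    merely happens to be wide enough on common Linux targets. A sketch of
    both ends (the completion side is condensed; rpl->cookie stands in for
    wherever the event carries the cookie back, and c4iw_wake_up() is the
    driver's existing wake helper):

        /* submit side: pointer -> integer, no truncation either way */
        wqe->cookie = (uintptr_t)&qhp->ep->com.wr_wait;

        /* completion side: firmware echoes the cookie back verbatim */
        struct c4iw_wr_wait *wr_waitp =
                (struct c4iw_wr_wait *)(uintptr_t)be64_to_cpu(rpl->cookie);
        c4iw_wake_up(wr_waitp, status);
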
index 871cdcac7be26a3479f78eb34c3f799f5a8b840f..7f2a6c244d25d67ea922ab35ba568e8b73196ffe 100644 (file)
@@ -539,6 +539,7 @@ struct t4_cq {
        size_t memsize;
        __be64 bits_type_ts;
        u32 cqid;
+       u32 qid_mask;
        int vector;
        u16 size; /* including status page */
        u16 cidx;
@@ -563,12 +564,12 @@ static inline int t4_arm_cq(struct t4_cq *cq, int se)
        set_bit(CQ_ARMED, &cq->flags);
        while (cq->cidx_inc > CIDXINC_M) {
                val = SEINTARM_V(0) | CIDXINC_V(CIDXINC_M) | TIMERREG_V(7) |
-                     INGRESSQID_V(cq->cqid);
+                     INGRESSQID_V(cq->cqid & cq->qid_mask);
                writel(val, cq->gts);
                cq->cidx_inc -= CIDXINC_M;
        }
        val = SEINTARM_V(se) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(6) |
-             INGRESSQID_V(cq->cqid);
+             INGRESSQID_V(cq->cqid & cq->qid_mask);
        writel(val, cq->gts);
        cq->cidx_inc = 0;
        return 0;
@@ -601,7 +602,7 @@ static inline void t4_hwcq_consume(struct t4_cq *cq)
                u32 val;
 
                val = SEINTARM_V(0) | CIDXINC_V(cq->cidx_inc) | TIMERREG_V(7) |
-                     INGRESSQID_V(cq->cqid);
+                     INGRESSQID_V(cq->cqid & cq->qid_mask);
                writel(val, cq->gts);
                cq->cidx_inc = 0;
        }
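
    The qid_mask added above lets one INGRESSQID_V() expression arm the CQ
    on both chip generations: on T4 the write goes to the shared GTS
    register, which needs the full queue id, so the mask is all-ones and
    the AND is a no-op; on T5 the write lands on the queue's own BAR2
    doorbell page, so only the id bits not already implied by the page
    address matter. A worked example, with the T5 mask value assumed purely
    for illustration:

        u32 cqid = 0x1234;

        /* T4: qid_mask = -1U, cqid passes through unchanged */
        u32 t4_val = INGRESSQID_V(cqid & 0xffffffffU);  /* encodes 0x1234 */

        /* T5: qid_mask = rdev->qpmask, e.g. 0x7f in this sketch; the
         * high bits are already encoded in the BAR2 page being written */
        u32 t5_val = INGRESSQID_V(cqid & 0x7fU);        /* encodes 0x34 */
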
index 5e53327fc6476b678227609bee4aee90bbb0c7c0..343e8daf2270dbc07680d2ef2e2b2027181f292c 100644 (file)
@@ -848,6 +848,8 @@ enum {                     /* TCP congestion control algorithms */
 #define CONG_CNTRL_V(x) ((x) << CONG_CNTRL_S)
 #define CONG_CNTRL_G(x) (((x) >> CONG_CNTRL_S) & CONG_CNTRL_M)
 
-#define CONG_CNTRL_VALID   (1 << 18)
+#define T5_ISS_S    18
+#define T5_ISS_V(x) ((x) << T5_ISS_S)
+#define T5_ISS_F    T5_ISS_V(1U)
 
 #endif /* _T4FW_RI_API_H_ */
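
    T5_ISS_{S,V,F} follows the same macro convention as CONG_CNTRL_{S,M,V,G}
    above: _S is the bit position, _V(x) places a value at it, _F is the
    one-bit flag shorthand, and multi-bit fields add _M/_G for mask and
    extract. Replacing the open-coded CONG_CNTRL_VALID (1 << 18) with
    T5_ISS_F changes no bits; it only names the field for what T5 actually
    uses it as. The convention, spelled out for a hypothetical two-bit
    field FOO at bit 18 (illustrative, not a real register field):

        #define FOO_S    18                               /* shift */
        #define FOO_M    0x3U                             /* field mask */
        #define FOO_V(x) ((x) << FOO_S)                   /* place a value */
        #define FOO_G(x) (((x) >> FOO_S) & FOO_M)         /* extract */
        #define FOO_F    FOO_V(1U)                        /* one-bit flag */
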
index 120aedf9f989a7a34839046a360dd9ad53816d64..cec1815329245cfa6573f39ce3f44f628175fea7 100644 (file)
@@ -77,7 +77,7 @@ int ehca_attach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                return -EINVAL;
        }
 
-       memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+       memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
 
        subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
        interface_id = be64_to_cpu(my_gid.global.interface_id);
@@ -114,7 +114,7 @@ int ehca_detach_mcast(struct ib_qp *ibqp, union ib_gid *gid, u16 lid)
                return -EINVAL;
        }
 
-       memcpy(&my_gid.raw, gid->raw, sizeof(union ib_gid));
+       memcpy(&my_gid, gid->raw, sizeof(union ib_gid));
 
        subnet_prefix = be64_to_cpu(my_gid.global.subnet_prefix);
        interface_id = be64_to_cpu(my_gid.global.interface_id);
index 1d9bb115cbf60799cd3c705bc0b166bb7640e7d7..8fe54ff00580844479b16345e15e04590d316714 100644 (file)
@@ -9,3 +9,6 @@ config INFINIBAND_IPATH
        as IP-over-InfiniBand as well as with userspace applications
        (in conjunction with InfiniBand userspace access).
        For QLogic PCIe QLE based cards, use the QIB driver instead.
+
+       If you have this hardware, you will need to boot with PAT disabled
+       on your x86-64 systems; use the nopat kernel parameter.
index bd0caedafe9955c07683b6be7bbb2bdd346a704c..2d7e503d13cb5b9c2855936ce162f41d49ca0ced 100644 (file)
@@ -42,6 +42,9 @@
 #include <linux/bitmap.h>
 #include <linux/slab.h>
 #include <linux/module.h>
+#ifdef CONFIG_X86_64
+#include <asm/pat.h>
+#endif
 
 #include "ipath_kernel.h"
 #include "ipath_verbs.h"
@@ -395,6 +398,14 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        unsigned long long addr;
        u32 bar0 = 0, bar1 = 0;
 
+#ifdef CONFIG_X86_64
+       if (WARN(pat_enabled(),
+                "ipath needs PAT disabled, boot with nopat kernel parameter\n")) {
+               ret = -ENODEV;
+               goto bail;
+       }
+#endif
+
        dd = ipath_alloc_devdata(pdev);
        if (IS_ERR(dd)) {
                ret = PTR_ERR(dd);
@@ -542,6 +553,7 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
        dd->ipath_kregbase = __ioremap(addr, len,
                (_PAGE_NO_CACHE|_PAGE_WRITETHRU));
 #else
+       /* XXX: split this properly to enable on PAT */
        dd->ipath_kregbase = ioremap_nocache(addr, len);
 #endif
 
@@ -587,12 +599,8 @@ static int ipath_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
 
        ret = ipath_enable_wc(dd);
 
-       if (ret) {
-               ipath_dev_err(dd, "Write combining not enabled "
-                             "(err %d): performance may be poor\n",
-                             -ret);
+       if (ret)
                ret = 0;
-       }
 
        ipath_verify_pioperf(dd);
 
index e08db7020cd4939809dd456882ff3d4e69480480..f0f9471227793f99f386555100ea0254ff39a1f9 100644 (file)
@@ -463,9 +463,7 @@ struct ipath_devdata {
        /* offset in HT config space of slave/primary interface block */
        u8 ipath_ht_slave_off;
        /* for write combining settings */
-       unsigned long ipath_wc_cookie;
-       unsigned long ipath_wc_base;
-       unsigned long ipath_wc_len;
+       int wc_cookie;
        /* ref count for each pkey */
        atomic_t ipath_pkeyrefs[4];
        /* shadow copy of struct page *'s for exp tid pages */
index 4ad0b932df1fab1c1897f144db9ffc8af35c5f73..7b6e4c843e19dce46ff287c416422b60378632ac 100644 (file)
@@ -37,7 +37,6 @@
  */
 
 #include <linux/pci.h>
-#include <asm/mtrr.h>
 #include <asm/processor.h>
 
 #include "ipath_kernel.h"
@@ -122,27 +121,14 @@ int ipath_enable_wc(struct ipath_devdata *dd)
        }
 
        if (!ret) {
-               int cookie;
-               ipath_cdbg(VERBOSE, "Setting mtrr for chip to WC "
-                          "(addr %llx, len=0x%llx)\n",
-                          (unsigned long long) pioaddr,
-                          (unsigned long long) piolen);
-               cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
-               if (cookie < 0) {
-                       {
-                               dev_info(&dd->pcidev->dev,
-                                        "mtrr_add()  WC for PIO bufs "
-                                        "failed (%d)\n",
-                                        cookie);
-                               ret = -EINVAL;
-                       }
-               } else {
-                       ipath_cdbg(VERBOSE, "Set mtrr for chip to WC, "
-                                  "cookie is %d\n", cookie);
-                       dd->ipath_wc_cookie = cookie;
-                       dd->ipath_wc_base = (unsigned long) pioaddr;
-                       dd->ipath_wc_len = (unsigned long) piolen;
-               }
+               dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
+               if (dd->wc_cookie < 0) {
+                       ipath_dev_err(dd, "Setting mtrr failed on PIO buffers\n");
+                       ret = -ENODEV;
+               } else if (dd->wc_cookie == 0)
+                       ipath_cdbg(VERBOSE, "Setting mtrr for chip to WC not needed\n");
+               else
+                       ipath_cdbg(VERBOSE, "Set mtrr for chip to WC\n");
        }
 
        return ret;
@@ -154,16 +140,5 @@ int ipath_enable_wc(struct ipath_devdata *dd)
  */
 void ipath_disable_wc(struct ipath_devdata *dd)
 {
-       if (dd->ipath_wc_cookie) {
-               int r;
-               ipath_cdbg(VERBOSE, "undoing WCCOMB on pio buffers\n");
-               r = mtrr_del(dd->ipath_wc_cookie, dd->ipath_wc_base,
-                            dd->ipath_wc_len);
-               if (r < 0)
-                       dev_info(&dd->pcidev->dev,
-                                "mtrr_del(%lx, %lx, %lx) failed: %d\n",
-                                dd->ipath_wc_cookie, dd->ipath_wc_base,
-                                dd->ipath_wc_len, r);
-               dd->ipath_wc_cookie = 0; /* even on failure */
-       }
+       arch_phys_wc_del(dd->wc_cookie);
 }
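
    arch_phys_wc_add() hides the MTRR-versus-PAT decision that ipath (and
    qib below) previously open-coded: it returns 0 when PAT already
    provides write-combining (nothing to undo), a positive cookie when an
    MTRR slot was consumed, and a negative errno on failure; and
    arch_phys_wc_del() is safe to call with any of those, ignoring cookies
    that never mapped to an MTRR. The resulting driver contract, as used
    above:

        dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
        if (dd->wc_cookie < 0)
                ret = -ENODEV;          /* MTRR wanted but unavailable */
        /* wc_cookie == 0 means PAT covers it; > 0 is an MTRR cookie */

        /* teardown is unconditional: */
        arch_phys_wc_del(dd->wc_cookie);
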
index 57070c529dfb5ca038e2118212684dc54bb3d21b..cc64400d41ace3005c8a878b4c6811b0506726f9 100644 (file)
@@ -1569,8 +1569,7 @@ static void reset_gids_task(struct work_struct *work)
                               MLX4_CMD_TIME_CLASS_B,
                               MLX4_CMD_WRAPPED);
                if (err)
-                       pr_warn(KERN_WARNING
-                               "set port %d command failed\n", gw->port);
+                       pr_warn("set port %d command failed\n", gw->port);
        }
 
        mlx4_free_cmd_mailbox(dev, mailbox);
index 4d7024b899cb091a12aacfa9450af1e7750d4f45..d35f62d4f4c58ecce848cfb2d0544500c62dbfd3 100644 (file)
@@ -1392,7 +1392,7 @@ static int mlx5_set_path(struct mlx5_ib_dev *dev, const struct ib_ah_attr *ah,
 
        if (ah->ah_flags & IB_AH_GRH) {
                if (ah->grh.sgid_index >= gen->port[port - 1].gid_table_len) {
-                       pr_err(KERN_ERR "sgid_index (%u) too large. max is %d\n",
+                       pr_err("sgid_index (%u) too large. max is %d\n",
                               ah->grh.sgid_index, gen->port[port - 1].gid_table_len);
                        return -EINVAL;
                }
index 3b2a6dc8ea99d734645a24cef66a78867f66a2cd..9f9d5c563a614c0c273368966d380c82d9b2a1a4 100644 (file)
@@ -116,6 +116,7 @@ static struct ibnl_client_cbs nes_nl_cb_table[] = {
        [RDMA_NL_IWPM_REG_PID] = {.dump = iwpm_register_pid_cb},
        [RDMA_NL_IWPM_ADD_MAPPING] = {.dump = iwpm_add_mapping_cb},
        [RDMA_NL_IWPM_QUERY_MAPPING] = {.dump = iwpm_add_and_query_mapping_cb},
+       [RDMA_NL_IWPM_REMOTE_INFO] = {.dump = iwpm_remote_info_cb},
        [RDMA_NL_IWPM_HANDLE_ERR] = {.dump = iwpm_mapping_error_cb},
        [RDMA_NL_IWPM_MAPINFO] = {.dump = iwpm_mapping_info_cb},
        [RDMA_NL_IWPM_MAPINFO_NUM] = {.dump = iwpm_ack_mapping_info_cb}
index 6f09a72e78d7d8ec9690413924e079af764aeaf3..72b43417cbe382aed9164b5554e80b449270c3ad 100644 (file)
@@ -596,27 +596,52 @@ static void nes_form_reg_msg(struct nes_vnic *nesvnic,
        memcpy(pm_msg->if_name, nesvnic->netdev->name, IWPM_IFNAME_SIZE);
 }
 
+static void record_sockaddr_info(struct sockaddr_storage *addr_info,
+                                       nes_addr_t *ip_addr, u16 *port_num)
+{
+       struct sockaddr_in *in_addr = (struct sockaddr_in *)addr_info;
+
+       if (in_addr->sin_family == AF_INET) {
+               *ip_addr = ntohl(in_addr->sin_addr.s_addr);
+               *port_num = ntohs(in_addr->sin_port);
+       }
+}
+
 /*
  * nes_record_pm_msg - Save the received mapping info
  */
 static void nes_record_pm_msg(struct nes_cm_info *cm_info,
                        struct iwpm_sa_data *pm_msg)
 {
-       struct sockaddr_in *mapped_loc_addr =
-                       (struct sockaddr_in *)&pm_msg->mapped_loc_addr;
-       struct sockaddr_in *mapped_rem_addr =
-                       (struct sockaddr_in *)&pm_msg->mapped_rem_addr;
-
-       if (mapped_loc_addr->sin_family == AF_INET) {
-               cm_info->mapped_loc_addr =
-                       ntohl(mapped_loc_addr->sin_addr.s_addr);
-               cm_info->mapped_loc_port = ntohs(mapped_loc_addr->sin_port);
-       }
-       if (mapped_rem_addr->sin_family == AF_INET) {
-               cm_info->mapped_rem_addr =
-                       ntohl(mapped_rem_addr->sin_addr.s_addr);
-               cm_info->mapped_rem_port = ntohs(mapped_rem_addr->sin_port);
-       }
+       record_sockaddr_info(&pm_msg->mapped_loc_addr,
+               &cm_info->mapped_loc_addr, &cm_info->mapped_loc_port);
+
+       record_sockaddr_info(&pm_msg->mapped_rem_addr,
+               &cm_info->mapped_rem_addr, &cm_info->mapped_rem_port);
+}
+
+/*
+ * nes_get_remote_addr - Get the address info of the remote connecting peer
+ */
+static int nes_get_remote_addr(struct nes_cm_node *cm_node)
+{
+       struct sockaddr_storage mapped_loc_addr, mapped_rem_addr;
+       struct sockaddr_storage remote_addr;
+       int ret;
+
+       nes_create_sockaddr(htonl(cm_node->mapped_loc_addr),
+                       htons(cm_node->mapped_loc_port), &mapped_loc_addr);
+       nes_create_sockaddr(htonl(cm_node->mapped_rem_addr),
+                       htons(cm_node->mapped_rem_port), &mapped_rem_addr);
+
+       ret = iwpm_get_remote_info(&mapped_loc_addr, &mapped_rem_addr,
+                               &remote_addr, RDMA_NL_NES);
+       if (ret)
+               nes_debug(NES_DBG_CM, "Unable to find remote peer address info\n");
+       else
+               record_sockaddr_info(&remote_addr, &cm_node->rem_addr,
+                               &cm_node->rem_port);
+       return ret;
 }
 
 /**
@@ -1566,9 +1591,14 @@ static struct nes_cm_node *make_cm_node(struct nes_cm_core *cm_core,
                return NULL;
 
        /* set our node specific transport info */
-       cm_node->loc_addr = cm_info->loc_addr;
+       if (listener) {
+               cm_node->loc_addr = listener->loc_addr;
+               cm_node->loc_port = listener->loc_port;
+       } else {
+               cm_node->loc_addr = cm_info->loc_addr;
+               cm_node->loc_port = cm_info->loc_port;
+       }
        cm_node->rem_addr = cm_info->rem_addr;
-       cm_node->loc_port = cm_info->loc_port;
        cm_node->rem_port = cm_info->rem_port;
 
        cm_node->mapped_loc_addr = cm_info->mapped_loc_addr;
@@ -2151,6 +2181,7 @@ static int handle_ack_pkt(struct nes_cm_node *cm_node, struct sk_buff *skb,
                cm_node->state = NES_CM_STATE_ESTABLISHED;
                if (datasize) {
                        cm_node->tcp_cntxt.rcv_nxt = inc_sequence + datasize;
+                       nes_get_remote_addr(cm_node);
                        handle_rcv_mpa(cm_node, skb);
                } else { /* rcvd ACK only */
                        dev_kfree_skb_any(skb);
index c9780d919769a6ef9a0020b7e710afa7e7ce2497..b396344fae16af33153f0625104d95c17f488a8d 100644 (file)
@@ -40,7 +40,7 @@
 #include <be_roce.h>
 #include "ocrdma_sli.h"
 
-#define OCRDMA_ROCE_DRV_VERSION "10.4.205.0u"
+#define OCRDMA_ROCE_DRV_VERSION "10.6.0.0"
 
 #define OCRDMA_ROCE_DRV_DESC "Emulex OneConnect RoCE Driver"
 #define OCRDMA_NODE_DESC "Emulex OneConnect RoCE HCA"
@@ -515,6 +515,8 @@ static inline int ocrdma_resolve_dmac(struct ocrdma_dev *dev,
        memcpy(&in6, ah_attr->grh.dgid.raw, sizeof(in6));
        if (rdma_is_multicast_addr(&in6))
                rdma_get_mcast_mac(&in6, mac_addr);
+       else if (rdma_link_local_addr(&in6))
+               rdma_get_ll_mac(&in6, mac_addr);
        else
                memcpy(mac_addr, ah_attr->dmac, ETH_ALEN);
        return 0;
index d812904f398473d1502bb979d6d822c04b55f2b8..f5a5ea836dbdc9fe4f12e6f26e1acca5dac3c6d1 100644 (file)
@@ -56,7 +56,13 @@ static inline int set_av_attr(struct ocrdma_dev *dev, struct ocrdma_ah *ah,
        vlan_tag = attr->vlan_id;
        if (!vlan_tag || (vlan_tag > 0xFFF))
                vlan_tag = dev->pvid;
-       if (vlan_tag && (vlan_tag < 0x1000)) {
+       if (vlan_tag || dev->pfc_state) {
+               if (!vlan_tag) {
+                       pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
+                               dev->id);
+                       pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
+                               dev->id);
+               }
                eth.eth_type = cpu_to_be16(0x8100);
                eth.roce_eth_type = cpu_to_be16(OCRDMA_ROCE_ETH_TYPE);
                vlan_tag |= (dev->sl & 0x07) << OCRDMA_VID_PCP_SHIFT;
@@ -121,7 +127,9 @@ struct ib_ah *ocrdma_create_ah(struct ib_pd *ibpd, struct ib_ah_attr *attr)
                goto av_conf_err;
        }
 
-       if (pd->uctx) {
+       if ((pd->uctx) &&
+           (!rdma_is_multicast_addr((struct in6_addr *)attr->grh.dgid.raw)) &&
+           (!rdma_link_local_addr((struct in6_addr *)attr->grh.dgid.raw))) {
                status = rdma_addr_find_dmac_by_grh(&sgid, &attr->grh.dgid,
                                         attr->dmac, &attr->vlan_id);
                if (status) {
index 0c9e95909a64651e931f4768e88f97e266c8379e..47615ff33bc6a1fb8c0c703b9f975acc6afe79c2 100644 (file)
@@ -933,12 +933,18 @@ static irqreturn_t ocrdma_irq_handler(int irq, void *handle)
        struct ocrdma_eqe eqe;
        struct ocrdma_eqe *ptr;
        u16 cq_id;
+       u8 mcode;
        int budget = eq->cq_cnt;
 
        do {
                ptr = ocrdma_get_eqe(eq);
                eqe = *ptr;
                ocrdma_le32_to_cpu(&eqe, sizeof(eqe));
+               mcode = (eqe.id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
+                               >> OCRDMA_EQE_MAJOR_CODE_SHIFT;
+               if (mcode == OCRDMA_MAJOR_CODE_SENTINAL)
+                       pr_err("EQ full on eqid = 0x%x, eqe = 0x%x\n",
+                              eq->q.id, eqe.id_valid);
                if ((eqe.id_valid & OCRDMA_EQE_VALID_MASK) == 0)
                        break;
 
@@ -1434,27 +1440,30 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
        struct ocrdma_alloc_pd_range_rsp *rsp;
 
        /* Pre allocate the DPP PDs */
-       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
-       if (!cmd)
-               return -ENOMEM;
-       cmd->pd_count = dev->attr.max_dpp_pds;
-       cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
-       status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
-       if (status)
-               goto mbx_err;
-       rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
-
-       if ((rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) && rsp->pd_count) {
-               dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
-                               OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
-               dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
-                               OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
-               dev->pd_mgr->max_dpp_pd = rsp->pd_count;
-               pd_bitmap_size = BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
-               dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
-                                                    GFP_KERNEL);
+       if (dev->attr.max_dpp_pds) {
+               cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE,
+                                         sizeof(*cmd));
+               if (!cmd)
+                       return -ENOMEM;
+               cmd->pd_count = dev->attr.max_dpp_pds;
+               cmd->enable_dpp_rsvd |= OCRDMA_ALLOC_PD_ENABLE_DPP;
+               status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
+               rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
+
+               if (!status && (rsp->dpp_page_pdid & OCRDMA_ALLOC_PD_RSP_DPP) &&
+                   rsp->pd_count) {
+                       dev->pd_mgr->dpp_page_index = rsp->dpp_page_pdid >>
+                                       OCRDMA_ALLOC_PD_RSP_DPP_PAGE_SHIFT;
+                       dev->pd_mgr->pd_dpp_start = rsp->dpp_page_pdid &
+                                       OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
+                       dev->pd_mgr->max_dpp_pd = rsp->pd_count;
+                       pd_bitmap_size =
+                               BITS_TO_LONGS(rsp->pd_count) * sizeof(long);
+                       dev->pd_mgr->pd_dpp_bitmap = kzalloc(pd_bitmap_size,
+                                                            GFP_KERNEL);
+               }
+               kfree(cmd);
        }
-       kfree(cmd);
 
        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
        if (!cmd)
@@ -1462,10 +1471,8 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
 
        cmd->pd_count = dev->attr.max_pd - dev->attr.max_dpp_pds;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
-       if (status)
-               goto mbx_err;
        rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;
-       if (rsp->pd_count) {
+       if (!status && rsp->pd_count) {
                dev->pd_mgr->pd_norm_start = rsp->dpp_page_pdid &
                                        OCRDMA_ALLOC_PD_RNG_RSP_START_PDID_MASK;
                dev->pd_mgr->max_normal_pd = rsp->pd_count;
@@ -1473,15 +1480,13 @@ static int ocrdma_mbx_alloc_pd_range(struct ocrdma_dev *dev)
                dev->pd_mgr->pd_norm_bitmap = kzalloc(pd_bitmap_size,
                                                      GFP_KERNEL);
        }
+       kfree(cmd);
 
        if (dev->pd_mgr->pd_norm_bitmap || dev->pd_mgr->pd_dpp_bitmap) {
                /* Enable PD resource manager */
                dev->pd_mgr->pd_prealloc_valid = true;
-       } else {
-               return -ENOMEM;
+               return 0;
        }
-mbx_err:
-       kfree(cmd);
        return status;
 }
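
    The restructuring above leans on how ocrdma embedded mailbox commands
    work: the firmware writes its response back into the same MQE buffer,
    so rsp is just a cast of cmd, and the buffer must be freed exactly
    once, after parsing. Folding the status check into the rsp-parsing
    condition removes the mbx_err label while keeping a single kfree() per
    command. The shape of the pattern (consume() is a stand-in for the
    field copies above, not a real helper):

        cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_ALLOC_PD_RANGE, sizeof(*cmd));
        if (!cmd)
                return -ENOMEM;
        status = ocrdma_mbx_cmd(dev, (struct ocrdma_mqe *)cmd);
        rsp = (struct ocrdma_alloc_pd_range_rsp *)cmd;  /* rsp aliases cmd */
        if (!status && rsp->pd_count)
                consume(rsp);           /* parse before the single free */
        kfree(cmd);
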
 
@@ -2406,7 +2411,7 @@ int ocrdma_mbx_query_qp(struct ocrdma_dev *dev, struct ocrdma_qp *qp,
        struct ocrdma_query_qp *cmd;
        struct ocrdma_query_qp_rsp *rsp;
 
-       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*cmd));
+       cmd = ocrdma_init_emb_mqe(OCRDMA_CMD_QUERY_QP, sizeof(*rsp));
        if (!cmd)
                return status;
        cmd->qp_id = qp->id;
@@ -2428,7 +2433,7 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        int status;
        struct ib_ah_attr *ah_attr = &attrs->ah_attr;
        union ib_gid sgid, zgid;
-       u32 vlan_id;
+       u32 vlan_id = 0xFFFF;
        u8 mac_addr[6];
        struct ocrdma_dev *dev = get_ocrdma_dev(qp->ibqp.device);
 
@@ -2468,12 +2473,22 @@ static int ocrdma_set_av_params(struct ocrdma_qp *qp,
        cmd->params.vlan_dmac_b4_to_b5 = mac_addr[4] | (mac_addr[5] << 8);
        if (attr_mask & IB_QP_VID) {
                vlan_id = attrs->vlan_id;
+       } else if (dev->pfc_state) {
+               vlan_id = 0;
+               pr_err("ocrdma%d: Using VLAN with PFC is recommended\n",
+                       dev->id);
+               pr_err("ocrdma%d: Using VLAN 0 for this connection\n",
+                       dev->id);
+       }
+
+       if (vlan_id < 0x1000) {
                cmd->params.vlan_dmac_b4_to_b5 |=
                    vlan_id << OCRDMA_QP_PARAMS_VLAN_SHIFT;
                cmd->flags |= OCRDMA_QP_PARA_VLAN_EN_VALID;
                cmd->params.rnt_rc_sl_fl |=
                        (dev->sl & 0x07) << OCRDMA_QP_PARAMS_SL_SHIFT;
        }
+
        return 0;
 }
 
@@ -2519,8 +2534,10 @@ static int ocrdma_set_qp_params(struct ocrdma_qp *qp,
                cmd->flags |= OCRDMA_QP_PARA_DST_QPN_VALID;
        }
        if (attr_mask & IB_QP_PATH_MTU) {
-               if (attrs->path_mtu < IB_MTU_256 ||
+               if (attrs->path_mtu < IB_MTU_512 ||
                    attrs->path_mtu > IB_MTU_4096) {
+                       pr_err("ocrdma%d: IB MTU %d is not supported\n",
+                              dev->id, ib_mtu_enum_to_int(attrs->path_mtu));
                        status = -EINVAL;
                        goto pmtu_err;
                }
@@ -3147,9 +3164,9 @@ void ocrdma_cleanup_hw(struct ocrdma_dev *dev)
        ocrdma_free_pd_pool(dev);
        ocrdma_mbx_delete_ah_tbl(dev);
 
-       /* cleanup the eqs */
-       ocrdma_destroy_eqs(dev);
-
        /* cleanup the control path */
        ocrdma_destroy_mq(dev);
+
+       /* cleanup the eqs */
+       ocrdma_destroy_eqs(dev);
 }
index 243c87c8bd65d09026f46ee12e3ee3b9109ce155..02ad0aee99afc0c5e9449c4f58353e57d38903f1 100644 (file)
@@ -1176,6 +1176,8 @@ struct ocrdma_query_qp_rsp {
        struct ocrdma_mqe_hdr hdr;
        struct ocrdma_mbx_rsp rsp;
        struct ocrdma_qp_params params;
+       u32 dpp_credits_cqid;
+       u32 rbq_id;
 };
 
 enum {
@@ -1624,12 +1626,19 @@ struct ocrdma_delete_ah_tbl_rsp {
 enum {
        OCRDMA_EQE_VALID_SHIFT          = 0,
        OCRDMA_EQE_VALID_MASK           = BIT(0),
+       OCRDMA_EQE_MAJOR_CODE_MASK      = 0x0E,
+       OCRDMA_EQE_MAJOR_CODE_SHIFT     = 0x01,
        OCRDMA_EQE_FOR_CQE_MASK         = 0xFFFE,
        OCRDMA_EQE_RESOURCE_ID_SHIFT    = 16,
        OCRDMA_EQE_RESOURCE_ID_MASK     = 0xFFFF <<
                                OCRDMA_EQE_RESOURCE_ID_SHIFT,
 };
 
+enum major_code {
+       OCRDMA_MAJOR_CODE_COMPLETION    = 0x00,
+       OCRDMA_MAJOR_CODE_SENTINAL      = 0x01
+};
+
 struct ocrdma_eqe {
        u32 id_valid;
 };
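
    The new enums carve a major-code field out of the EQE word: bit 0
    stays the valid bit, bits 1-3 hold the major code (mask 0x0E, shift 1),
    and the upper half remains the resource id. OCRDMA_MAJOR_CODE_SENTINAL
    marks an EQ-full condition, which is why the IRQ handler hunk above
    checks the major code before the valid-bit test. A worked decode with
    an illustrative EQE value:

        u32 id_valid = 0x00050003;      /* example word, not from hardware */

        u8  valid = id_valid & OCRDMA_EQE_VALID_MASK;           /* 1 */
        u8  mcode = (id_valid & OCRDMA_EQE_MAJOR_CODE_MASK)
                        >> OCRDMA_EQE_MAJOR_CODE_SHIFT;         /* 1 = SENTINAL */
        u16 cq_id = (id_valid & OCRDMA_EQE_RESOURCE_ID_MASK)
                        >> OCRDMA_EQE_RESOURCE_ID_SHIFT;        /* 0x0005 */
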
index 877175563634df79a889ed8a428405258b9df1e4..9dcb66077d6cbf9cd37bdaaa594414aadee4c96f 100644 (file)
@@ -365,7 +365,7 @@ static struct ocrdma_pd *_ocrdma_alloc_pd(struct ocrdma_dev *dev,
        if (!pd)
                return ERR_PTR(-ENOMEM);
 
-       if (udata && uctx) {
+       if (udata && uctx && dev->attr.max_dpp_pds) {
                pd->dpp_enabled =
                        ocrdma_get_asic_type(dev) == OCRDMA_ASIC_GEN_SKH_R;
                pd->num_dpp_qp =
@@ -1721,18 +1721,20 @@ int ocrdma_destroy_qp(struct ib_qp *ibqp)
        struct ocrdma_qp *qp;
        struct ocrdma_dev *dev;
        struct ib_qp_attr attrs;
-       int attr_mask = IB_QP_STATE;
+       int attr_mask;
        unsigned long flags;
 
        qp = get_ocrdma_qp(ibqp);
        dev = get_ocrdma_dev(ibqp->device);
 
-       attrs.qp_state = IB_QPS_ERR;
        pd = qp->pd;
 
        /* change the QP state to ERROR */
-       _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
-
+       if (qp->state != OCRDMA_QPS_RST) {
+               attrs.qp_state = IB_QPS_ERR;
+               attr_mask = IB_QP_STATE;
+               _ocrdma_modify_qp(ibqp, &attrs, attr_mask);
+       }
        /* ensure that CQEs for newly created QP (whose id may be same with
         * one which just getting destroyed are same), dont get
         * discarded until the old CQEs are discarded.
index ffd48bfc4923457e5383345acfa3620fa5f6a52f..7df16f74bb4585e971e1208dd348911d142f78fb 100644 (file)
@@ -903,7 +903,7 @@ struct qib_devdata {
        /* PCI Device ID (here for NodeInfo) */
        u16 deviceid;
        /* for write combining settings */
-       unsigned long wc_cookie;
+       int wc_cookie;
        unsigned long wc_base;
        unsigned long wc_len;
 
@@ -1136,7 +1136,6 @@ extern struct qib_devdata *qib_lookup(int unit);
 extern u32 qib_cpulist_count;
 extern unsigned long *qib_cpulist;
 
-extern unsigned qib_wc_pat;
 extern unsigned qib_cc_table_size;
 int qib_init(struct qib_devdata *, int);
 int init_chip_wc_pat(struct qib_devdata *dd, u32);
index 9ea6c440a00ca7012f6422f1ffd7f525b8d80548..725881890c4a217247993f9fbb933ff11bb27e27 100644 (file)
@@ -835,7 +835,8 @@ static int mmap_piobufs(struct vm_area_struct *vma,
        vma->vm_flags &= ~VM_MAYREAD;
        vma->vm_flags |= VM_DONTCOPY | VM_DONTEXPAND;
 
-       if (qib_wc_pat)
+       /* We used PAT if wc_cookie == 0 */
+       if (!dd->wc_cookie)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
 
        ret = io_remap_pfn_range(vma, vma->vm_start, phys >> PAGE_SHIFT,
index 0d2ba59af30af66bce01ef8132c8182cc6e44a33..4b927809d1a1191004799435c7aa9163d8d6f086 100644 (file)
@@ -3315,11 +3315,9 @@ static int init_6120_variables(struct qib_devdata *dd)
        qib_6120_config_ctxts(dd);
        qib_set_ctxtcnt(dd);
 
-       if (qib_wc_pat) {
-               ret = init_chip_wc_pat(dd, 0);
-               if (ret)
-                       goto bail;
-       }
+       ret = init_chip_wc_pat(dd, 0);
+       if (ret)
+               goto bail;
        set_6120_baseaddrs(dd); /* set chip access pointers now */
 
        ret = 0;
index 22affda8af88eacbd11f21abab55ba299dfb0e0b..00b2af211157b5513092e495d62a0a014a1e1c2f 100644 (file)
@@ -4126,11 +4126,9 @@ static int qib_init_7220_variables(struct qib_devdata *dd)
        qib_7220_config_ctxts(dd);
        qib_set_ctxtcnt(dd);  /* needed for PAT setup */
 
-       if (qib_wc_pat) {
-               ret = init_chip_wc_pat(dd, 0);
-               if (ret)
-                       goto bail;
-       }
+       ret = init_chip_wc_pat(dd, 0);
+       if (ret)
+               goto bail;
        set_7220_baseaddrs(dd); /* set chip access pointers now */
 
        ret = 0;
index ef97b71c8f7dd713a77401f593c6a10320a046e6..f32b4628e9913e17dfd0606e1713945ff65930f2 100644 (file)
@@ -6429,6 +6429,7 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
        unsigned features, pidx, sbufcnt;
        int ret, mtu;
        u32 sbufs, updthresh;
+       resource_size_t vl15off;
 
        /* pport structs are contiguous, allocated after devdata */
        ppd = (struct qib_pportdata *)(dd + 1);
@@ -6677,29 +6678,27 @@ static int qib_init_7322_variables(struct qib_devdata *dd)
        qib_7322_config_ctxts(dd);
        qib_set_ctxtcnt(dd);
 
-       if (qib_wc_pat) {
-               resource_size_t vl15off;
-               /*
-                * We do not set WC on the VL15 buffers to avoid
-                * a rare problem with unaligned writes from
-                * interrupt-flushed store buffers, so we need
-                * to map those separately here.  We can't solve
-                * this for the rarely used mtrr case.
-                */
-               ret = init_chip_wc_pat(dd, 0);
-               if (ret)
-                       goto bail;
+       /*
+        * We do not set WC on the VL15 buffers to avoid
+        * a rare problem with unaligned writes from
+        * interrupt-flushed store buffers, so we need
+        * to map those separately here.  We can't solve
+        * this for the rarely used mtrr case.
+        */
+       ret = init_chip_wc_pat(dd, 0);
+       if (ret)
+               goto bail;
 
-               /* vl15 buffers start just after the 4k buffers */
-               vl15off = dd->physaddr + (dd->piobufbase >> 32) +
-                       dd->piobcnt4k * dd->align4k;
-               dd->piovl15base = ioremap_nocache(vl15off,
-                                                 NUM_VL15_BUFS * dd->align4k);
-               if (!dd->piovl15base) {
-                       ret = -ENOMEM;
-                       goto bail;
-               }
+       /* vl15 buffers start just after the 4k buffers */
+       vl15off = dd->physaddr + (dd->piobufbase >> 32) +
+                 dd->piobcnt4k * dd->align4k;
+       dd->piovl15base = ioremap_nocache(vl15off,
+                                         NUM_VL15_BUFS * dd->align4k);
+       if (!dd->piovl15base) {
+               ret = -ENOMEM;
+               goto bail;
        }
+
        qib_7322_set_baseaddrs(dd); /* set chip access pointers now */
 
        ret = 0;
index 2ee36953e234c46ff704bc6e5dfbed8339c09974..7e00470adc30223c183f0e287f7a7d0d9beff944 100644 (file)
@@ -91,15 +91,6 @@ MODULE_PARM_DESC(krcvqs, "number of kernel receive queues per IB port");
 unsigned qib_cc_table_size;
 module_param_named(cc_table_size, qib_cc_table_size, uint, S_IRUGO);
 MODULE_PARM_DESC(cc_table_size, "Congestion control table entries 0 (CCA disabled - default), min = 128, max = 1984");
-/*
- * qib_wc_pat parameter:
- *      0 is WC via MTRR
- *      1 is WC via PAT
- *      If PAT initialization fails, code reverts back to MTRR
- */
-unsigned qib_wc_pat = 1; /* default (1) is to use PAT, not MTRR */
-module_param_named(wc_pat, qib_wc_pat, uint, S_IRUGO);
-MODULE_PARM_DESC(wc_pat, "enable write-combining via PAT mechanism");
 
 static void verify_interrupt(unsigned long);
 
@@ -1377,8 +1368,7 @@ static void cleanup_device_data(struct qib_devdata *dd)
                spin_unlock(&dd->pport[pidx].cc_shadow_lock);
        }
 
-       if (!qib_wc_pat)
-               qib_disable_wc(dd);
+       qib_disable_wc(dd);
 
        if (dd->pioavailregs_dma) {
                dma_free_coherent(&dd->pcidev->dev, PAGE_SIZE,
@@ -1547,14 +1537,12 @@ static int qib_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto bail;
        }
 
-       if (!qib_wc_pat) {
-               ret = qib_enable_wc(dd);
-               if (ret) {
-                       qib_dev_err(dd,
-                               "Write combining not enabled (err %d): performance may be poor\n",
-                               -ret);
-                       ret = 0;
-               }
+       ret = qib_enable_wc(dd);
+       if (ret) {
+               qib_dev_err(dd,
+                       "Write combining not enabled (err %d): performance may be poor\n",
+                       -ret);
+               ret = 0;
        }
 
        qib_verify_pioperf(dd);
index 81b225f2300aed34eab9b88077bd4c4097dceae2..edd0ddbd44815d8e48ed49d3dec46e37545aa031 100644 (file)
@@ -116,21 +116,10 @@ int qib_enable_wc(struct qib_devdata *dd)
        }
 
        if (!ret) {
-               int cookie;
-
-               cookie = mtrr_add(pioaddr, piolen, MTRR_TYPE_WRCOMB, 0);
-               if (cookie < 0) {
-                       {
-                               qib_devinfo(dd->pcidev,
-                                        "mtrr_add()  WC for PIO bufs failed (%d)\n",
-                                        cookie);
-                               ret = -EINVAL;
-                       }
-               } else {
-                       dd->wc_cookie = cookie;
-                       dd->wc_base = (unsigned long) pioaddr;
-                       dd->wc_len = (unsigned long) piolen;
-               }
+               dd->wc_cookie = arch_phys_wc_add(pioaddr, piolen);
+               if (dd->wc_cookie < 0)
+                       /* use error from routine */
+                       ret = dd->wc_cookie;
        }
 
        return ret;
@@ -142,18 +131,7 @@ int qib_enable_wc(struct qib_devdata *dd)
  */
 void qib_disable_wc(struct qib_devdata *dd)
 {
-       if (dd->wc_cookie) {
-               int r;
-
-               r = mtrr_del(dd->wc_cookie, dd->wc_base,
-                            dd->wc_len);
-               if (r < 0)
-                       qib_devinfo(dd->pcidev,
-                                "mtrr_del(%lx, %lx, %lx) failed: %d\n",
-                                dd->wc_cookie, dd->wc_base,
-                                dd->wc_len, r);
-               dd->wc_cookie = 0; /* even on failure */
-       }
+       arch_phys_wc_del(dd->wc_cookie);
 }
 
 /**
index 56959adb6c7da51ccbb6d20307247b7cb69ad55a..cf32a778e7d0ccc0b6225d9c01442f5d2ec4cdb1 100644 (file)
@@ -386,8 +386,8 @@ static int ipoib_cm_nonsrq_init_rx(struct net_device *dev, struct ib_cm_id *cm_i
                                           rx->rx_ring[i].mapping,
                                           GFP_KERNEL)) {
                        ipoib_warn(priv, "failed to allocate receive buffer %d\n", i);
-                               ret = -ENOMEM;
-                               goto err_count;
+                       ret = -ENOMEM;
+                       goto err_count;
                }
                ret = ipoib_cm_post_receive_nonsrq(dev, rx, &t->wr, t->sge, i);
                if (ret) {
index 327529ee85eb1ed20bb8b7afad8022df94118108..575a072d765f65cc49190a3066218759bd3569cd 100644 (file)
@@ -65,6 +65,8 @@ static int
 isert_rdma_accept(struct isert_conn *isert_conn);
 struct rdma_cm_id *isert_setup_id(struct isert_np *isert_np);
 
+static void isert_release_work(struct work_struct *work);
+
 static inline bool
 isert_prot_cmd(struct isert_conn *conn, struct se_cmd *cmd)
 {
@@ -547,11 +549,11 @@ isert_create_pi_ctx(struct fast_reg_descriptor *desc,
        return 0;
 
 err_prot_mr:
-       ib_dereg_mr(desc->pi_ctx->prot_mr);
+       ib_dereg_mr(pi_ctx->prot_mr);
 err_prot_frpl:
-       ib_free_fast_reg_page_list(desc->pi_ctx->prot_frpl);
+       ib_free_fast_reg_page_list(pi_ctx->prot_frpl);
 err_pi_ctx:
-       kfree(desc->pi_ctx);
+       kfree(pi_ctx);
 
        return ret;
 }
@@ -648,6 +650,7 @@ isert_init_conn(struct isert_conn *isert_conn)
        mutex_init(&isert_conn->mutex);
        spin_lock_init(&isert_conn->pool_lock);
        INIT_LIST_HEAD(&isert_conn->fr_pool);
+       INIT_WORK(&isert_conn->release_work, isert_release_work);
 }
 
 static void
@@ -925,6 +928,7 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
 {
        struct isert_np *isert_np = cma_id->context;
        struct isert_conn *isert_conn;
+       bool terminating = false;
 
        if (isert_np->np_cm_id == cma_id)
                return isert_np_cma_handler(cma_id->context, event);
@@ -932,12 +936,25 @@ isert_disconnected_handler(struct rdma_cm_id *cma_id,
        isert_conn = cma_id->qp->qp_context;
 
        mutex_lock(&isert_conn->mutex);
+       terminating = (isert_conn->state == ISER_CONN_TERMINATING);
        isert_conn_terminate(isert_conn);
        mutex_unlock(&isert_conn->mutex);
 
        isert_info("conn %p completing wait\n", isert_conn);
        complete(&isert_conn->wait);
 
+       if (terminating)
+               goto out;
+
+       mutex_lock(&isert_np->np_accept_mutex);
+       if (!list_empty(&isert_conn->accept_node)) {
+               list_del_init(&isert_conn->accept_node);
+               isert_put_conn(isert_conn);
+               queue_work(isert_release_wq, &isert_conn->release_work);
+       }
+       mutex_unlock(&isert_np->np_accept_mutex);
+
+out:
        return 0;
 }
 
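
The terminating flag added above makes the disconnect path idempotent: the connection state is sampled under the mutex, and only the first DISCONNECTED/TIMEWAIT event does the accept-list removal and queues the release work. A hedged sketch of the pattern (demo_* names are illustrative, not the isert API):

    #include <linux/mutex.h>
    #include <linux/workqueue.h>

    enum demo_state { DEMO_UP, DEMO_TERMINATING };

    struct demo_conn {
    	struct mutex		mutex;
    	enum demo_state		state;
    	struct work_struct	release_work;
    };

    static int demo_disconnected(struct demo_conn *conn,
    				 struct workqueue_struct *release_wq)
    {
    	bool terminating;

    	mutex_lock(&conn->mutex);
    	terminating = (conn->state == DEMO_TERMINATING);
    	conn->state = DEMO_TERMINATING;	/* idempotent transition */
    	mutex_unlock(&conn->mutex);

    	if (terminating)
    		return 0;	/* an earlier event already queued release */

    	queue_work(release_wq, &conn->release_work);
    	return 0;
    }
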
@@ -2380,7 +2397,6 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
        page_off = offset % PAGE_SIZE;
 
        send_wr->sg_list = ib_sge;
-       send_wr->num_sge = sg_nents;
        send_wr->wr_id = (uintptr_t)&isert_cmd->tx_desc;
        /*
         * Perform mapping of TCM scatterlist memory ib_sge dma_addr.
@@ -2400,14 +2416,17 @@ isert_build_rdma_wr(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd,
                          ib_sge->addr, ib_sge->length, ib_sge->lkey);
                page_off = 0;
                data_left -= ib_sge->length;
+               if (!data_left)
+                       break;
                ib_sge++;
                isert_dbg("Incrementing ib_sge pointer to %p\n", ib_sge);
        }
 
+       send_wr->num_sge = ++i;
        isert_dbg("Set outgoing sg_list: %p num_sg: %u from TCM SGLs\n",
                  send_wr->sg_list, send_wr->num_sge);
 
-       return sg_nents;
+       return send_wr->num_sge;
 }
 
 static int
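
The isert_build_rdma_wr change above stops advertising sg_nents in send_wr->num_sge and instead reports the number of SGEs the loop actually populated, breaking out once data_left reaches zero so the pointer never steps past the last valid element. The counting pattern looks roughly like this (demo_fill_sges is a sketch, not the driver function):

    #include <linux/kernel.h>
    #include <linux/scatterlist.h>
    #include <rdma/ib_verbs.h>

    static int demo_fill_sges(struct ib_sge *sge, struct scatterlist *sg,
    			      int sg_nents, u32 data_left, u32 lkey)
    {
    	int i;

    	for (i = 0; i < sg_nents && data_left; i++, sge++) {
    		sge->addr   = sg_dma_address(&sg[i]);
    		sge->length = min_t(u32, sg_dma_len(&sg[i]), data_left);
    		sge->lkey   = lkey;
    		data_left  -= sge->length;
    	}

    	return i;	/* what send_wr->num_sge must be set to */
    }
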
@@ -3366,7 +3385,6 @@ static void isert_wait_conn(struct iscsi_conn *conn)
        isert_wait4flush(isert_conn);
        isert_wait4logout(isert_conn);
 
-       INIT_WORK(&isert_conn->release_work, isert_release_work);
        queue_work(isert_release_wq, &isert_conn->release_work);
 }
 
@@ -3374,6 +3392,7 @@ static void isert_free_conn(struct iscsi_conn *conn)
 {
        struct isert_conn *isert_conn = conn->context;
 
+       isert_wait4flush(isert_conn);
        isert_put_conn(isert_conn);
 }
 
index f362883c94e37ce828d18938b7d25790135cf2d1..1d247bcf2ae25b20508f734d72b4904f704987d8 100644 (file)
@@ -747,6 +747,63 @@ static void joydev_cleanup(struct joydev *joydev)
                input_close_device(handle);
 }
 
+static bool joydev_dev_is_absolute_mouse(struct input_dev *dev)
+{
+       DECLARE_BITMAP(jd_scratch, KEY_CNT);
+
+       BUILD_BUG_ON(ABS_CNT > KEY_CNT || EV_CNT > KEY_CNT);
+
+       /*
+        * Virtualization (VMware, etc) and remote management (HP
+        * ILO2) solutions use absolute coordinates for their virtual
+        * pointing devices so that there is a one-to-one relationship
+        * between pointer position on the host screen and virtual
+        * guest screen, and so their mice use ABS_X, ABS_Y and 3
+        * primary button events. This clashes with what joydev
+        * considers to be joysticks (a device with, at minimum, an
+        * ABS_X axis).
+        *
+        * Here we are trying to separate absolute mice from
+        * joysticks. A device is, for joystick detection purposes,
+        * considered to be an absolute mouse if the following is
+        * true:
+        *
+        * 1) Event types are exactly EV_ABS, EV_KEY and EV_SYN.
+        * 2) Absolute events are exactly ABS_X and ABS_Y.
+        * 3) Keys are exactly BTN_LEFT, BTN_RIGHT and BTN_MIDDLE.
+        * 4) Device is not on "Amiga" bus.
+        */
+
+       bitmap_zero(jd_scratch, EV_CNT);
+       __set_bit(EV_ABS, jd_scratch);
+       __set_bit(EV_KEY, jd_scratch);
+       __set_bit(EV_SYN, jd_scratch);
+       if (!bitmap_equal(jd_scratch, dev->evbit, EV_CNT))
+               return false;
+
+       bitmap_zero(jd_scratch, ABS_CNT);
+       __set_bit(ABS_X, jd_scratch);
+       __set_bit(ABS_Y, jd_scratch);
+       if (!bitmap_equal(dev->absbit, jd_scratch, ABS_CNT))
+               return false;
+
+       bitmap_zero(jd_scratch, KEY_CNT);
+       __set_bit(BTN_LEFT, jd_scratch);
+       __set_bit(BTN_RIGHT, jd_scratch);
+       __set_bit(BTN_MIDDLE, jd_scratch);
+
+       if (!bitmap_equal(dev->keybit, jd_scratch, KEY_CNT))
+               return false;
+
+       /*
+        * Amiga joystick (amijoy) historically uses left/middle/right
+        * button events.
+        */
+       if (dev->id.bustype == BUS_AMIGA)
+               return false;
+
+       return true;
+}
 
 static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
 {
@@ -758,6 +815,10 @@ static bool joydev_match(struct input_handler *handler, struct input_dev *dev)
        if (test_bit(EV_KEY, dev->evbit) && test_bit(BTN_DIGI, dev->keybit))
                return false;
 
+       /* Avoid absolute mice */
+       if (joydev_dev_is_absolute_mouse(dev))
+               return false;
+
        return true;
 }
 
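
joydev_dev_is_absolute_mouse() relies on exact bitmap equality rather than per-bit tests: the expected capability set is built in scratch space and compared with bitmap_equal(), so a device with any extra axis, key, or event type fails the match and is still treated as a joystick. A condensed sketch of that idiom (demo_* names are hypothetical):

    #include <linux/bitmap.h>
    #include <linux/input.h>

    static bool demo_has_exactly_lmr_buttons(struct input_dev *dev)
    {
    	DECLARE_BITMAP(expected, KEY_CNT);

    	bitmap_zero(expected, KEY_CNT);
    	__set_bit(BTN_LEFT, expected);
    	__set_bit(BTN_RIGHT, expected);
    	__set_bit(BTN_MIDDLE, expected);

    	/* equality, not subset: any extra button disqualifies */
    	return bitmap_equal(dev->keybit, expected, KEY_CNT);
    }
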
index 7462d2fc8cfed8d4bf11d77fb2bb5e853d9f00eb..d7820d1152d2ef2d78f39c16281a9c316906e7ae 100644 (file)
@@ -156,7 +156,7 @@ config MOUSE_PS2_VMMOUSE
          Say Y here if you are running under control of VMware hypervisor
          (ESXi, Workstation or Fusion). Also make sure that when you enable
          this option, you remove the xf86-input-vmmouse user-space driver
-         or upgrade it to at least xf86-input-vmmouse 13.0.1, which doesn't
+         or upgrade it to at least xf86-input-vmmouse 13.1.0, which doesn't
          load in the presence of an in-kernel vmmouse driver.
 
          If unsure, say N.
index e6708f6efb4db7189dccdc1ed1fc0b3ca117a6ef..a353b7de6d22e91a52378cd4c106b17cafc26a07 100644 (file)
@@ -941,6 +941,11 @@ static void alps_get_finger_coordinate_v7(struct input_mt_pos *mt,
        case V7_PACKET_ID_TWO:
                mt[1].x &= ~0x000F;
                mt[1].y |= 0x000F;
+               /* Detect false-positive touches where x & y report max value */
+               if (mt[1].y == 0x7ff && mt[1].x == 0xff0) {
+                       mt[1].x = 0;
+                       /* y gets set to 0 at the end of this function */
+               }
                break;
 
        case V7_PACKET_ID_MULTI:
@@ -1058,9 +1063,8 @@ static void alps_process_trackstick_packet_v7(struct psmouse *psmouse)
        right = (packet[1] & 0x02) >> 1;
        middle = (packet[1] & 0x04) >> 2;
 
-       /* Divide 2 since trackpoint's speed is too fast */
-       input_report_rel(dev2, REL_X, (char)x / 2);
-       input_report_rel(dev2, REL_Y, -((char)y / 2));
+       input_report_rel(dev2, REL_X, (char)x);
+       input_report_rel(dev2, REL_Y, -((char)y));
 
        input_report_key(dev2, BTN_LEFT, left);
        input_report_key(dev2, BTN_RIGHT, right);
index 991dc6b20a58594cbc28ee1dfc8f45d677497fe5..ce3d40004458c87392339472f654462fae7cf0bc 100644 (file)
@@ -315,7 +315,7 @@ static void elantech_report_semi_mt_data(struct input_dev *dev,
                                         unsigned int x2, unsigned int y2)
 {
        elantech_set_slot(dev, 0, num_fingers != 0, x1, y1);
-       elantech_set_slot(dev, 1, num_fingers == 2, x2, y2);
+       elantech_set_slot(dev, 1, num_fingers >= 2, x2, y2);
 }
 
 /*
@@ -1376,10 +1376,11 @@ static bool elantech_is_signature_valid(const unsigned char *param)
                return true;
 
        /*
-        * Some models have a revision higher then 20. Meaning param[2] may
-        * be 10 or 20, skip the rates check for these.
+        * Some hw_version >= 4 models have a revision higher than 20, meaning
+        * that param[2] may be 10 or 20; skip the rates check for these.
         */
-       if (param[0] == 0x46 && (param[1] & 0xef) == 0x0f && param[2] < 40)
+       if ((param[0] & 0x0f) >= 0x06 && (param[1] & 0xaf) == 0x0f &&
+           param[2] < 40)
                return true;
 
        for (i = 0; i < ARRAY_SIZE(rates); i++)
@@ -1555,6 +1556,7 @@ static int elantech_set_properties(struct elantech_data *etd)
                case 9:
                case 10:
                case 13:
+               case 14:
                        etd->hw_version = 4;
                        break;
                default:
index 630af73e98c488a5e266e4ccb6eed5dba622f3d3..35c8d0ceabeebf989b8eeff5cd54ee8f3ac2e247 100644 (file)
@@ -150,6 +150,11 @@ static const struct min_max_quirk min_max_pnpid_table[] = {
                {ANY_BOARD_ID, 2961},
                1024, 5112, 2024, 4832
        },
+       {
+               (const char * const []){"LEN2000", NULL},
+               {ANY_BOARD_ID, ANY_BOARD_ID},
+               1024, 5113, 2021, 4832
+       },
        {
                (const char * const []){"LEN2001", NULL},
                {ANY_BOARD_ID, ANY_BOARD_ID},
@@ -191,7 +196,7 @@ static const char * const topbuttonpad_pnp_ids[] = {
        "LEN0045",
        "LEN0047",
        "LEN0049",
-       "LEN2000",
+       "LEN2000", /* S540 */
        "LEN2001", /* Edge E431 */
        "LEN2002", /* Edge E531 */
        "LEN2003",
index 2d5ff86b343fbc30f1abf9e363333164311bfec6..e4c31256a74dbeddb70c74ec628ed856065cdff4 100644 (file)
@@ -164,7 +164,7 @@ static irqreturn_t stmpe_ts_handler(int irq, void *data)
                        STMPE_TSC_CTRL_TSC_EN, STMPE_TSC_CTRL_TSC_EN);
 
        /* start polling for touch_det to detect release */
-       schedule_delayed_work(&ts->work, HZ / 50);
+       schedule_delayed_work(&ts->work, msecs_to_jiffies(50));
 
        return IRQ_HANDLED;
 }
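
Two things change in the stmpe line above: the delay is now stated in milliseconds rather than as a fraction of the tick rate, and, as read from the diff, the poll interval grows from HZ/50 (20 ms in jiffies) to an explicit 50 ms. msecs_to_jiffies() keeps the unit visible and stays correct for any CONFIG_HZ:

    #include <linux/jiffies.h>

    static unsigned long demo_poll_delay(void)
    {
    	/* 50 ms expressed portably; HZ/50 would be 20 ms and hides
    	 * the unit behind the kernel's configured tick rate. */
    	return msecs_to_jiffies(50);
    }
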
index aecb9ad2e7016885cda6ee2a7c08af1e461c9980..642f4a53de509f2f240f4cd5279ce2749d235455 100644 (file)
@@ -187,7 +187,7 @@ static int sx8654_probe(struct i2c_client *client,
                return -ENOMEM;
 
        input = devm_input_allocate_device(&client->dev);
-       if (!sx8654)
+       if (!input)
                return -ENOMEM;
 
        input->name = "SX8654 I2C Touchscreen";
index e43d48956dea239fe6816bdb23f0174754c623ee..fffea87a014f9ecf58ce9f2d55901b7d83a8bab9 100644 (file)
@@ -34,6 +34,7 @@
 #include <linux/irq.h>
 #include <linux/msi.h>
 #include <linux/dma-contiguous.h>
+#include <linux/irqdomain.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -2930,6 +2931,7 @@ static void *alloc_coherent(struct device *dev, size_t size,
        size      = PAGE_ALIGN(size);
        dma_mask  = dev->coherent_dma_mask;
        flag     &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
+       flag     |= __GFP_ZERO;
 
        page = alloc_pages(flag | __GFP_NOWARN,  get_order(size));
        if (!page) {
@@ -3851,6 +3853,21 @@ union irte {
        } fields;
 };
 
+struct irq_2_irte {
+       u16 devid; /* Device ID for IRTE table */
+       u16 index; /* Index into IRTE table*/
+};
+
+struct amd_ir_data {
+       struct irq_2_irte                       irq_2_irte;
+       union irte                              irte_entry;
+       union {
+               struct msi_msg                  msi_entry;
+       };
+};
+
+static struct irq_chip amd_ir_chip;
+
 #define DTE_IRQ_PHYS_ADDR_MASK (((1ULL << 45)-1) << 6)
 #define DTE_IRQ_REMAP_INTCTL    (2ULL << 60)
 #define DTE_IRQ_TABLE_LEN       (8ULL << 1)
@@ -3944,7 +3961,7 @@ out_unlock:
        return table;
 }
 
-static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count)
 {
        struct irq_remap_table *table;
        unsigned long flags;
@@ -3966,18 +3983,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
                        c = 0;
 
                if (c == count) {
-                       struct irq_2_irte *irte_info;
-
                        for (; c != 0; --c)
                                table->table[index - c + 1] = IRTE_ALLOCATED;
 
                        index -= count - 1;
-
-                       cfg->remapped         = 1;
-                       irte_info             = &cfg->irq_2_irte;
-                       irte_info->devid      = devid;
-                       irte_info->index      = index;
-
                        goto out;
                }
        }
@@ -3990,22 +3999,6 @@ out:
        return index;
 }
 
-static int get_irte(u16 devid, int index, union irte *irte)
-{
-       struct irq_remap_table *table;
-       unsigned long flags;
-
-       table = get_irq_table(devid, false);
-       if (!table)
-               return -ENOMEM;
-
-       spin_lock_irqsave(&table->lock, flags);
-       irte->val = table->table[index];
-       spin_unlock_irqrestore(&table->lock, flags);
-
-       return 0;
-}
-
 static int modify_irte(u16 devid, int index, union irte irte)
 {
        struct irq_remap_table *table;
@@ -4052,243 +4045,316 @@ static void free_irte(u16 devid, int index)
        iommu_completion_wait(iommu);
 }
 
-static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
-                             unsigned int destination, int vector,
-                             struct io_apic_irq_attr *attr)
+static int get_devid(struct irq_alloc_info *info)
 {
-       struct irq_remap_table *table;
-       struct irq_2_irte *irte_info;
-       struct irq_cfg *cfg;
-       union irte irte;
-       int ioapic_id;
-       int index;
-       int devid;
-       int ret;
-
-       cfg = irq_cfg(irq);
-       if (!cfg)
-               return -EINVAL;
-
-       irte_info = &cfg->irq_2_irte;
-       ioapic_id = mpc_ioapic_id(attr->ioapic);
-       devid     = get_ioapic_devid(ioapic_id);
-
-       if (devid < 0)
-               return devid;
-
-       table = get_irq_table(devid, true);
-       if (table == NULL)
-               return -ENOMEM;
-
-       index = attr->ioapic_pin;
+       int devid = -1;
 
-       /* Setup IRQ remapping info */
-       cfg->remapped         = 1;
-       irte_info->devid      = devid;
-       irte_info->index      = index;
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_IOAPIC:
+               devid     = get_ioapic_devid(info->ioapic_id);
+               break;
+       case X86_IRQ_ALLOC_TYPE_HPET:
+               devid     = get_hpet_devid(info->hpet_id);
+               break;
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               devid = get_device_id(&info->msi_dev->dev);
+               break;
+       default:
+               BUG_ON(1);
+               break;
+       }
 
-       /* Setup IRTE for IOMMU */
-       irte.val                = 0;
-       irte.fields.vector      = vector;
-       irte.fields.int_type    = apic->irq_delivery_mode;
-       irte.fields.destination = destination;
-       irte.fields.dm          = apic->irq_dest_mode;
-       irte.fields.valid       = 1;
-
-       ret = modify_irte(devid, index, irte);
-       if (ret)
-               return ret;
+       return devid;
+}
 
-       /* Setup IOAPIC entry */
-       memset(entry, 0, sizeof(*entry));
+static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
+{
+       struct amd_iommu *iommu;
+       int devid;
 
-       entry->vector        = index;
-       entry->mask          = 0;
-       entry->trigger       = attr->trigger;
-       entry->polarity      = attr->polarity;
+       if (!info)
+               return NULL;
 
-       /*
-        * Mask level triggered irqs.
-        */
-       if (attr->trigger)
-               entry->mask = 1;
+       devid = get_devid(info);
+       if (devid >= 0) {
+               iommu = amd_iommu_rlookup_table[devid];
+               if (iommu)
+                       return iommu->ir_domain;
+       }
 
-       return 0;
+       return NULL;
 }
 
-static int set_affinity(struct irq_data *data, const struct cpumask *mask,
-                       bool force)
+static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
 {
-       struct irq_2_irte *irte_info;
-       unsigned int dest, irq;
-       struct irq_cfg *cfg;
-       union irte irte;
-       int err;
-
-       if (!config_enabled(CONFIG_SMP))
-               return -1;
-
-       cfg       = irqd_cfg(data);
-       irq       = data->irq;
-       irte_info = &cfg->irq_2_irte;
+       struct amd_iommu *iommu;
+       int devid;
 
-       if (!cpumask_intersects(mask, cpu_online_mask))
-               return -EINVAL;
+       if (!info)
+               return NULL;
 
-       if (get_irte(irte_info->devid, irte_info->index, &irte))
-               return -EBUSY;
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               devid = get_device_id(&info->msi_dev->dev);
+               if (devid >= 0) {
+                       iommu = amd_iommu_rlookup_table[devid];
+                       if (iommu)
+                               return iommu->msi_domain;
+               }
+               break;
+       default:
+               break;
+       }
 
-       if (assign_irq_vector(irq, cfg, mask))
-               return -EBUSY;
+       return NULL;
+}
 
-       err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
-       if (err) {
-               if (assign_irq_vector(irq, cfg, data->affinity))
-                       pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
+struct irq_remap_ops amd_iommu_irq_ops = {
+       .prepare                = amd_iommu_prepare,
+       .enable                 = amd_iommu_enable,
+       .disable                = amd_iommu_disable,
+       .reenable               = amd_iommu_reenable,
+       .enable_faulting        = amd_iommu_enable_faulting,
+       .get_ir_irq_domain      = get_ir_irq_domain,
+       .get_irq_domain         = get_irq_domain,
+};
 
-       irte.fields.vector      = cfg->vector;
-       irte.fields.destination = dest;
+static void irq_remapping_prepare_irte(struct amd_ir_data *data,
+                                      struct irq_cfg *irq_cfg,
+                                      struct irq_alloc_info *info,
+                                      int devid, int index, int sub_handle)
+{
+       struct irq_2_irte *irte_info = &data->irq_2_irte;
+       struct msi_msg *msg = &data->msi_entry;
+       union irte *irte = &data->irte_entry;
+       struct IO_APIC_route_entry *entry;
 
-       modify_irte(irte_info->devid, irte_info->index, irte);
+       data->irq_2_irte.devid = devid;
+       data->irq_2_irte.index = index + sub_handle;
 
-       if (cfg->move_in_progress)
-               send_cleanup_vector(cfg);
+       /* Setup IRTE for IOMMU */
+       irte->val = 0;
+       irte->fields.vector      = irq_cfg->vector;
+       irte->fields.int_type    = apic->irq_delivery_mode;
+       irte->fields.destination = irq_cfg->dest_apicid;
+       irte->fields.dm          = apic->irq_dest_mode;
+       irte->fields.valid       = 1;
+
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_IOAPIC:
+               /* Setup IOAPIC entry */
+               entry = info->ioapic_entry;
+               info->ioapic_entry = NULL;
+               memset(entry, 0, sizeof(*entry));
+               entry->vector        = index;
+               entry->mask          = 0;
+               entry->trigger       = info->ioapic_trigger;
+               entry->polarity      = info->ioapic_polarity;
+               /* Mask level triggered irqs. */
+               if (info->ioapic_trigger)
+                       entry->mask = 1;
+               break;
 
-       cpumask_copy(data->affinity, mask);
+       case X86_IRQ_ALLOC_TYPE_HPET:
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               msg->address_hi = MSI_ADDR_BASE_HI;
+               msg->address_lo = MSI_ADDR_BASE_LO;
+               msg->data = irte_info->index;
+               break;
 
-       return 0;
+       default:
+               BUG_ON(1);
+               break;
+       }
 }
 
-static int free_irq(int irq)
+static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
+                              unsigned int nr_irqs, void *arg)
 {
-       struct irq_2_irte *irte_info;
+       struct irq_alloc_info *info = arg;
+       struct irq_data *irq_data;
+       struct amd_ir_data *data;
        struct irq_cfg *cfg;
+       int i, ret, devid;
+       int index = -1;
 
-       cfg = irq_cfg(irq);
-       if (!cfg)
+       if (!info)
+               return -EINVAL;
+       if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
+           info->type != X86_IRQ_ALLOC_TYPE_MSIX)
                return -EINVAL;
 
-       irte_info = &cfg->irq_2_irte;
-
-       free_irte(irte_info->devid, irte_info->index);
+       /*
+        * With IRQ remapping enabled, don't need contiguous CPU vectors
+        * to support multiple MSI interrupts.
+        */
+       if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
 
-       return 0;
-}
+       devid = get_devid(info);
+       if (devid < 0)
+               return -EINVAL;
 
-static void compose_msi_msg(struct pci_dev *pdev,
-                           unsigned int irq, unsigned int dest,
-                           struct msi_msg *msg, u8 hpet_id)
-{
-       struct irq_2_irte *irte_info;
-       struct irq_cfg *cfg;
-       union irte irte;
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+       if (ret < 0)
+               return ret;
 
-       cfg = irq_cfg(irq);
-       if (!cfg)
-               return;
+       ret = -ENOMEM;
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto out_free_parent;
 
-       irte_info = &cfg->irq_2_irte;
+       if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
+               if (get_irq_table(devid, true))
+                       index = info->ioapic_pin;
+               else
+                       ret = -ENOMEM;
+       } else {
+               index = alloc_irq_index(devid, nr_irqs);
+       }
+       if (index < 0) {
+               pr_warn("Failed to allocate IRTE\n");
+               kfree(data);
+               goto out_free_parent;
+       }
 
-       irte.val                = 0;
-       irte.fields.vector      = cfg->vector;
-       irte.fields.int_type    = apic->irq_delivery_mode;
-       irte.fields.destination = dest;
-       irte.fields.dm          = apic->irq_dest_mode;
-       irte.fields.valid       = 1;
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               cfg = irqd_cfg(irq_data);
+               if (!irq_data || !cfg) {
+                       ret = -EINVAL;
+                       goto out_free_data;
+               }
 
-       modify_irte(irte_info->devid, irte_info->index, irte);
+               if (i > 0) {
+                       data = kzalloc(sizeof(*data), GFP_KERNEL);
+                       if (!data)
+                               goto out_free_data;
+               }
+               irq_data->hwirq = (devid << 16) + i;
+               irq_data->chip_data = data;
+               irq_data->chip = &amd_ir_chip;
+               irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
+               irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
+       }
+       return 0;
 
-       msg->address_hi = MSI_ADDR_BASE_HI;
-       msg->address_lo = MSI_ADDR_BASE_LO;
-       msg->data       = irte_info->index;
+out_free_data:
+       for (i--; i >= 0; i--) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               if (irq_data)
+                       kfree(irq_data->chip_data);
+       }
+       for (i = 0; i < nr_irqs; i++)
+               free_irte(devid, index + i);
+out_free_parent:
+       irq_domain_free_irqs_common(domain, virq, nr_irqs);
+       return ret;
 }
 
-static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
+static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
+                              unsigned int nr_irqs)
 {
-       struct irq_cfg *cfg;
-       int index;
-       u16 devid;
-
-       if (!pdev)
-               return -EINVAL;
+       struct irq_2_irte *irte_info;
+       struct irq_data *irq_data;
+       struct amd_ir_data *data;
+       int i;
 
-       cfg = irq_cfg(irq);
-       if (!cfg)
-               return -EINVAL;
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               if (irq_data && irq_data->chip_data) {
+                       data = irq_data->chip_data;
+                       irte_info = &data->irq_2_irte;
+                       free_irte(irte_info->devid, irte_info->index);
+                       kfree(data);
+               }
+       }
+       irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
 
-       devid = get_device_id(&pdev->dev);
-       index = alloc_irq_index(cfg, devid, nvec);
+static void irq_remapping_activate(struct irq_domain *domain,
+                                  struct irq_data *irq_data)
+{
+       struct amd_ir_data *data = irq_data->chip_data;
+       struct irq_2_irte *irte_info = &data->irq_2_irte;
 
-       return index < 0 ? MAX_IRQS_PER_TABLE : index;
+       modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
 }
 
-static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
-                        int index, int offset)
+static void irq_remapping_deactivate(struct irq_domain *domain,
+                                    struct irq_data *irq_data)
 {
-       struct irq_2_irte *irte_info;
-       struct irq_cfg *cfg;
-       u16 devid;
+       struct amd_ir_data *data = irq_data->chip_data;
+       struct irq_2_irte *irte_info = &data->irq_2_irte;
+       union irte entry;
 
-       if (!pdev)
-               return -EINVAL;
+       entry.val = 0;
+       modify_irte(irte_info->devid, irte_info->index, entry);
+}
 
-       cfg = irq_cfg(irq);
-       if (!cfg)
-               return -EINVAL;
+static struct irq_domain_ops amd_ir_domain_ops = {
+       .alloc = irq_remapping_alloc,
+       .free = irq_remapping_free,
+       .activate = irq_remapping_activate,
+       .deactivate = irq_remapping_deactivate,
+};
 
-       if (index >= MAX_IRQS_PER_TABLE)
-               return 0;
+static int amd_ir_set_affinity(struct irq_data *data,
+                              const struct cpumask *mask, bool force)
+{
+       struct amd_ir_data *ir_data = data->chip_data;
+       struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+       struct irq_cfg *cfg = irqd_cfg(data);
+       struct irq_data *parent = data->parent_data;
+       int ret;
 
-       devid           = get_device_id(&pdev->dev);
-       irte_info       = &cfg->irq_2_irte;
+       ret = parent->chip->irq_set_affinity(parent, mask, force);
+       if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+               return ret;
 
-       cfg->remapped         = 1;
-       irte_info->devid      = devid;
-       irte_info->index      = index + offset;
+       /*
+        * Atomically update the IRTE with the new destination and
+        * vector, then flush the interrupt entry cache.
+        */
+       ir_data->irte_entry.fields.vector = cfg->vector;
+       ir_data->irte_entry.fields.destination = cfg->dest_apicid;
+       modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
 
-       return 0;
+       /*
+        * After this point, all the interrupts will start arriving
+        * at the new destination. So, time to cleanup the previous
+        * vector allocation.
+        */
+       send_cleanup_vector(cfg);
+
+       return IRQ_SET_MASK_OK_DONE;
 }
 
-static int alloc_hpet_msi(unsigned int irq, unsigned int id)
+static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 {
-       struct irq_2_irte *irte_info;
-       struct irq_cfg *cfg;
-       int index, devid;
+       struct amd_ir_data *ir_data = irq_data->chip_data;
 
-       cfg = irq_cfg(irq);
-       if (!cfg)
-               return -EINVAL;
+       *msg = ir_data->msi_entry;
+}
 
-       irte_info = &cfg->irq_2_irte;
-       devid     = get_hpet_devid(id);
-       if (devid < 0)
-               return devid;
+static struct irq_chip amd_ir_chip = {
+       .irq_ack = ir_ack_apic_edge,
+       .irq_set_affinity = amd_ir_set_affinity,
+       .irq_compose_msi_msg = ir_compose_msi_msg,
+};
 
-       index = alloc_irq_index(cfg, devid, 1);
-       if (index < 0)
-               return index;
+int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+       iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
+       if (!iommu->ir_domain)
+               return -ENOMEM;
 
-       cfg->remapped         = 1;
-       irte_info->devid      = devid;
-       irte_info->index      = index;
+       iommu->ir_domain->parent = arch_get_ir_parent_domain();
+       iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
 
        return 0;
 }
-
-struct irq_remap_ops amd_iommu_irq_ops = {
-       .prepare                = amd_iommu_prepare,
-       .enable                 = amd_iommu_enable,
-       .disable                = amd_iommu_disable,
-       .reenable               = amd_iommu_reenable,
-       .enable_faulting        = amd_iommu_enable_faulting,
-       .setup_ioapic_entry     = setup_ioapic_entry,
-       .set_affinity           = set_affinity,
-       .free_irq               = free_irq,
-       .compose_msi_msg        = compose_msi_msg,
-       .msi_alloc_irq          = msi_alloc_irq,
-       .msi_setup_irq          = msi_setup_irq,
-       .alloc_hpet_msi         = alloc_hpet_msi,
-};
 #endif
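
The block above converts the AMD IR driver from the old per-operation irq_remap_ops hooks (setup_ioapic_entry, msi_alloc_irq, ...) to hierarchical irqdomains: .alloc reserves IRTEs and hangs a per-interrupt struct amd_ir_data off irq_data->chip_data, .activate programs the IRTE, and .free unwinds both. A skeleton of the alloc/free half of that shape, using only the kernel APIs visible in the diff (demo_* names are made up):

    #include <linux/errno.h>
    #include <linux/irqdomain.h>
    #include <linux/slab.h>

    struct demo_ir_data { int index; };

    static int demo_alloc(struct irq_domain *d, unsigned int virq,
    			  unsigned int nr_irqs, void *arg)
    {
    	int i, ret;

    	/* let the parent (x86 vector) domain allocate first */
    	ret = irq_domain_alloc_irqs_parent(d, virq, nr_irqs, arg);
    	if (ret < 0)
    		return ret;

    	for (i = 0; i < nr_irqs; i++) {
    		struct demo_ir_data *data = kzalloc(sizeof(*data), GFP_KERNEL);

    		if (!data)
    			goto err;
    		irq_domain_get_irq_data(d, virq + i)->chip_data = data;
    	}
    	return 0;

    err:
    	while (--i >= 0)
    		kfree(irq_domain_get_irq_data(d, virq + i)->chip_data);
    	irq_domain_free_irqs_common(d, virq, nr_irqs);
    	return -ENOMEM;
    }

    static void demo_free(struct irq_domain *d, unsigned int virq,
    			  unsigned int nr_irqs)
    {
    	int i;

    	for (i = 0; i < nr_irqs; i++)
    		kfree(irq_domain_get_irq_data(d, virq + i)->chip_data);
    	irq_domain_free_irqs_common(d, virq, nr_irqs);
    }

    static const struct irq_domain_ops demo_ir_domain_ops = {
    	.alloc = demo_alloc,
    	.free  = demo_free,
    };
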
index 450ef5001a65ab3bea19e1a9648324eea9951ede..c17df04d7a7fff55763b8eb3a0781b1f8206a683 100644 (file)
@@ -1124,6 +1124,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
        if (ret)
                return ret;
 
+       ret = amd_iommu_create_irq_domain(iommu);
+       if (ret)
+               return ret;
+
        /*
         * Make sure IOMMU is not considered to translate itself. The IVRS
         * table tells us so, but this is a lie!
index 72b0fd455e2444cc12a9ee2b678c6b0456c80503..0a21142d3639d0741052be510a84df7f3fe2ef5a 100644 (file)
@@ -62,6 +62,15 @@ extern u8 amd_iommu_pc_get_max_counters(u16 devid);
 extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr, u8 fxn,
                                    u64 *value, bool is_write);
 
+#ifdef CONFIG_IRQ_REMAP
+extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+       return 0;
+}
+#endif
+
 #define PPR_SUCCESS                    0x0
 #define PPR_INVALID                    0x1
 #define PPR_FAILURE                    0xf
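
The header change follows the usual kernel pattern for optional features: a real prototype under the config option, plus a static inline stub that succeeds when the feature is compiled out, so init_iommu_one() can call amd_iommu_create_irq_domain() unconditionally. The generic form of the idiom (names here are placeholders):

    #ifdef CONFIG_SOME_FEATURE
    int some_feature_init(struct some_dev *dev);
    #else
    static inline int some_feature_init(struct some_dev *dev)
    {
    	return 0;	/* feature compiled out: succeed silently */
    }
    #endif
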
index 05030e523771a6ee3befbe890ed45b47a86f8f7f..6533e874c9d77972f0523cfca326830d02e70dd7 100644 (file)
@@ -398,6 +398,7 @@ struct amd_iommu_fault {
 
 
 struct iommu_domain;
+struct irq_domain;
 
 /*
  * This structure contains generic data for  IOMMU protection domains
@@ -579,6 +580,10 @@ struct amd_iommu {
        /* The maximum PC banks and counters/bank (PCSup=1) */
        u8 max_banks;
        u8 max_counters;
+#ifdef CONFIG_IRQ_REMAP
+       struct irq_domain *ir_domain;
+       struct irq_domain *msi_domain;
+#endif
 };
 
 struct devid_map {
index a1cbba9056fdba15b1334bd923714c65957fc20c..3465faf1809e4cb1d6630e5cdc8f87cd4e405bd2 100644 (file)
@@ -266,6 +266,7 @@ static void put_pasid_state(struct pasid_state *pasid_state)
 
 static void put_pasid_state_wait(struct pasid_state *pasid_state)
 {
+       atomic_dec(&pasid_state->count);
        wait_event(pasid_state->wq, !atomic_read(&pasid_state->count));
        free_pasid_state(pasid_state);
 }
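
The one-line fix above matters because the caller itself holds a reference: without the atomic_dec(), count can never reach zero and wait_event() blocks forever. The shape of the put-and-wait pattern, sketched with hypothetical demo_* names:

    #include <linux/atomic.h>
    #include <linux/wait.h>

    struct demo_obj {
    	atomic_t		count;
    	wait_queue_head_t	wq;
    };

    /* Other holders drop with:
     *   if (atomic_dec_and_test(&obj->count)) wake_up(&obj->wq);
     */
    static void demo_put_and_wait(struct demo_obj *obj)
    {
    	atomic_dec(&obj->count);	/* drop our own reference first */
    	wait_event(obj->wq, !atomic_read(&obj->count));
    	/* no references remain; the object can be freed */
    }
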
index 9f7e1d34a32bc8ec75c6470260c66f839419f502..66a803b9dd3af928024d853995ed31383e0a8dd2 100644 (file)
 #define RESUME_TERMINATE               (1 << 0)
 
 #define TTBCR2_SEP_SHIFT               15
-#define TTBCR2_SEP_MASK                        0x7
-
-#define TTBCR2_ADDR_32                 0
-#define TTBCR2_ADDR_36                 1
-#define TTBCR2_ADDR_40                 2
-#define TTBCR2_ADDR_42                 3
-#define TTBCR2_ADDR_44                 4
-#define TTBCR2_ADDR_48                 5
+#define TTBCR2_SEP_UPSTREAM            (0x7 << TTBCR2_SEP_SHIFT)
 
 #define TTBRn_HI_ASID_SHIFT            16
 
@@ -793,26 +786,7 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
                writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
                if (smmu->version > ARM_SMMU_V1) {
                        reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
-                       switch (smmu->va_size) {
-                       case 32:
-                               reg |= (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT);
-                               break;
-                       case 36:
-                               reg |= (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT);
-                               break;
-                       case 40:
-                               reg |= (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT);
-                               break;
-                       case 42:
-                               reg |= (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT);
-                               break;
-                       case 44:
-                               reg |= (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT);
-                               break;
-                       case 48:
-                               reg |= (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT);
-                               break;
-                       }
+                       reg |= TTBCR2_SEP_UPSTREAM;
                        writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
                }
        } else {
index 9847613085e157976707e0d1aa0cc87c3e8b3c68..536f2d8ea41abe502a88628ffc54cc61db6a6ff7 100644 (file)
@@ -1087,8 +1087,8 @@ static void free_iommu(struct intel_iommu *iommu)
 
        if (iommu->irq) {
                free_irq(iommu->irq, iommu);
-               irq_set_handler_data(iommu->irq, NULL);
                dmar_free_hwirq(iommu->irq);
+               iommu->irq = 0;
        }
 
        if (iommu->qi) {
@@ -1642,23 +1642,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
        if (iommu->irq)
                return 0;
 
-       irq = dmar_alloc_hwirq();
-       if (irq <= 0) {
+       irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
+       if (irq > 0) {
+               iommu->irq = irq;
+       } else {
                pr_err("IOMMU: no free vectors\n");
                return -EINVAL;
        }
 
-       irq_set_handler_data(irq, iommu);
-       iommu->irq = irq;
-
-       ret = arch_setup_dmar_msi(irq);
-       if (ret) {
-               irq_set_handler_data(irq, NULL);
-               iommu->irq = 0;
-               dmar_free_hwirq(irq);
-               return ret;
-       }
-
        ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
        if (ret)
                pr_err("IOMMU: can't request irq\n");
index 68d43beccb7e560f845ad49b8ae7d9e38872fcf7..5ecfaf29933ad4634e2124544e3c800b9b309d44 100644 (file)
@@ -422,6 +422,14 @@ static int dmar_map_gfx = 1;
 static int dmar_forcedac;
 static int intel_iommu_strict;
 static int intel_iommu_superpage = 1;
+static int intel_iommu_ecs = 1;
+
+/* We only actually use ECS when PASID support (on the new bit 40)
+ * is also advertised. Some early implementations â€” the ones with
+ * PASID support on bit 28 â€” have issues even when we *only* use
+ * extended root/context tables. */
+#define ecs_enabled(iommu) (intel_iommu_ecs && ecap_ecs(iommu->ecap) && \
+                           ecap_pasid(iommu->ecap))
 
 int intel_iommu_gfx_mapped;
 EXPORT_SYMBOL_GPL(intel_iommu_gfx_mapped);
@@ -465,6 +473,10 @@ static int __init intel_iommu_setup(char *str)
                        printk(KERN_INFO
                                "Intel-IOMMU: disable supported super page\n");
                        intel_iommu_superpage = 0;
+               } else if (!strncmp(str, "ecs_off", 7)) {
+                       printk(KERN_INFO
+                               "Intel-IOMMU: disable extended context table support\n");
+                       intel_iommu_ecs = 0;
                }
 
                str += strcspn(str, ",");
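
intel_iommu_ecs gives administrators a kill switch ("intel_iommu=ecs_off") that is ANDed with the hardware capability bits at every use site via ecs_enabled(), and the extra PASID check masks out the early implementations the new comment describes. The general shape of the idiom (demo_* is illustrative):

    #include <linux/intel-iommu.h>

    static int demo_ecs = 1;	/* default on; "ecs_off" clears it */

    /* ECS is used only when the knob, the ECS capability bit, and
     * PASID support all agree; any one of them can veto the feature. */
    #define demo_ecs_enabled(iommu) \
    	(demo_ecs && ecap_ecs((iommu)->ecap) && ecap_pasid((iommu)->ecap))
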
@@ -669,7 +681,7 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        struct context_entry *context;
        u64 *entry;
 
-       if (ecap_ecs(iommu->ecap)) {
+       if (ecs_enabled(iommu)) {
                if (devfn >= 0x80) {
                        devfn -= 0x80;
                        entry = &root->hi;
@@ -696,6 +708,11 @@ static inline struct context_entry *iommu_context_addr(struct intel_iommu *iommu
        return &context[devfn];
 }
 
+static int iommu_dummy(struct device *dev)
+{
+       return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
+}
+
 static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devfn)
 {
        struct dmar_drhd_unit *drhd = NULL;
@@ -705,6 +722,9 @@ static struct intel_iommu *device_to_iommu(struct device *dev, u8 *bus, u8 *devf
        u16 segment = 0;
        int i;
 
+       if (iommu_dummy(dev))
+               return NULL;
+
        if (dev_is_pci(dev)) {
                pdev = to_pci_dev(dev);
                segment = pci_domain_nr(pdev->bus);
@@ -798,7 +818,7 @@ static void free_context_table(struct intel_iommu *iommu)
                if (context)
                        free_pgtable_page(context);
 
-               if (!ecap_ecs(iommu->ecap))
+               if (!ecs_enabled(iommu))
                        continue;
 
                context = iommu_context_addr(iommu, i, 0x80, 0);
@@ -1133,7 +1153,7 @@ static void iommu_set_root_entry(struct intel_iommu *iommu)
        unsigned long flag;
 
        addr = virt_to_phys(iommu->root_entry);
-       if (ecap_ecs(iommu->ecap))
+       if (ecs_enabled(iommu))
                addr |= DMA_RTADDR_RTT;
 
        raw_spin_lock_irqsave(&iommu->register_lock, flag);
@@ -2969,11 +2989,6 @@ static inline struct dmar_domain *get_valid_domain_for_dev(struct device *dev)
        return __get_valid_domain_for_dev(dev);
 }
 
-static int iommu_dummy(struct device *dev)
-{
-       return dev->archdata.iommu == DUMMY_DEVICE_DOMAIN_INFO;
-}
-
 /* Check if the dev needs to go through non-identity map and unmap process.*/
 static int iommu_no_mapping(struct device *dev)
 {
index 5709ae9c3e771d2f82a1bda2a23d500d8f4faffe..80f1d1486247f97959d0a8bc58dda6f54d8fb202 100644 (file)
@@ -8,6 +8,7 @@
 #include <linux/irq.h>
 #include <linux/intel-iommu.h>
 #include <linux/acpi.h>
+#include <linux/irqdomain.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
 #include <asm/cpu.h>
 
 #include "irq_remapping.h"
 
+enum irq_mode {
+       IRQ_REMAPPING,
+       IRQ_POSTING,
+};
+
 struct ioapic_scope {
        struct intel_iommu *iommu;
        unsigned int id;
@@ -31,6 +37,22 @@ struct hpet_scope {
        unsigned int devfn;
 };
 
+struct irq_2_iommu {
+       struct intel_iommu *iommu;
+       u16 irte_index;
+       u16 sub_handle;
+       u8  irte_mask;
+       enum irq_mode mode;
+};
+
+struct intel_ir_data {
+       struct irq_2_iommu                      irq_2_iommu;
+       struct irte                             irte_entry;
+       union {
+               struct msi_msg                  msi_entry;
+       };
+};
+
 #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
 #define IRTE_DEST(dest) ((eim_mode) ? dest : dest << 8)
 
@@ -50,43 +72,14 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
  * the dmar_global_lock.
  */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+static struct irq_domain_ops intel_ir_domain_ops;
 
 static int __init parse_ioapics_under_ir(void);
 
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-       return cfg ? &cfg->irq_2_iommu : NULL;
-}
-
-static int get_irte(int irq, struct irte *entry)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-       int index;
-
-       if (!entry || !irq_iommu)
-               return -1;
-
-       raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       if (unlikely(!irq_iommu->iommu)) {
-               raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-               return -1;
-       }
-
-       index = irq_iommu->irte_index + irq_iommu->sub_handle;
-       *entry = *(irq_iommu->iommu->ir_table->base + index);
-
-       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-       return 0;
-}
-
-static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+static int alloc_irte(struct intel_iommu *iommu, int irq,
+                     struct irq_2_iommu *irq_iommu, u16 count)
 {
        struct ir_table *table = iommu->ir_table;
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       struct irq_cfg *cfg = irq_cfg(irq);
        unsigned int mask = 0;
        unsigned long flags;
        int index;
@@ -113,11 +106,11 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
        if (index < 0) {
                pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
        } else {
-               cfg->remapped = 1;
                irq_iommu->iommu = iommu;
                irq_iommu->irte_index =  index;
                irq_iommu->sub_handle = 0;
                irq_iommu->irte_mask = mask;
+               irq_iommu->mode = IRQ_REMAPPING;
        }
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
@@ -135,47 +128,9 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
        return qi_submit_sync(&desc, iommu);
 }
 
-static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-       int index;
-
-       if (!irq_iommu)
-               return -1;
-
-       raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-       *sub_handle = irq_iommu->sub_handle;
-       index = irq_iommu->irte_index;
-       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-       return index;
-}
-
-static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
+static int modify_irte(struct irq_2_iommu *irq_iommu,
+                      struct irte *irte_modified)
 {
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       struct irq_cfg *cfg = irq_cfg(irq);
-       unsigned long flags;
-
-       if (!irq_iommu)
-               return -1;
-
-       raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       cfg->remapped = 1;
-       irq_iommu->iommu = iommu;
-       irq_iommu->irte_index = index;
-       irq_iommu->sub_handle = subhandle;
-       irq_iommu->irte_mask = 0;
-
-       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-       return 0;
-}
-
-static int modify_irte(int irq, struct irte *irte_modified)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
        struct intel_iommu *iommu;
        unsigned long flags;
        struct irte *irte;
@@ -196,6 +151,9 @@ static int modify_irte(int irq, struct irte *irte_modified)
        __iommu_flush_cache(iommu, irte, sizeof(*irte));
 
        rc = qi_flush_iec(iommu, index, 0);
+
+       /* Update iommu mode according to the IRTE mode */
+       irq_iommu->mode = irte->pst ? IRQ_POSTING : IRQ_REMAPPING;
        raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
 
        return rc;
@@ -242,7 +200,7 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
                return 0;
 
        iommu = irq_iommu->iommu;
-       index = irq_iommu->irte_index + irq_iommu->sub_handle;
+       index = irq_iommu->irte_index;
 
        start = iommu->ir_table->base + index;
        end = start + (1 << irq_iommu->irte_mask);
@@ -257,29 +215,6 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
        return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
 
-static int free_irte(int irq)
-{
-       struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-       unsigned long flags;
-       int rc;
-
-       if (!irq_iommu)
-               return -1;
-
-       raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-       rc = clear_entries(irq_iommu);
-
-       irq_iommu->iommu = NULL;
-       irq_iommu->irte_index = 0;
-       irq_iommu->sub_handle = 0;
-       irq_iommu->irte_mask = 0;
-
-       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-       return rc;
-}
-
 /*
  * source validation type
  */
@@ -488,7 +423,6 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 
        pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
                                 INTR_REMAP_PAGE_ORDER);
-
        if (!pages) {
                pr_err("IR%d: failed to allocate pages of order %d\n",
                       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
@@ -502,11 +436,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
                goto out_free_pages;
        }
 
+       iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
+                                                   0, INTR_REMAP_TABLE_ENTRIES,
+                                                   NULL, &intel_ir_domain_ops,
+                                                   iommu);
+       if (!iommu->ir_domain) {
+               pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
+               goto out_free_bitmap;
+       }
+       iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
+
        ir_table->base = page_address(pages);
        ir_table->bitmap = bitmap;
        iommu->ir_table = ir_table;
        return 0;
 
+out_free_bitmap:
+       kfree(bitmap);
 out_free_pages:
        __free_pages(pages, INTR_REMAP_PAGE_ORDER);
 out_free_table:
@@ -517,6 +463,14 @@ out_free_table:
 static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
 {
        if (iommu && iommu->ir_table) {
+               if (iommu->ir_msi_domain) {
+                       irq_domain_remove(iommu->ir_msi_domain);
+                       iommu->ir_msi_domain = NULL;
+               }
+               if (iommu->ir_domain) {
+                       irq_domain_remove(iommu->ir_domain);
+                       iommu->ir_domain = NULL;
+               }
                free_pages((unsigned long)iommu->ir_table->base,
                           INTR_REMAP_PAGE_ORDER);
                kfree(iommu->ir_table->bitmap);
@@ -627,6 +581,26 @@ error:
        return -ENODEV;
 }
 
+/*
+ * Set Posted-Interrupts capability.
+ */
+static inline void set_irq_posting_cap(void)
+{
+       struct dmar_drhd_unit *drhd;
+       struct intel_iommu *iommu;
+
+       if (!disable_irq_post) {
+               intel_irq_remap_ops.capability |= 1 << IRQ_POSTING_CAP;
+
+               for_each_iommu(iommu, drhd)
+                       if (!cap_pi_support(iommu->cap)) {
+                               intel_irq_remap_ops.capability &=
+                                               ~(1 << IRQ_POSTING_CAP);
+                               break;
+                       }
+       }
+}
+
 static int __init intel_enable_irq_remapping(void)
 {
        struct dmar_drhd_unit *drhd;
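
set_irq_posting_cap() advertises IRQ_POSTING_CAP only when every IOMMU in the system supports posted interrupts: it sets the bit optimistically and clears it again on the first unit that lacks cap_pi_support(). Equivalently, the check reduces to an all-units predicate (demo_* is a sketch):

    #include <linux/dmar.h>
    #include <linux/intel-iommu.h>

    static bool demo_all_iommus_support_posting(void)
    {
    	struct dmar_drhd_unit *drhd;
    	struct intel_iommu *iommu;

    	for_each_iommu(iommu, drhd)
    		if (!cap_pi_support(iommu->cap))
    			return false;	/* one holdout vetoes posting */

    	return true;
    }
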
@@ -702,12 +676,7 @@ static int __init intel_enable_irq_remapping(void)
 
        irq_remapping_enabled = 1;
 
-       /*
-        * VT-d has a different layout for IO-APIC entries when
-        * interrupt remapping is enabled. So it needs a special routine
-        * to print IO-APIC entries for debugging purposes too.
-        */
-       x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
+       set_irq_posting_cap();
 
        pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
 
@@ -909,6 +878,12 @@ static void disable_irq_remapping(void)
 
                iommu_disable_irq_remapping(iommu);
        }
+
+       /*
+        * Clear Posted-Interrupts capability.
+        */
+       if (!disable_irq_post)
+               intel_irq_remap_ops.capability &= ~(1 << IRQ_POSTING_CAP);
 }
 
 static int reenable_irq_remapping(int eim)
@@ -936,6 +911,8 @@ static int reenable_irq_remapping(int eim)
        if (!setup)
                goto error;
 
+       set_irq_posting_cap();
+
        return 0;
 
 error:
@@ -945,8 +922,7 @@ error:
        return -1;
 }
 
-static void prepare_irte(struct irte *irte, int vector,
-                        unsigned int dest)
+static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
 {
        memset(irte, 0, sizeof(*irte));
 
@@ -966,76 +942,63 @@ static void prepare_irte(struct irte *irte, int vector,
        irte->redir_hint = 1;
 }
 
-static int intel_setup_ioapic_entry(int irq,
-                                   struct IO_APIC_route_entry *route_entry,
-                                   unsigned int destination, int vector,
-                                   struct io_apic_irq_attr *attr)
+static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
 {
-       int ioapic_id = mpc_ioapic_id(attr->ioapic);
-       struct intel_iommu *iommu;
-       struct IR_IO_APIC_route_entry *entry;
-       struct irte irte;
-       int index;
-
-       down_read(&dmar_global_lock);
-       iommu = map_ioapic_to_ir(ioapic_id);
-       if (!iommu) {
-               pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
-               index = -ENODEV;
-       } else {
-               index = alloc_irte(iommu, irq, 1);
-               if (index < 0) {
-                       pr_warn("Failed to allocate IRTE for ioapic %d\n",
-                               ioapic_id);
-                       index = -ENOMEM;
-               }
-       }
-       up_read(&dmar_global_lock);
-       if (index < 0)
-               return index;
-
-       prepare_irte(&irte, vector, destination);
+       struct intel_iommu *iommu = NULL;
 
-       /* Set source-id of interrupt request */
-       set_ioapic_sid(&irte, ioapic_id);
+       if (!info)
+               return NULL;
 
-       modify_irte(irq, &irte);
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_IOAPIC:
+               iommu = map_ioapic_to_ir(info->ioapic_id);
+               break;
+       case X86_IRQ_ALLOC_TYPE_HPET:
+               iommu = map_hpet_to_ir(info->hpet_id);
+               break;
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               iommu = map_dev_to_ir(info->msi_dev);
+               break;
+       default:
+               BUG_ON(1);
+               break;
+       }
 
-       apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
-               "Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
-               "Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
-               "Avail:%X Vector:%02X Dest:%08X "
-               "SID:%04X SQ:%X SVT:%X)\n",
-               attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
-               irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
-               irte.avail, irte.vector, irte.dest_id,
-               irte.sid, irte.sq, irte.svt);
+       return iommu ? iommu->ir_domain : NULL;
+}
 
-       entry = (struct IR_IO_APIC_route_entry *)route_entry;
-       memset(entry, 0, sizeof(*entry));
+static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
+{
+       struct intel_iommu *iommu;
 
-       entry->index2   = (index >> 15) & 0x1;
-       entry->zero     = 0;
-       entry->format   = 1;
-       entry->index    = (index & 0x7fff);
-       /*
-        * IO-APIC RTE will be configured with virtual vector.
-        * irq handler will do the explicit EOI to the io-apic.
-        */
-       entry->vector   = attr->ioapic_pin;
-       entry->mask     = 0;                    /* enable IRQ */
-       entry->trigger  = attr->trigger;
-       entry->polarity = attr->polarity;
+       if (!info)
+               return NULL;
 
-       /* Mask level triggered irqs.
-        * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
-        */
-       if (attr->trigger)
-               entry->mask = 1;
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               iommu = map_dev_to_ir(info->msi_dev);
+               if (iommu)
+                       return iommu->ir_msi_domain;
+               break;
+       default:
+               break;
+       }
 
-       return 0;
+       return NULL;
 }
 
+struct irq_remap_ops intel_irq_remap_ops = {
+       .prepare                = intel_prepare_irq_remapping,
+       .enable                 = intel_enable_irq_remapping,
+       .disable                = disable_irq_remapping,
+       .reenable               = reenable_irq_remapping,
+       .enable_faulting        = enable_drhd_fault_handling,
+       .get_ir_irq_domain      = intel_get_ir_irq_domain,
+       .get_irq_domain         = intel_get_irq_domain,
+};
+
 /*
  * Migrate the IO-APIC irq in the presence of intr-remapping.
  *
@@ -1051,170 +1014,282 @@ static int intel_setup_ioapic_entry(int irq,
  * is used to migrate MSI irq's in the presence of interrupt-remapping.
  */
 static int
-intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-                         bool force)
+intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
+                     bool force)
 {
+       struct intel_ir_data *ir_data = data->chip_data;
+       struct irte *irte = &ir_data->irte_entry;
        struct irq_cfg *cfg = irqd_cfg(data);
-       unsigned int dest, irq = data->irq;
-       struct irte irte;
-       int err;
-
-       if (!config_enabled(CONFIG_SMP))
-               return -EINVAL;
-
-       if (!cpumask_intersects(mask, cpu_online_mask))
-               return -EINVAL;
-
-       if (get_irte(irq, &irte))
-               return -EBUSY;
-
-       err = assign_irq_vector(irq, cfg, mask);
-       if (err)
-               return err;
-
-       err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
-       if (err) {
-               if (assign_irq_vector(irq, cfg, data->affinity))
-                       pr_err("Failed to recover vector for irq %d\n", irq);
-               return err;
-       }
+       struct irq_data *parent = data->parent_data;
+       int ret;
 
-       irte.vector = cfg->vector;
-       irte.dest_id = IRTE_DEST(dest);
+       ret = parent->chip->irq_set_affinity(parent, mask, force);
+       if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+               return ret;
 
        /*
         * Atomically updates the IRTE with the new destination, vector
         * and flushes the interrupt entry cache.
         */
-       modify_irte(irq, &irte);
+       irte->vector = cfg->vector;
+       irte->dest_id = IRTE_DEST(cfg->dest_apicid);
+
+       /* Update the hardware only if the interrupt is in remapped mode. */
+       if (ir_data->irq_2_iommu.mode == IRQ_REMAPPING)
+               modify_irte(&ir_data->irq_2_iommu, irte);
 
        /*
         * After this point, all the interrupts will start arriving
         * at the new destination. So, time to cleanup the previous
         * vector allocation.
         */
-       if (cfg->move_in_progress)
-               send_cleanup_vector(cfg);
+       send_cleanup_vector(cfg);
 
-       cpumask_copy(data->affinity, mask);
-       return 0;
+       return IRQ_SET_MASK_OK_DONE;
 }
 
-static void intel_compose_msi_msg(struct pci_dev *pdev,
-                                 unsigned int irq, unsigned int dest,
-                                 struct msi_msg *msg, u8 hpet_id)
+static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
+                                    struct msi_msg *msg)
 {
-       struct irq_cfg *cfg;
-       struct irte irte;
-       u16 sub_handle = 0;
-       int ir_index;
+       struct intel_ir_data *ir_data = irq_data->chip_data;
 
-       cfg = irq_cfg(irq);
+       *msg = ir_data->msi_entry;
+}
 
-       ir_index = map_irq_to_irte_handle(irq, &sub_handle);
-       BUG_ON(ir_index == -1);
+static int intel_ir_set_vcpu_affinity(struct irq_data *data, void *info)
+{
+       struct intel_ir_data *ir_data = data->chip_data;
+       struct vcpu_data *vcpu_pi_info = info;
 
-       prepare_irte(&irte, cfg->vector, dest);
+       /* stop posting interrupts, back to remapping mode */
+       if (!vcpu_pi_info) {
+               modify_irte(&ir_data->irq_2_iommu, &ir_data->irte_entry);
+       } else {
+               struct irte irte_pi;
 
-       /* Set source-id of interrupt request */
-       if (pdev)
-               set_msi_sid(&irte, pdev);
-       else
-               set_hpet_sid(&irte, hpet_id);
+               /*
+                * We are not caching the posted interrupt entry. We
+                * copy the data from the remapped entry and modify
+                * the fields which are relevant for posted mode. The
+                * cached remapped entry is used for switching back to
+                * remapped mode.
+                */
+               memset(&irte_pi, 0, sizeof(irte_pi));
+               dmar_copy_shared_irte(&irte_pi, &ir_data->irte_entry);
+
+               /* Update the posted mode fields */
+               irte_pi.p_pst = 1;
+               irte_pi.p_urgent = 0;
+               irte_pi.p_vector = vcpu_pi_info->vector;
+               irte_pi.pda_l = (vcpu_pi_info->pi_desc_addr >>
+                               (32 - PDA_LOW_BIT)) & ~(-1UL << PDA_LOW_BIT);
+               irte_pi.pda_h = (vcpu_pi_info->pi_desc_addr >> 32) &
+                               ~(-1UL << PDA_HIGH_BIT);
+
+               modify_irte(&ir_data->irq_2_iommu, &irte_pi);
+       }
 
-       modify_irte(irq, &irte);
+       return 0;
+}
+
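The pda_l/pda_h packing a few lines up splits the 64-bit posted-interrupt descriptor address across two IRTE fields: pda_l carries bits 6..31 (the descriptor is 64-byte aligned, so bits 0..5 are zero) and pda_h carries bits 32..63. A minimal round-trip sketch, assuming the PDA_LOW_BIT = 26 / PDA_HIGH_BIT = 32 field widths defined in this driver:

    /* Sketch: pack and unpack a posted-interrupt descriptor address.
     * The PDA_LOW_BIT/PDA_HIGH_BIT widths are assumptions taken from
     * the driver; a 64-byte-aligned address survives the round trip. */
    #include <stdint.h>
    #include <assert.h>

    #define PDA_LOW_BIT  26
    #define PDA_HIGH_BIT 32

    int main(void)
    {
            uint64_t addr = 0x1234567fc0ULL;        /* 64-byte aligned */
            uint64_t pda_l, pda_h, restored;

            /* Same arithmetic as intel_ir_set_vcpu_affinity() */
            pda_l = (addr >> (32 - PDA_LOW_BIT)) & ~(-1ULL << PDA_LOW_BIT);
            pda_h = (addr >> 32) & ~(-1ULL << PDA_HIGH_BIT);

            restored = (pda_h << 32) | (pda_l << (32 - PDA_LOW_BIT));
            assert(restored == addr);               /* bits 6..63 recovered */
            return 0;
    }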
+static struct irq_chip intel_ir_chip = {
+       .irq_ack = ir_ack_apic_edge,
+       .irq_set_affinity = intel_ir_set_affinity,
+       .irq_compose_msi_msg = intel_ir_compose_msi_msg,
+       .irq_set_vcpu_affinity = intel_ir_set_vcpu_affinity,
+};
 
-       msg->address_hi = MSI_ADDR_BASE_HI;
-       msg->data = sub_handle;
-       msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
-                         MSI_ADDR_IR_SHV |
-                         MSI_ADDR_IR_INDEX1(ir_index) |
-                         MSI_ADDR_IR_INDEX2(ir_index);
+static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
+                                            struct irq_cfg *irq_cfg,
+                                            struct irq_alloc_info *info,
+                                            int index, int sub_handle)
+{
+       struct IR_IO_APIC_route_entry *entry;
+       struct irte *irte = &data->irte_entry;
+       struct msi_msg *msg = &data->msi_entry;
+
+       prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
+       switch (info->type) {
+       case X86_IRQ_ALLOC_TYPE_IOAPIC:
+               /* Set source-id of interrupt request */
+               set_ioapic_sid(irte, info->ioapic_id);
+               apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
+                       info->ioapic_id, irte->present, irte->fpd,
+                       irte->dst_mode, irte->redir_hint,
+                       irte->trigger_mode, irte->dlvry_mode,
+                       irte->avail, irte->vector, irte->dest_id,
+                       irte->sid, irte->sq, irte->svt);
+
+               entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
+               info->ioapic_entry = NULL;
+               memset(entry, 0, sizeof(*entry));
+               entry->index2   = (index >> 15) & 0x1;
+               entry->zero     = 0;
+               entry->format   = 1;
+               entry->index    = (index & 0x7fff);
+               /*
+                * IO-APIC RTE will be configured with virtual vector.
+                * irq handler will do the explicit EOI to the io-apic.
+                */
+               entry->vector   = info->ioapic_pin;
+               entry->mask     = 0;                    /* enable IRQ */
+               entry->trigger  = info->ioapic_trigger;
+               entry->polarity = info->ioapic_polarity;
+               if (info->ioapic_trigger)
+                       entry->mask = 1; /* Mask level triggered irqs. */
+               break;
+
+       case X86_IRQ_ALLOC_TYPE_HPET:
+       case X86_IRQ_ALLOC_TYPE_MSI:
+       case X86_IRQ_ALLOC_TYPE_MSIX:
+               if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
+                       set_hpet_sid(irte, info->hpet_id);
+               else
+                       set_msi_sid(irte, info->msi_dev);
+
+               msg->address_hi = MSI_ADDR_BASE_HI;
+               msg->data = sub_handle;
+               msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
+                                 MSI_ADDR_IR_SHV |
+                                 MSI_ADDR_IR_INDEX1(index) |
+                                 MSI_ADDR_IR_INDEX2(index);
+               break;
+
+       default:
+               BUG();
+               break;
+       }
 }
 
-/*
- * Map the PCI dev to the corresponding remapping hardware unit
- * and allocate 'nvec' consecutive interrupt-remapping table entries
- * in it.
- */
-static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
+static void intel_free_irq_resources(struct irq_domain *domain,
+                                    unsigned int virq, unsigned int nr_irqs)
 {
-       struct intel_iommu *iommu;
-       int index;
+       struct irq_data *irq_data;
+       struct intel_ir_data *data;
+       struct irq_2_iommu *irq_iommu;
+       unsigned long flags;
+       int i;
 
-       down_read(&dmar_global_lock);
-       iommu = map_dev_to_ir(dev);
-       if (!iommu) {
-               printk(KERN_ERR
-                      "Unable to map PCI %s to iommu\n", pci_name(dev));
-               index = -ENOENT;
-       } else {
-               index = alloc_irte(iommu, irq, nvec);
-               if (index < 0) {
-                       printk(KERN_ERR
-                              "Unable to allocate %d IRTE for PCI %s\n",
-                              nvec, pci_name(dev));
-                       index = -ENOSPC;
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               if (irq_data && irq_data->chip_data) {
+                       data = irq_data->chip_data;
+                       irq_iommu = &data->irq_2_iommu;
+                       raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+                       clear_entries(irq_iommu);
+                       raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+                       irq_domain_reset_irq_data(irq_data);
+                       kfree(data);
                }
        }
-       up_read(&dmar_global_lock);
-
-       return index;
 }
 
-static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
-                              int index, int sub_handle)
+static int intel_irq_remapping_alloc(struct irq_domain *domain,
+                                    unsigned int virq, unsigned int nr_irqs,
+                                    void *arg)
 {
-       struct intel_iommu *iommu;
-       int ret = -ENOENT;
+       struct intel_iommu *iommu = domain->host_data;
+       struct irq_alloc_info *info = arg;
+       struct intel_ir_data *data, *ird;
+       struct irq_data *irq_data;
+       struct irq_cfg *irq_cfg;
+       int i, ret, index;
+
+       if (!info || !iommu)
+               return -EINVAL;
+       if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
+           info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+               return -EINVAL;
+
+       /*
+        * With IRQ remapping enabled, we don't need contiguous CPU vectors
+        * to support multiple MSI interrupts.
+        */
+       if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+               info->flags &= ~X86_IRQ_ALLOC_CONTIGUOUS_VECTORS;
+
+       ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+       if (ret < 0)
+               return ret;
+
+       ret = -ENOMEM;
+       data = kzalloc(sizeof(*data), GFP_KERNEL);
+       if (!data)
+               goto out_free_parent;
 
        down_read(&dmar_global_lock);
-       iommu = map_dev_to_ir(pdev);
-       if (iommu) {
-               /*
-                * setup the mapping between the irq and the IRTE
-                * base index, the sub_handle pointing to the
-                * appropriate interrupt remap table entry.
-                */
-               set_irte_irq(irq, iommu, index, sub_handle);
-               ret = 0;
-       }
+       index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
        up_read(&dmar_global_lock);
+       if (index < 0) {
+               pr_warn("Failed to allocate IRTE\n");
+               kfree(data);
+               goto out_free_parent;
+       }
 
+       for (i = 0; i < nr_irqs; i++) {
+               irq_data = irq_domain_get_irq_data(domain, virq + i);
+               irq_cfg = irq_data ? irqd_cfg(irq_data) : NULL;
+               if (!irq_data || !irq_cfg) {
+                       ret = -EINVAL;
+                       goto out_free_data;
+               }
+
+               if (i > 0) {
+                       ird = kzalloc(sizeof(*ird), GFP_KERNEL);
+                       if (!ird)
+                               goto out_free_data;
+                       /* Initialize the common data */
+                       ird->irq_2_iommu = data->irq_2_iommu;
+                       ird->irq_2_iommu.sub_handle = i;
+               } else {
+                       ird = data;
+               }
+
+               irq_data->hwirq = (index << 16) + i;
+               irq_data->chip_data = ird;
+               irq_data->chip = &intel_ir_chip;
+               intel_irq_remapping_prepare_irte(ird, irq_cfg, info, index, i);
+               irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
+       }
+       return 0;
+
+out_free_data:
+       intel_free_irq_resources(domain, virq, i);
+out_free_parent:
+       irq_domain_free_irqs_common(domain, virq, nr_irqs);
        return ret;
 }
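Note the hwirq encoding chosen by the allocator above: irq_data->hwirq = (index << 16) + i packs the IRTE base index into the high bits and the sub-handle into the low 16. A decoding sketch (the helper name is illustrative, not part of the driver):

    /* Illustrative only: recover the IRTE index and sub-handle from a
     * hwirq produced by intel_irq_remapping_alloc(). */
    static inline void ir_hwirq_decode(unsigned long hwirq,
                                       unsigned int *index,
                                       unsigned int *sub_handle)
    {
            *index      = hwirq >> 16;      /* IRTE base index */
            *sub_handle = hwirq & 0xffff;   /* offset within the block */
    }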
 
-static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
+static void intel_irq_remapping_free(struct irq_domain *domain,
+                                    unsigned int virq, unsigned int nr_irqs)
 {
-       int ret = -1;
-       struct intel_iommu *iommu;
-       int index;
+       intel_free_irq_resources(domain, virq, nr_irqs);
+       irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
 
-       down_read(&dmar_global_lock);
-       iommu = map_hpet_to_ir(id);
-       if (iommu) {
-               index = alloc_irte(iommu, irq, 1);
-               if (index >= 0)
-                       ret = 0;
-       }
-       up_read(&dmar_global_lock);
+static void intel_irq_remapping_activate(struct irq_domain *domain,
+                                        struct irq_data *irq_data)
+{
+       struct intel_ir_data *data = irq_data->chip_data;
 
-       return ret;
+       modify_irte(&data->irq_2_iommu, &data->irte_entry);
 }
 
-struct irq_remap_ops intel_irq_remap_ops = {
-       .prepare                = intel_prepare_irq_remapping,
-       .enable                 = intel_enable_irq_remapping,
-       .disable                = disable_irq_remapping,
-       .reenable               = reenable_irq_remapping,
-       .enable_faulting        = enable_drhd_fault_handling,
-       .setup_ioapic_entry     = intel_setup_ioapic_entry,
-       .set_affinity           = intel_ioapic_set_affinity,
-       .free_irq               = free_irte,
-       .compose_msi_msg        = intel_compose_msi_msg,
-       .msi_alloc_irq          = intel_msi_alloc_irq,
-       .msi_setup_irq          = intel_msi_setup_irq,
-       .alloc_hpet_msi         = intel_alloc_hpet_msi,
+static void intel_irq_remapping_deactivate(struct irq_domain *domain,
+                                          struct irq_data *irq_data)
+{
+       struct intel_ir_data *data = irq_data->chip_data;
+       struct irte entry;
+
+       memset(&entry, 0, sizeof(entry));
+       modify_irte(&data->irq_2_iommu, &entry);
+}
+
+static struct irq_domain_ops intel_ir_domain_ops = {
+       .alloc = intel_irq_remapping_alloc,
+       .free = intel_irq_remapping_free,
+       .activate = intel_irq_remapping_activate,
+       .deactivate = intel_irq_remapping_deactivate,
 };
 
 /*
@@ -1280,6 +1355,9 @@ int dmar_ir_hotplug(struct dmar_drhd_unit *dmaru, bool insert)
                return -EINVAL;
        if (!ecap_ir_support(iommu->ecap))
                return 0;
+       if (irq_remapping_cap(IRQ_POSTING_CAP) &&
+           !cap_pi_support(iommu->cap))
+               return -EBUSY;
 
        if (insert) {
                if (!iommu->ir_table)
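Taken together, this file now exposes Intel interrupt remapping as an irqdomain stacked between the x86 vector domain and the IOAPIC/HPET/MSI domains, with intel_ir_domain_ops supplying alloc/free/activate/deactivate. A minimal sketch of how such a domain would be registered per IOMMU, assuming this series' names (intel_ir_domain_ops and iommu->ir_domain come from the code above; the parent lookup is simplified to the x86 vector domain, and sizing/error handling are trimmed):

    /* Sketch: register the remapping irqdomain for one IOMMU unit. */
    static int ir_domain_init_sketch(struct intel_iommu *iommu)
    {
            iommu->ir_domain =
                    irq_domain_add_hierarchy(x86_vector_domain, 0, 0, NULL,
                                             &intel_ir_domain_ops, iommu);
            return iommu->ir_domain ? 0 : -ENOMEM;
    }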
index 390079ee13507747388f635bf67c1c44dfb6c068..2d9993062ded6b2d543c89a9c09c3e6a85dfb17f 100644 (file)
@@ -6,6 +6,7 @@
 #include <linux/msi.h>
 #include <linux/irq.h>
 #include <linux/pci.h>
+#include <linux/irqdomain.h>
 
 #include <asm/hw_irq.h>
 #include <asm/irq_remapping.h>
@@ -21,21 +22,11 @@ int irq_remap_broken;
 int disable_sourceid_checking;
 int no_x2apic_optout;
 
+int disable_irq_post = 1;
+
 static int disable_irq_remap;
 static struct irq_remap_ops *remap_ops;
 
-static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
-static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-                                 int index, int sub_handle);
-static int set_remapped_irq_affinity(struct irq_data *data,
-                                    const struct cpumask *mask,
-                                    bool force);
-
-static bool irq_remapped(struct irq_cfg *cfg)
-{
-       return (cfg->remapped == 1);
-}
-
 static void irq_remapping_disable_io_apic(void)
 {
        /*
@@ -49,117 +40,9 @@ static void irq_remapping_disable_io_apic(void)
                disconnect_bsp_APIC(0);
 }
 
-static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
-{
-       int ret, sub_handle, nvec_pow2, index = 0;
-       unsigned int irq;
-       struct msi_desc *msidesc;
-
-       msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
-
-       irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
-       if (irq == 0)
-               return -ENOSPC;
-
-       nvec_pow2 = __roundup_pow_of_two(nvec);
-       for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
-               if (!sub_handle) {
-                       index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
-                       if (index < 0) {
-                               ret = index;
-                               goto error;
-                       }
-               } else {
-                       ret = msi_setup_remapped_irq(dev, irq + sub_handle,
-                                                    index, sub_handle);
-                       if (ret < 0)
-                               goto error;
-               }
-               ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
-               if (ret < 0)
-                       goto error;
-       }
-       return 0;
-
-error:
-       irq_free_hwirqs(irq, nvec);
-
-       /*
-        * Restore altered MSI descriptor fields and prevent just destroyed
-        * IRQs from tearing down again in default_teardown_msi_irqs()
-        */
-       msidesc->irq = 0;
-
-       return ret;
-}
-
-static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
-{
-       int node, ret, sub_handle, index = 0;
-       struct msi_desc *msidesc;
-       unsigned int irq;
-
-       node            = dev_to_node(&dev->dev);
-       sub_handle      = 0;
-
-       list_for_each_entry(msidesc, &dev->msi_list, list) {
-
-               irq = irq_alloc_hwirq(node);
-               if (irq == 0)
-                       return -1;
-
-               if (sub_handle == 0)
-                       ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
-               else
-                       ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);
-
-               if (ret < 0)
-                       goto error;
-
-               ret = setup_msi_irq(dev, msidesc, irq, 0);
-               if (ret < 0)
-                       goto error;
-
-               sub_handle += 1;
-               irq        += 1;
-       }
-
-       return 0;
-
-error:
-       irq_free_hwirq(irq);
-       return ret;
-}
-
-static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
-                                       int nvec, int type)
-{
-       if (type == PCI_CAP_ID_MSI)
-               return do_setup_msi_irqs(dev, nvec);
-       else
-               return do_setup_msix_irqs(dev, nvec);
-}
-
-static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
-{
-       /*
-        * Intr-remapping uses pin number as the virtual vector
-        * in the RTE. Actual vector is programmed in
-        * intr-remapping table entry. Hence for the io-apic
-        * EOI we use the pin number.
-        */
-       io_apic_eoi(apic, pin);
-}
-
 static void __init irq_remapping_modify_x86_ops(void)
 {
        x86_io_apic_ops.disable         = irq_remapping_disable_io_apic;
-       x86_io_apic_ops.set_affinity    = set_remapped_irq_affinity;
-       x86_io_apic_ops.setup_entry     = setup_ioapic_remapped_entry;
-       x86_io_apic_ops.eoi_ioapic_pin  = eoi_ioapic_pin_remapped;
-       x86_msi.setup_msi_irqs          = irq_remapping_setup_msi_irqs;
-       x86_msi.setup_hpet_msi          = setup_hpet_msi_remapped;
-       x86_msi.compose_msi_msg         = compose_remapped_msi_msg;
 }
 
 static __init int setup_nointremap(char *str)
@@ -198,6 +81,15 @@ void set_irq_remapping_broken(void)
        irq_remap_broken = 1;
 }
 
+bool irq_remapping_cap(enum irq_remap_cap cap)
+{
+       if (!remap_ops || disable_irq_post)
+               return false;
+
+       return (remap_ops->capability & (1 << cap));
+}
+EXPORT_SYMBOL_GPL(irq_remapping_cap);
+
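irq_remapping_cap() lets consumers probe optional remapping features; the IRQ_POSTING_CAP check in dmar_ir_hotplug() above is one user. A hedged sketch of the intended consumer pattern, e.g. a hypervisor steering an assigned-device interrupt to a vCPU (the function name is hypothetical; irq_set_vcpu_affinity() is the genirq entry point that reaches intel_ir_set_vcpu_affinity() through the irq chip):

    /* Sketch: only attempt VT-d posted interrupts when the capability
     * is advertised; otherwise stay in plain remapped mode. */
    static int post_to_vcpu(unsigned int irq, struct vcpu_data *vcpu)
    {
            if (!irq_remapping_cap(IRQ_POSTING_CAP))
                    return -EINVAL;
            return irq_set_vcpu_affinity(irq, vcpu);
    }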
 int __init irq_remapping_prepare(void)
 {
        if (disable_irq_remap)
@@ -254,113 +146,48 @@ int __init irq_remap_enable_fault_handling(void)
        return remap_ops->enable_faulting();
 }
 
-int setup_ioapic_remapped_entry(int irq,
-                               struct IO_APIC_route_entry *entry,
-                               unsigned int destination, int vector,
-                               struct io_apic_irq_attr *attr)
-{
-       if (!remap_ops->setup_ioapic_entry)
-               return -ENODEV;
-
-       return remap_ops->setup_ioapic_entry(irq, entry, destination,
-                                            vector, attr);
-}
-
-static int set_remapped_irq_affinity(struct irq_data *data,
-                                    const struct cpumask *mask, bool force)
-{
-       if (!config_enabled(CONFIG_SMP) || !remap_ops->set_affinity)
-               return 0;
-
-       return remap_ops->set_affinity(data, mask, force);
-}
-
-void free_remapped_irq(int irq)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-
-       if (irq_remapped(cfg) && remap_ops->free_irq)
-               remap_ops->free_irq(irq);
-}
-
-void compose_remapped_msi_msg(struct pci_dev *pdev,
-                             unsigned int irq, unsigned int dest,
-                             struct msi_msg *msg, u8 hpet_id)
-{
-       struct irq_cfg *cfg = irq_cfg(irq);
-
-       if (!irq_remapped(cfg))
-               native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-       else if (remap_ops->compose_msi_msg)
-               remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-}
-
-static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
-{
-       if (!remap_ops->msi_alloc_irq)
-               return -ENODEV;
-
-       return remap_ops->msi_alloc_irq(pdev, irq, nvec);
-}
-
-static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-                                 int index, int sub_handle)
-{
-       if (!remap_ops->msi_setup_irq)
-               return -ENODEV;
-
-       return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
-}
-
-int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
-{
-       int ret;
-
-       if (!remap_ops->alloc_hpet_msi)
-               return -ENODEV;
-
-       ret = remap_ops->alloc_hpet_msi(irq, id);
-       if (ret)
-               return -EINVAL;
-
-       return default_setup_hpet_msi(irq, id);
-}
-
 void panic_if_irq_remap(const char *msg)
 {
        if (irq_remapping_enabled)
                panic(msg);
 }
 
-static void ir_ack_apic_edge(struct irq_data *data)
+void ir_ack_apic_edge(struct irq_data *data)
 {
        ack_APIC_irq();
 }
 
-static void ir_ack_apic_level(struct irq_data *data)
+/**
+ * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU
+ *                                  device serving request @info
+ * @info: interrupt allocation information, used to identify the IOMMU device
+ * It's used to get the parent irqdomain for HPET and IOAPIC irqdomains.
+ * It's used to get parent irqdomain for HPET and IOAPIC irqdomains.
+ * Returns pointer to IRQ domain, or NULL on failure.
+ */
+struct irq_domain *
+irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info)
 {
-       ack_APIC_irq();
-       eoi_ioapic_irq(data->irq, irqd_cfg(data));
-}
+       if (!remap_ops || !remap_ops->get_ir_irq_domain)
+               return NULL;
 
-static void ir_print_prefix(struct irq_data *data, struct seq_file *p)
-{
-       seq_printf(p, " IR-%s", data->chip->name);
+       return remap_ops->get_ir_irq_domain(info);
 }
 
-void irq_remap_modify_chip_defaults(struct irq_chip *chip)
+/**
+ * irq_remapping_get_irq_domain - Get the irqdomain serving the request @info
+ * @info: interrupt allocation information, used to identify the IOMMU device
+ *
+ * There will be one PCI MSI/MSIX irqdomain associated with each interrupt
+ * remapping device, so this interface is used to retrieve the PCI MSI/MSIX
+ * irqdomain serving request @info.
+ * Returns pointer to IRQ domain, or NULL on failure.
+ */
+struct irq_domain *
+irq_remapping_get_irq_domain(struct irq_alloc_info *info)
 {
-       chip->irq_print_chip = ir_print_prefix;
-       chip->irq_ack = ir_ack_apic_edge;
-       chip->irq_eoi = ir_ack_apic_level;
-       chip->irq_set_affinity = x86_io_apic_ops.set_affinity;
-}
+       if (!remap_ops || !remap_ops->get_irq_domain)
+               return NULL;
 
-bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip)
-{
-       if (!irq_remapped(cfg))
-               return false;
-       irq_set_status_flags(irq, IRQ_MOVE_PCNTXT);
-       irq_remap_modify_chip_defaults(chip);
-       return true;
+       return remap_ops->get_irq_domain(info);
 }
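A hedged sketch of the lookup pattern these two accessors enable on the x86 side, here for a PCI MSI allocation (init_irq_alloc_info() and the info fields appear elsewhere in this series; msi_default_domain stands in for the arch's non-remapped fallback and is an assumption):

    /* Sketch: pick the MSI parent domain for a device, falling back to
     * the default domain when no remapping unit serves it. */
    static struct irq_domain *msi_pick_domain(struct pci_dev *dev)
    {
            struct irq_alloc_info info;

            init_irq_alloc_info(&info, NULL);
            info.type = X86_IRQ_ALLOC_TYPE_MSI;
            info.msi_dev = dev;

            return irq_remapping_get_irq_domain(&info) ?: msi_default_domain;
    }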
index 7c70cc29ffe6b4d5341587465de8cf3c643a8956..039c7af7b190f9c3481e46bb81f34d4fcbecc099 100644 (file)
 
 #ifdef CONFIG_IRQ_REMAP
 
-struct IO_APIC_route_entry;
-struct io_apic_irq_attr;
 struct irq_data;
-struct cpumask;
-struct pci_dev;
 struct msi_msg;
+struct irq_domain;
+struct irq_alloc_info;
 
 extern int irq_remap_broken;
 extern int disable_sourceid_checking;
 extern int no_x2apic_optout;
 extern int irq_remapping_enabled;
 
+extern int disable_irq_post;
+
 struct irq_remap_ops {
+       /* The supported capabilities */
+       int capability;
+
        /* Initializes hardware and makes it ready for remapping interrupts */
        int  (*prepare)(void);
 
@@ -52,40 +55,23 @@ struct irq_remap_ops {
        /* Enable fault handling */
        int  (*enable_faulting)(void);
 
-       /* IO-APIC setup routine */
-       int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *,
-                                 unsigned int, int,
-                                 struct io_apic_irq_attr *);
-
-       /* Set the CPU affinity of a remapped interrupt */
-       int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
-                           bool force);
-
-       /* Free an IRQ */
-       int (*free_irq)(int);
+       /* Get the irqdomain associated with the IOMMU device */
+       struct irq_domain *(*get_ir_irq_domain)(struct irq_alloc_info *);
 
-       /* Create MSI msg to use for interrupt remapping */
-       void (*compose_msi_msg)(struct pci_dev *,
-                               unsigned int, unsigned int,
-                               struct msi_msg *, u8);
-
-       /* Allocate remapping resources for MSI */
-       int (*msi_alloc_irq)(struct pci_dev *, int, int);
-
-       /* Setup the remapped MSI irq */
-       int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int);
-
-       /* Setup interrupt remapping for an HPET MSI */
-       int (*alloc_hpet_msi)(unsigned int, unsigned int);
+       /* Get the MSI irqdomain associated with the IOMMU device */
+       struct irq_domain *(*get_irq_domain)(struct irq_alloc_info *);
 };
 
 extern struct irq_remap_ops intel_irq_remap_ops;
 extern struct irq_remap_ops amd_iommu_irq_ops;
 
+extern void ir_ack_apic_edge(struct irq_data *data);
+
 #else  /* CONFIG_IRQ_REMAP */
 
 #define irq_remapping_enabled 0
 #define irq_remap_broken      0
+#define disable_irq_post      1
 
 #endif /* CONFIG_IRQ_REMAP */
 
index 4015560bf486db22e82f1652799731b90277b864..cab214544237cf6f89754c3878f0e57f66ba360d 100644 (file)
@@ -1004,20 +1004,18 @@ static int rk_iommu_remove(struct platform_device *pdev)
        return 0;
 }
 
-#ifdef CONFIG_OF
 static const struct of_device_id rk_iommu_dt_ids[] = {
        { .compatible = "rockchip,iommu" },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, rk_iommu_dt_ids);
-#endif
 
 static struct platform_driver rk_iommu_driver = {
        .probe = rk_iommu_probe,
        .remove = rk_iommu_remove,
        .driver = {
                   .name = "rk_iommu",
-                  .of_match_table = of_match_ptr(rk_iommu_dt_ids),
+                  .of_match_table = rk_iommu_dt_ids,
        },
 };
 
index 9687f8afebffbb865256ba6677663e6c76702aa1..1b7e155869f6c1a5f9ff361f246b6fd71539dadc 100644 (file)
@@ -828,7 +828,14 @@ static int its_alloc_tables(struct its_node *its)
                        u64 typer = readq_relaxed(its->base + GITS_TYPER);
                        u32 ids = GITS_TYPER_DEVBITS(typer);
 
-                       order = get_order((1UL << ids) * entry_size);
+                       /*
+                        * 'order' was initialized earlier to the default page
+                        * granule of the ITS.  We can't have an allocation
+                        * smaller than that.  If the requested allocation
+                        * is smaller, round up to the default page granule.
+                        */
+                       order = max(get_order((1UL << ids) * entry_size),
+                                   order);
                        if (order >= MAX_ORDER) {
                                order = MAX_ORDER - 1;
                                pr_warn("%s: Device Table too large, reduce its page order to %u\n",
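Worked numbers for the max() clamp, assuming 4 KiB base pages: with ids = 16 device-ID bits and entry_size = 8 bytes, the table needs (1 << 16) * 8 = 512 KiB, so get_order() yields 7 and overrides a 64 KiB default granule (order 4); with only 8 ID bits the 2 KiB request (order 0) is instead rounded up to the granule's order.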
index 7b315e385ba3a0bba8de91fa851ab03c3ef653e8..01999d74bd3af32c5d05b8f14c97c07637d4571e 100644 (file)
@@ -82,19 +82,6 @@ static DEFINE_RAW_SPINLOCK(irq_controller_lock);
 #define NR_GIC_CPU_IF 8
 static u8 gic_cpu_map[NR_GIC_CPU_IF] __read_mostly;
 
-/*
- * Supported arch specific GIC irq extension.
- * Default make them NULL.
- */
-struct irq_chip gic_arch_extn = {
-       .irq_eoi        = NULL,
-       .irq_mask       = NULL,
-       .irq_unmask     = NULL,
-       .irq_retrigger  = NULL,
-       .irq_set_type   = NULL,
-       .irq_set_wake   = NULL,
-};
-
 #ifndef MAX_GIC_NR
 #define MAX_GIC_NR     1
 #endif
@@ -167,34 +154,16 @@ static int gic_peek_irq(struct irq_data *d, u32 offset)
 
 static void gic_mask_irq(struct irq_data *d)
 {
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&irq_controller_lock, flags);
        gic_poke_irq(d, GIC_DIST_ENABLE_CLEAR);
-       if (gic_arch_extn.irq_mask)
-               gic_arch_extn.irq_mask(d);
-       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_unmask_irq(struct irq_data *d)
 {
-       unsigned long flags;
-
-       raw_spin_lock_irqsave(&irq_controller_lock, flags);
-       if (gic_arch_extn.irq_unmask)
-               gic_arch_extn.irq_unmask(d);
        gic_poke_irq(d, GIC_DIST_ENABLE_SET);
-       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
 }
 
 static void gic_eoi_irq(struct irq_data *d)
 {
-       if (gic_arch_extn.irq_eoi) {
-               raw_spin_lock(&irq_controller_lock);
-               gic_arch_extn.irq_eoi(d);
-               raw_spin_unlock(&irq_controller_lock);
-       }
-
        writel_relaxed(gic_irq(d), gic_cpu_base(d) + GIC_CPU_EOI);
 }
 
@@ -251,8 +220,6 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
 {
        void __iomem *base = gic_dist_base(d);
        unsigned int gicirq = gic_irq(d);
-       unsigned long flags;
-       int ret;
 
        /* Interrupt configuration for SGIs can't be changed */
        if (gicirq < 16)
@@ -263,25 +230,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type)
                            type != IRQ_TYPE_EDGE_RISING)
                return -EINVAL;
 
-       raw_spin_lock_irqsave(&irq_controller_lock, flags);
-
-       if (gic_arch_extn.irq_set_type)
-               gic_arch_extn.irq_set_type(d, type);
-
-       ret = gic_configure_irq(gicirq, type, base, NULL);
-
-       raw_spin_unlock_irqrestore(&irq_controller_lock, flags);
-
-       return ret;
-}
-
-static int gic_retrigger(struct irq_data *d)
-{
-       if (gic_arch_extn.irq_retrigger)
-               return gic_arch_extn.irq_retrigger(d);
-
-       /* the genirq layer expects 0 if we can't retrigger in hardware */
-       return 0;
+       return gic_configure_irq(gicirq, type, base, NULL);
 }
 
 #ifdef CONFIG_SMP
@@ -312,21 +261,6 @@ static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
 }
 #endif
 
-#ifdef CONFIG_PM
-static int gic_set_wake(struct irq_data *d, unsigned int on)
-{
-       int ret = -ENXIO;
-
-       if (gic_arch_extn.irq_set_wake)
-               ret = gic_arch_extn.irq_set_wake(d, on);
-
-       return ret;
-}
-
-#else
-#define gic_set_wake   NULL
-#endif
-
 static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs)
 {
        u32 irqstat, irqnr;
@@ -385,11 +319,9 @@ static struct irq_chip gic_chip = {
        .irq_unmask             = gic_unmask_irq,
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
-       .irq_retrigger          = gic_retrigger,
 #ifdef CONFIG_SMP
        .irq_set_affinity       = gic_set_affinity,
 #endif
-       .irq_set_wake           = gic_set_wake,
        .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
        .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
 };
@@ -1055,7 +987,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
                set_handle_irq(gic_handle_irq);
        }
 
-       gic_chip.flags |= gic_arch_extn.flags;
        gic_dist_init(gic);
        gic_cpu_init(gic);
        gic_pm_init(gic);
index 57f09cb544644bcd97aa81bfc044c2686b350bbb..269c2354c43169307aa02438dbf38aa4b54f0dad 100644 (file)
@@ -271,7 +271,7 @@ int gic_get_c0_fdc_int(void)
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
 }
 
-static void gic_handle_shared_int(void)
+static void gic_handle_shared_int(bool chained)
 {
        unsigned int i, intr, virq;
        unsigned long *pcpu_mask;
@@ -299,7 +299,10 @@ static void gic_handle_shared_int(void)
        while (intr != gic_shared_intrs) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_SHARED_TO_HWIRQ(intr));
-               do_IRQ(virq);
+               if (chained)
+                       generic_handle_irq(virq);
+               else
+                       do_IRQ(virq);
 
                /* go to next pending bit */
                bitmap_clear(pending, intr, 1);
@@ -431,7 +434,7 @@ static struct irq_chip gic_edge_irq_controller = {
 #endif
 };
 
-static void gic_handle_local_int(void)
+static void gic_handle_local_int(bool chained)
 {
        unsigned long pending, masked;
        unsigned int intr, virq;
@@ -445,7 +448,10 @@ static void gic_handle_local_int(void)
        while (intr != GIC_NUM_LOCAL_INTRS) {
                virq = irq_linear_revmap(gic_irq_domain,
                                         GIC_LOCAL_TO_HWIRQ(intr));
-               do_IRQ(virq);
+               if (chained)
+                       generic_handle_irq(virq);
+               else
+                       do_IRQ(virq);
 
                /* go to next pending bit */
                bitmap_clear(&pending, intr, 1);
@@ -509,13 +515,14 @@ static struct irq_chip gic_all_vpes_local_irq_controller = {
 
 static void __gic_irq_dispatch(void)
 {
-       gic_handle_local_int();
-       gic_handle_shared_int();
+       gic_handle_local_int(false);
+       gic_handle_shared_int(false);
 }
 
 static void gic_irq_dispatch(unsigned int irq, struct irq_desc *desc)
 {
-       __gic_irq_dispatch();
+       gic_handle_local_int(true);
+       gic_handle_shared_int(true);
 }
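The chained flag matters because do_IRQ() performs its own irq_enter()/irq_exit() accounting; when gic_irq_dispatch() runs as a chained handler, that accounting has already been done by the outer interrupt entry, so the inner dispatch presumably must use generic_handle_irq() to avoid nesting it, while the root __gic_irq_dispatch() path keeps do_IRQ().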
 
 #ifdef CONFIG_MIPS_GIC_IPI
index 4a9ce5b50c5bba33b7428a0b67b88d26e31c4067..6b2b582433bde95062e85d17403e4a505c5a4ef9 100644 (file)
@@ -104,7 +104,7 @@ static int sunxi_sc_nmi_set_type(struct irq_data *data, unsigned int flow_type)
        irqd_set_trigger_type(data, flow_type);
        irq_setup_alt_chip(data, flow_type);
 
-       for (i = 0; i <= gc->num_ct; i++, ct++)
+       for (i = 0; i < gc->num_ct; i++, ct++)
                if (ct->type & flow_type)
                        ctrl_off = ct->regs.type;
 
index 51c485d9a87736bcf06cfdf67c089f81288bc8d8..f67bbd80433e8c90527a9b911a2656987b800b96 100644 (file)
@@ -264,7 +264,7 @@ static int tegra_ictlr_domain_alloc(struct irq_domain *domain,
 
                irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
                                              &tegra_ictlr_chip,
-                                             &info->base[ictlr]);
+                                             info->base[ictlr]);
        }
 
        parent_args = *args;
index 7dc93aa004c86cfa988993d53164ea1d665aff97..312ffd3d00177ca5a5e21393377c760e9bdec91e 100644 (file)
@@ -173,7 +173,7 @@ static void unmap_switcher(void)
 bool lguest_address_ok(const struct lguest *lg,
                       unsigned long addr, unsigned long len)
 {
-       return (addr+len) / PAGE_SIZE < lg->pfn_limit && (addr+len >= addr);
+       return addr+len <= lg->pfn_limit * PAGE_SIZE && (addr+len >= addr);
 }
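The rewrite changes only the exact-boundary case. With PAGE_SIZE = 4096 and pfn_limit = 100, a range ending exactly at byte 409600 gave (addr+len) / PAGE_SIZE = 100, which the old "< pfn_limit" test rejected even though the range lies entirely inside the guest's memory; the new "addr+len <= pfn_limit * PAGE_SIZE" accepts it. Both forms keep the "addr+len >= addr" guard against wrap-around.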
 
 /*
index 5e7559be222adcd9724a08c2b8c2f135e5865769..eb934b0242e0e17652b7fc17c3255b5e70e40d15 100644 (file)
@@ -20,7 +20,7 @@
 #include "lg.h"
 
 /* Allow Guests to use a non-128 (ie. non-Linux) syscall trap. */
-static unsigned int syscall_vector = SYSCALL_VECTOR;
+static unsigned int syscall_vector = IA32_SYSCALL_VECTOR;
 module_param(syscall_vector, uint, 0444);
 
 /* The address of the interrupt handler is split into two bits: */
@@ -333,8 +333,8 @@ void set_interrupt(struct lg_cpu *cpu, unsigned int irq)
  */
 static bool could_be_syscall(unsigned int num)
 {
-       /* Normal Linux SYSCALL_VECTOR or reserved vector? */
-       return num == SYSCALL_VECTOR || num == syscall_vector;
+       /* Normal Linux IA32_SYSCALL_VECTOR or reserved vector? */
+       return num == IA32_SYSCALL_VECTOR || num == syscall_vector;
 }
 
 /* The syscall vector it wants must be unused by Host. */
@@ -351,7 +351,7 @@ bool check_syscall_vector(struct lguest *lg)
 int init_interrupts(void)
 {
        /* If they want some strange system call vector, reserve it now */
-       if (syscall_vector != SYSCALL_VECTOR) {
+       if (syscall_vector != IA32_SYSCALL_VECTOR) {
                if (test_bit(syscall_vector, used_vectors) ||
                    vector_used_by_percpu_irq(syscall_vector)) {
                        printk(KERN_ERR "lg: couldn't reserve syscall %u\n",
@@ -366,7 +366,7 @@ int init_interrupts(void)
 
 void free_interrupts(void)
 {
-       if (syscall_vector != SYSCALL_VECTOR)
+       if (syscall_vector != IA32_SYSCALL_VECTOR)
                clear_bit(syscall_vector, used_vectors);
 }
 
index 30f2aef69d787d7245b3e91c53b98a0a0216cdb9..6a4cd771a2be62b4172cc26a178ca85fbf7e6d27 100644 (file)
@@ -46,7 +46,7 @@
 #include <asm/setup.h>
 #include <asm/lguest.h>
 #include <asm/uaccess.h>
-#include <asm/i387.h>
+#include <asm/fpu/internal.h>
 #include <asm/tlbflush.h>
 #include "../lg.h"
 
@@ -251,7 +251,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
         * we set it now, so we can trap and pass that trap to the Guest if it
         * uses the FPU.
         */
-       if (cpu->ts && user_has_fpu())
+       if (cpu->ts && fpregs_active())
                stts();
 
        /*
@@ -283,7 +283,7 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
                wrmsr(MSR_IA32_SYSENTER_CS, __KERNEL_CS, 0);
 
        /* Clear the host TS bit if it was set above. */
-       if (cpu->ts && user_has_fpu())
+       if (cpu->ts && fpregs_active())
                clts();
 
        /*
@@ -297,12 +297,12 @@ void lguest_arch_run_guest(struct lg_cpu *cpu)
        /*
         * Similarly, if we took a trap because the Guest used the FPU,
         * we have to restore the FPU it expects to see.
-        * math_state_restore() may sleep and we may even move off to
+        * fpu__restore() may sleep and we may even move off to
         * a different CPU. So all the critical stuff should be done
         * before this.
         */
-       else if (cpu->regs->trapnum == 7 && !user_has_fpu())
-               math_state_restore();
+       else if (cpu->regs->trapnum == 7 && !fpregs_active())
+               fpu__restore(&current->thread.fpu);
 }
 
 /*H:130
index 2bc56e2a35262141859f8da21d09a54dec852e52..135a0907e9de413d140e9fb9b793a91b638a1606 100644 (file)
@@ -177,11 +177,16 @@ static struct md_rdev *next_active_rdev(struct md_rdev *rdev, struct mddev *mdde
         * nr_pending is 0 and In_sync is clear, the entries we return will
         * still be in the same position on the list when we re-enter
         * list_for_each_entry_continue_rcu.
+        *
+        * Note that if entered with 'rdev == NULL' to start at the
+        * beginning, we temporarily assign 'rdev' to an address which
+        * isn't really an rdev, but which can be used by
+        * list_for_each_entry_continue_rcu() to find the first entry.
         */
        rcu_read_lock();
        if (rdev == NULL)
                /* start at the beginning */
-               rdev = list_entry_rcu(&mddev->disks, struct md_rdev, same_set);
+               rdev = list_entry(&mddev->disks, struct md_rdev, same_set);
        else {
                /* release the previous rdev and start from there. */
                rdev_dec_pending(rdev, mddev);
index 9eeea196328acc63c3220c309399abf014dfbb4b..5503e43e5f28257a0df0be1620d21fadaefc6476 100644 (file)
@@ -925,10 +925,11 @@ static int crypt_convert(struct crypt_config *cc,
 
                switch (r) {
                /* async */
-               case -EINPROGRESS:
                case -EBUSY:
                        wait_for_completion(&ctx->restart);
                        reinit_completion(&ctx->restart);
+                       /* fall through */
+               case -EINPROGRESS:
                        ctx->req = NULL;
                        ctx->cc_sector++;
                        continue;
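Background for the reordering: with CRYPTO_TFM_REQ_MAY_BACKLOG set, -EBUSY means the request was parked on the backlog and the completion callback will fire once with err == -EINPROGRESS when the engine accepts it, whereas -EINPROGRESS means the request is already in flight. The -EBUSY case therefore waits for that notification and then proceeds exactly like -EINPROGRESS, which the new fall-through expresses; the matching complete(&ctx->restart) moves into kcryptd_async_done() in the next hunk. A sketch of the submit-side contract, assuming an already prepared ablkcipher request and restart completion:

    /* Sketch: the async-submit pattern this hunk relies on. */
    switch (crypto_ablkcipher_encrypt(req)) {
    case -EBUSY:                            /* parked on the backlog */
            wait_for_completion(&restart);  /* fires with -EINPROGRESS */
            reinit_completion(&restart);
            /* fall through */
    case -EINPROGRESS:                      /* in flight; callback ends it */
            break;
    case 0:                                 /* completed synchronously */
            break;
    }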
@@ -1345,8 +1346,10 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
        struct dm_crypt_io *io = container_of(ctx, struct dm_crypt_io, ctx);
        struct crypt_config *cc = io->cc;
 
-       if (error == -EINPROGRESS)
+       if (error == -EINPROGRESS) {
+               complete(&ctx->restart);
                return;
+       }
 
        if (!error && cc->iv_gen_ops && cc->iv_gen_ops->post)
                error = cc->iv_gen_ops->post(cc, iv_of_dmreq(cc, dmreq), dmreq);
@@ -1357,15 +1360,12 @@ static void kcryptd_async_done(struct crypto_async_request *async_req,
        crypt_free_req(cc, req_of_dmreq(cc, dmreq), io->base_bio);
 
        if (!atomic_dec_and_test(&ctx->cc_pending))
-               goto done;
+               return;
 
        if (bio_data_dir(io->base_bio) == READ)
                kcryptd_crypt_read_done(io);
        else
                kcryptd_crypt_write_io_submit(io, 1);
-done:
-       if (!completion_done(&ctx->restart))
-               complete(&ctx->restart);
 }
 
 static void kcryptd_crypt(struct work_struct *work)
index 63953477a07c36e771a32d5bde686bd0f05890f1..eff7bdd7731d5e437d3b83ca4803ac8c03bac6b6 100644 (file)
@@ -429,9 +429,11 @@ static int __multipath_map(struct dm_target *ti, struct request *clone,
                /* blk-mq request-based interface */
                *__clone = blk_get_request(bdev_get_queue(bdev),
                                           rq_data_dir(rq), GFP_ATOMIC);
-               if (IS_ERR(*__clone))
+               if (IS_ERR(*__clone)) {
                        /* ENOMEM, requeue */
+                       clear_mapinfo(m, map_context);
                        return r;
+               }
                (*__clone)->bio = (*__clone)->biotail = NULL;
                (*__clone)->rq_disk = bdev->bd_disk;
                (*__clone)->cmd_flags |= REQ_FAILFAST_TRANSPORT;
index d9b00b8565c6dc1a36f5a3d863baa370126da593..16ba55ad708992f7e942b2f6ce2048d12be5c1b6 100644 (file)
@@ -820,6 +820,12 @@ void dm_consume_args(struct dm_arg_set *as, unsigned num_args)
 }
 EXPORT_SYMBOL(dm_consume_args);
 
+static bool __table_type_request_based(unsigned table_type)
+{
+       return (table_type == DM_TYPE_REQUEST_BASED ||
+               table_type == DM_TYPE_MQ_REQUEST_BASED);
+}
+
 static int dm_table_set_type(struct dm_table *t)
 {
        unsigned i;
@@ -852,8 +858,7 @@ static int dm_table_set_type(struct dm_table *t)
                 * Determine the type from the live device.
                 * Default to bio-based if device is new.
                 */
-               if (live_md_type == DM_TYPE_REQUEST_BASED ||
-                   live_md_type == DM_TYPE_MQ_REQUEST_BASED)
+               if (__table_type_request_based(live_md_type))
                        request_based = 1;
                else
                        bio_based = 1;
@@ -903,7 +908,7 @@ static int dm_table_set_type(struct dm_table *t)
                        }
                t->type = DM_TYPE_MQ_REQUEST_BASED;
 
-       } else if (hybrid && list_empty(devices) && live_md_type != DM_TYPE_NONE) {
+       } else if (list_empty(devices) && __table_type_request_based(live_md_type)) {
                /* inherit live MD type */
                t->type = live_md_type;
 
@@ -925,10 +930,7 @@ struct target_type *dm_table_get_immutable_target_type(struct dm_table *t)
 
 bool dm_table_request_based(struct dm_table *t)
 {
-       unsigned table_type = dm_table_get_type(t);
-
-       return (table_type == DM_TYPE_REQUEST_BASED ||
-               table_type == DM_TYPE_MQ_REQUEST_BASED);
+       return __table_type_request_based(dm_table_get_type(t));
 }
 
 bool dm_table_mq_request_based(struct dm_table *t)
index a930b72314ac985da702f8b47a8054a75b2e2ba8..2caf492890d64b27a0a88f24f4f04d1778448d9a 100644 (file)
@@ -1082,13 +1082,11 @@ static void rq_completed(struct mapped_device *md, int rw, bool run_queue)
        dm_put(md);
 }
 
-static void free_rq_clone(struct request *clone, bool must_be_mapped)
+static void free_rq_clone(struct request *clone)
 {
        struct dm_rq_target_io *tio = clone->end_io_data;
        struct mapped_device *md = tio->md;
 
-       WARN_ON_ONCE(must_be_mapped && !clone->q);
-
        blk_rq_unprep_clone(clone);
 
        if (md->type == DM_TYPE_MQ_REQUEST_BASED)
@@ -1132,7 +1130,7 @@ static void dm_end_request(struct request *clone, int error)
                        rq->sense_len = clone->sense_len;
        }
 
-       free_rq_clone(clone, true);
+       free_rq_clone(clone);
        if (!rq->q->mq_ops)
                blk_end_request_all(rq, error);
        else
@@ -1151,7 +1149,7 @@ static void dm_unprep_request(struct request *rq)
        }
 
        if (clone)
-               free_rq_clone(clone, false);
+               free_rq_clone(clone);
 }
 
 /*
@@ -1164,6 +1162,7 @@ static void old_requeue_request(struct request *rq)
 
        spin_lock_irqsave(q->queue_lock, flags);
        blk_requeue_request(q, rq);
+       blk_run_queue_async(q);
        spin_unlock_irqrestore(q->queue_lock, flags);
 }
 
@@ -1724,8 +1723,7 @@ static int dm_merge_bvec(struct request_queue *q,
        struct mapped_device *md = q->queuedata;
        struct dm_table *map = dm_get_live_table_fast(md);
        struct dm_target *ti;
-       sector_t max_sectors;
-       int max_size = 0;
+       sector_t max_sectors, max_size = 0;
 
        if (unlikely(!map))
                goto out;
@@ -1740,8 +1738,16 @@ static int dm_merge_bvec(struct request_queue *q,
        max_sectors = min(max_io_len(bvm->bi_sector, ti),
                          (sector_t) queue_max_sectors(q));
        max_size = (max_sectors << SECTOR_SHIFT) - bvm->bi_size;
-       if (unlikely(max_size < 0)) /* this shouldn't _ever_ happen */
-               max_size = 0;
+
+       /*
+        * FIXME: this stop-gap fix _must_ be cleaned up (by passing a sector_t
+        * to the targets' merge function since it holds sectors not bytes).
+        * Just doing this as an interim fix for stable@ because the more
+        * comprehensive cleanup of switching to sector_t will impact every
+        * DM target that implements a ->merge hook.
+        */
+       if (max_size > INT_MAX)
+               max_size = INT_MAX;
 
        /*
         * merge_bvec_fn() returns number of bytes
@@ -1749,7 +1755,7 @@ static int dm_merge_bvec(struct request_queue *q,
         * max is precomputed maximal io size
         */
        if (max_size && ti->type->merge)
-               max_size = ti->type->merge(ti, bvm, biovec, max_size);
+               max_size = ti->type->merge(ti, bvm, biovec, (int) max_size);
        /*
         * If the target doesn't support merge method and some of the devices
         * provided their merge_bvec method (we know this by looking for the
@@ -1971,8 +1977,8 @@ static int map_request(struct dm_rq_target_io *tio, struct request *rq,
                        dm_kill_unmapped_request(rq, r);
                        return r;
                }
-               if (IS_ERR(clone))
-                       return DM_MAPIO_REQUEUE;
+               if (r != DM_MAPIO_REMAPPED)
+                       return r;
                if (setup_clone(clone, rq, tio, GFP_ATOMIC)) {
                        /* -ENOMEM */
                        ti->type->release_clone_rq(clone);
@@ -2753,13 +2759,15 @@ static int dm_mq_queue_rq(struct blk_mq_hw_ctx *hctx,
        if (dm_table_get_type(map) == DM_TYPE_REQUEST_BASED) {
                /* clone request is allocated at the end of the pdu */
                tio->clone = (void *)blk_mq_rq_to_pdu(rq) + sizeof(struct dm_rq_target_io);
-               if (!clone_rq(rq, md, tio, GFP_ATOMIC))
-                       return BLK_MQ_RQ_QUEUE_BUSY;
+               (void) clone_rq(rq, md, tio, GFP_ATOMIC);
                queue_kthread_work(&md->kworker, &tio->work);
        } else {
                /* Direct call is fine since .queue_rq allows allocations */
-               if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE)
-                       dm_requeue_unmapped_original_request(md, rq);
+               if (map_request(tio, rq, md) == DM_MAPIO_REQUEUE) {
+                       /* Undo dm_start_request() before requeuing */
+                       rq_completed(md, rq_data_dir(rq), false);
+                       return BLK_MQ_RQ_QUEUE_BUSY;
+               }
        }
 
        return BLK_MQ_RQ_QUEUE_OK;
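On the dm_merge_bvec() clamp earlier in this file: max_sectors is a sector_t, so (max_sectors << SECTOR_SHIFT) - bvm->bi_size can legitimately exceed INT_MAX; the old int max_size relied on the result wrapping negative and being zeroed. Worked numbers: max_sectors = 0x400000 (4194304 sectors of 512 bytes, i.e. 2 GiB) gives 0x80000000 minus bi_size, which does not fit a 32-bit int, so the new code clamps to INT_MAX before handing the value to the int-typed ->merge hook.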
index d4f31e195e26ebcc4233c9b333624d8b73191826..4dbed4a67aaf40e3c04bde925870c24d13cd1b4e 100644 (file)
@@ -3834,7 +3834,7 @@ array_state_store(struct mddev *mddev, const char *buf, size_t len)
                                err = -EBUSY;
                }
                spin_unlock(&mddev->lock);
-               return err;
+               return err ?: len;
        }
        err = mddev_lock(mddev);
        if (err)
@@ -4211,34 +4211,36 @@ action_store(struct mddev *mddev, const char *page, size_t len)
        if (!mddev->pers || !mddev->pers->sync_request)
                return -EINVAL;
 
-       if (cmd_match(page, "frozen"))
-               set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
-       else
-               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
 
        if (cmd_match(page, "idle") || cmd_match(page, "frozen")) {
-               flush_workqueue(md_misc_wq);
-               if (mddev->sync_thread) {
-                       set_bit(MD_RECOVERY_INTR, &mddev->recovery);
-                       if (mddev_lock(mddev) == 0) {
+               if (cmd_match(page, "frozen"))
+                       set_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               else
+                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
+               if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) &&
+                   mddev_lock(mddev) == 0) {
+                       flush_workqueue(md_misc_wq);
+                       if (mddev->sync_thread) {
+                               set_bit(MD_RECOVERY_INTR, &mddev->recovery);
                                md_reap_sync_thread(mddev);
-                               mddev_unlock(mddev);
                        }
+                       mddev_unlock(mddev);
                }
        } else if (test_bit(MD_RECOVERY_RUNNING, &mddev->recovery) ||
                   test_bit(MD_RECOVERY_NEEDED, &mddev->recovery))
                return -EBUSY;
        else if (cmd_match(page, "resync"))
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
        else if (cmd_match(page, "recover")) {
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_RECOVER, &mddev->recovery);
-               set_bit(MD_RECOVERY_NEEDED, &mddev->recovery);
        } else if (cmd_match(page, "reshape")) {
                int err;
                if (mddev->pers->start_reshape == NULL)
                        return -EINVAL;
                err = mddev_lock(mddev);
                if (!err) {
+                       clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                        err = mddev->pers->start_reshape(mddev);
                        mddev_unlock(mddev);
                }
@@ -4250,6 +4252,7 @@ action_store(struct mddev *mddev, const char *page, size_t len)
                        set_bit(MD_RECOVERY_CHECK, &mddev->recovery);
                else if (!cmd_match(page, "repair"))
                        return -EINVAL;
+               clear_bit(MD_RECOVERY_FROZEN, &mddev->recovery);
                set_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
                set_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        }
@@ -4818,12 +4821,12 @@ static void md_free(struct kobject *ko)
        if (mddev->sysfs_state)
                sysfs_put(mddev->sysfs_state);
 
+       if (mddev->queue)
+               blk_cleanup_queue(mddev->queue);
        if (mddev->gendisk) {
                del_gendisk(mddev->gendisk);
                put_disk(mddev->gendisk);
        }
-       if (mddev->queue)
-               blk_cleanup_queue(mddev->queue);
 
        kfree(mddev);
 }
@@ -8259,6 +8262,7 @@ void md_reap_sync_thread(struct mddev *mddev)
        if (mddev_is_clustered(mddev))
                md_cluster_ops->metadata_update_finish(mddev);
        clear_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        clear_bit(MD_RECOVERY_REQUESTED, &mddev->recovery);
index 2cb59a641cd24417d996df89c2e2c81cc9eb087b..efb654eb53992fc45da9a6e08572779496083c3f 100644 (file)
@@ -188,8 +188,9 @@ static int create_strip_zones(struct mddev *mddev, struct r0conf **private_conf)
                }
                dev[j] = rdev1;
 
-               disk_stack_limits(mddev->gendisk, rdev1->bdev,
-                                 rdev1->data_offset << 9);
+               if (mddev->queue)
+                       disk_stack_limits(mddev->gendisk, rdev1->bdev,
+                                         rdev1->data_offset << 9);
 
                if (rdev1->bdev->bd_disk->queue->merge_bvec_fn)
                        conf->has_merge_bvec = 1;
@@ -523,6 +524,9 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
                         ? (sector & (chunk_sects-1))
                         : sector_div(sector, chunk_sects));
 
+               /* Restore due to sector_div */
+               sector = bio->bi_iter.bi_sector;
+
                if (sectors < bio_sectors(bio)) {
                        split = bio_split(bio, sectors, GFP_NOIO, fs_bio_set);
                        bio_chain(split, bio);
@@ -530,7 +534,6 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
                        split = bio;
                }
 
-               sector = bio->bi_iter.bi_sector;
                zone = find_zone(mddev->private, &sector);
                tmp_dev = map_sector(mddev, zone, sector, &sector);
                split->bi_bdev = tmp_dev->bdev;
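The raid0 fix hinges on sector_div() being a divide-in-place: it replaces its first argument with the quotient and returns the remainder, so sector is clobbered by the chunk-alignment computation and must be reloaded from bio->bi_iter.bi_sector before find_zone(). A reminder sketch:

    /* sector_div(s, d): s becomes s / d, the remainder is returned. */
    sector_t s = 1003;
    u32 rem = sector_div(s, 8);     /* now s == 125, rem == 3 */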
index e793ab6b35705e0ed1ad6904ebe9353b6dbf6fd6..f55c3f35b7463141086afb727785c775c5185d76 100644 (file)
@@ -4156,6 +4156,7 @@ static int raid10_start_reshape(struct mddev *mddev)
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
 
index 77dfd720aaa00ebc55d14234cd40b6e9b65bae5f..b6793d2e051f3b278405f236e6623980bcdf1d04 100644 (file)
@@ -749,6 +749,7 @@ static void unlock_two_stripes(struct stripe_head *sh1, struct stripe_head *sh2)
 static bool stripe_can_batch(struct stripe_head *sh)
 {
        return test_bit(STRIPE_BATCH_READY, &sh->state) &&
+               !test_bit(STRIPE_BITMAP_PENDING, &sh->state) &&
                is_full_stripe_write(sh);
 }
 
@@ -837,6 +838,15 @@ static void stripe_add_to_batch_list(struct r5conf *conf, struct stripe_head *sh
                    < IO_THRESHOLD)
                        md_wakeup_thread(conf->mddev->thread);
 
+       if (test_and_clear_bit(STRIPE_BIT_DELAY, &sh->state)) {
+               int seq = sh->bm_seq;
+               if (test_bit(STRIPE_BIT_DELAY, &sh->batch_head->state) &&
+                   sh->batch_head->bm_seq > seq)
+                       seq = sh->batch_head->bm_seq;
+               set_bit(STRIPE_BIT_DELAY, &sh->batch_head->state);
+               sh->batch_head->bm_seq = seq;
+       }
+
        atomic_inc(&sh->count);
 unlock_out:
        unlock_two_stripes(head, sh);
@@ -1078,9 +1088,6 @@ again:
                        pr_debug("skip op %ld on disc %d for sector %llu\n",
                                bi->bi_rw, i, (unsigned long long)sh->sector);
                        clear_bit(R5_LOCKED, &sh->dev[i].flags);
-                       if (sh->batch_head)
-                               set_bit(STRIPE_BATCH_ERR,
-                                       &sh->batch_head->state);
                        set_bit(STRIPE_HANDLE, &sh->state);
                }
 
@@ -1825,7 +1832,7 @@ again:
        } else
                init_async_submit(&submit, 0, tx, NULL, NULL,
                                  to_addr_conv(sh, percpu, j));
-       async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
+       tx = async_gen_syndrome(blocks, 0, count+2, STRIPE_SIZE,  &submit);
        if (!last_stripe) {
                j++;
                sh = list_first_entry(&sh->batch_list, struct stripe_head,
@@ -1971,17 +1978,30 @@ static void raid_run_ops(struct stripe_head *sh, unsigned long ops_request)
        put_cpu();
 }
 
+static struct stripe_head *alloc_stripe(struct kmem_cache *sc, gfp_t gfp)
+{
+       struct stripe_head *sh;
+
+       sh = kmem_cache_zalloc(sc, gfp);
+       if (sh) {
+               spin_lock_init(&sh->stripe_lock);
+               spin_lock_init(&sh->batch_lock);
+               INIT_LIST_HEAD(&sh->batch_list);
+               INIT_LIST_HEAD(&sh->lru);
+               atomic_set(&sh->count, 1);
+       }
+       return sh;
+}
 static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
 {
        struct stripe_head *sh;
-       sh = kmem_cache_zalloc(conf->slab_cache, gfp);
+
+       sh = alloc_stripe(conf->slab_cache, gfp);
        if (!sh)
                return 0;
 
        sh->raid_conf = conf;
 
-       spin_lock_init(&sh->stripe_lock);
-
        if (grow_buffers(sh, gfp)) {
                shrink_buffers(sh);
                kmem_cache_free(conf->slab_cache, sh);
@@ -1990,13 +2010,8 @@ static int grow_one_stripe(struct r5conf *conf, gfp_t gfp)
        sh->hash_lock_index =
                conf->max_nr_stripes % NR_STRIPE_HASH_LOCKS;
        /* we just created an active stripe so... */
-       atomic_set(&sh->count, 1);
        atomic_inc(&conf->active_stripes);
-       INIT_LIST_HEAD(&sh->lru);
 
-       spin_lock_init(&sh->batch_lock);
-       INIT_LIST_HEAD(&sh->batch_list);
-       sh->batch_head = NULL;
        release_stripe(sh);
        conf->max_nr_stripes++;
        return 1;
@@ -2060,6 +2075,35 @@ static struct flex_array *scribble_alloc(int num, int cnt, gfp_t flags)
        return ret;
 }
 
+static int resize_chunks(struct r5conf *conf, int new_disks, int new_sectors)
+{
+       unsigned long cpu;
+       int err = 0;
+
+       mddev_suspend(conf->mddev);
+       get_online_cpus();
+       for_each_present_cpu(cpu) {
+               struct raid5_percpu *percpu;
+               struct flex_array *scribble;
+
+               percpu = per_cpu_ptr(conf->percpu, cpu);
+               scribble = scribble_alloc(new_disks,
+                                         new_sectors / STRIPE_SECTORS,
+                                         GFP_NOIO);
+
+               if (scribble) {
+                       flex_array_free(percpu->scribble);
+                       percpu->scribble = scribble;
+               } else {
+                       err = -ENOMEM;
+                       break;
+               }
+       }
+       put_online_cpus();
+       mddev_resume(conf->mddev);
+       return err;
+}
+
 static int resize_stripes(struct r5conf *conf, int newsize)
 {
        /* Make all the stripes able to hold 'newsize' devices.
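The new resize_chunks() above follows a swap-or-bail pattern: each per-CPU scribble buffer is replaced by a larger allocation, and the loop stops with -ENOMEM on the first failure; CPUs already updated keep their larger (still valid) buffers, so no rollback is needed. A minimal runnable userspace model of that flow, with a hypothetical percpu_model type standing in for raid5_percpu and plain malloc() in place of scribble_alloc():

    #include <stdio.h>
    #include <stdlib.h>
    #include <errno.h>

    #define NCPUS 4

    struct percpu_model { void *scribble; size_t size; };

    /* Model of the swap-or-bail loop in resize_chunks(): grow each
     * per-CPU buffer; on the first allocation failure stop and report
     * -ENOMEM.  Buffers already grown are left in place, since a
     * larger-than-required buffer remains safe to use. */
    static int resize_all(struct percpu_model *cpus, size_t new_size)
    {
        for (int cpu = 0; cpu < NCPUS; cpu++) {
            void *bigger = malloc(new_size);
            if (!bigger)
                return -ENOMEM;
            free(cpus[cpu].scribble);
            cpus[cpu].scribble = bigger;
            cpus[cpu].size = new_size;
        }
        return 0;
    }

    int main(void)
    {
        struct percpu_model cpus[NCPUS] = { 0 };
        printf("resize: %d\n", resize_all(cpus, 4096));
        for (int cpu = 0; cpu < NCPUS; cpu++)
            free(cpus[cpu].scribble);
        return 0;
    }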
@@ -2088,7 +2132,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        struct stripe_head *osh, *nsh;
        LIST_HEAD(newstripes);
        struct disk_info *ndisks;
-       unsigned long cpu;
        int err;
        struct kmem_cache *sc;
        int i;
@@ -2109,13 +2152,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                return -ENOMEM;
 
        for (i = conf->max_nr_stripes; i; i--) {
-               nsh = kmem_cache_zalloc(sc, GFP_KERNEL);
+               nsh = alloc_stripe(sc, GFP_KERNEL);
                if (!nsh)
                        break;
 
                nsh->raid_conf = conf;
-               spin_lock_init(&nsh->stripe_lock);
-
                list_add(&nsh->lru, &newstripes);
        }
        if (i) {
@@ -2142,13 +2183,11 @@ static int resize_stripes(struct r5conf *conf, int newsize)
                                    lock_device_hash_lock(conf, hash));
                osh = get_free_stripe(conf, hash);
                unlock_device_hash_lock(conf, hash);
-               atomic_set(&nsh->count, 1);
+
                for(i=0; i<conf->pool_size; i++) {
                        nsh->dev[i].page = osh->dev[i].page;
                        nsh->dev[i].orig_page = osh->dev[i].page;
                }
-               for( ; i<newsize; i++)
-                       nsh->dev[i].page = NULL;
                nsh->hash_lock_index = hash;
                kmem_cache_free(conf->slab_cache, osh);
                cnt++;
@@ -2174,25 +2213,6 @@ static int resize_stripes(struct r5conf *conf, int newsize)
        } else
                err = -ENOMEM;
 
-       get_online_cpus();
-       for_each_present_cpu(cpu) {
-               struct raid5_percpu *percpu;
-               struct flex_array *scribble;
-
-               percpu = per_cpu_ptr(conf->percpu, cpu);
-               scribble = scribble_alloc(newsize, conf->chunk_sectors /
-                       STRIPE_SECTORS, GFP_NOIO);
-
-               if (scribble) {
-                       flex_array_free(percpu->scribble);
-                       percpu->scribble = scribble;
-               } else {
-                       err = -ENOMEM;
-                       break;
-               }
-       }
-       put_online_cpus();
-
        /* Step 4, return new stripes to service */
        while(!list_empty(&newstripes)) {
                nsh = list_entry(newstripes.next, struct stripe_head, lru);
@@ -2212,7 +2232,8 @@ static int resize_stripes(struct r5conf *conf, int newsize)
 
        conf->slab_cache = sc;
        conf->active_name = 1-conf->active_name;
-       conf->pool_size = newsize;
+       if (!err)
+               conf->pool_size = newsize;
        return err;
 }
 
@@ -2434,7 +2455,7 @@ static void raid5_end_write_request(struct bio *bi, int error)
        }
        rdev_dec_pending(rdev, conf->mddev);
 
-       if (sh->batch_head && !uptodate)
+       if (sh->batch_head && !uptodate && !replacement)
                set_bit(STRIPE_BATCH_ERR, &sh->batch_head->state);
 
        if (!test_and_clear_bit(R5_DOUBLE_LOCKED, &sh->dev[i].flags))
@@ -2976,14 +2997,32 @@ static int add_stripe_bio(struct stripe_head *sh, struct bio *bi, int dd_idx,
        pr_debug("added bi b#%llu to stripe s#%llu, disk %d.\n",
                (unsigned long long)(*bip)->bi_iter.bi_sector,
                (unsigned long long)sh->sector, dd_idx);
-       spin_unlock_irq(&sh->stripe_lock);
 
        if (conf->mddev->bitmap && firstwrite) {
+               /* Cannot hold spinlock over bitmap_startwrite,
+                * but must ensure this isn't added to a batch until
+                * we have added to the bitmap and set bm_seq.
+                * So set STRIPE_BITMAP_PENDING to prevent
+                * batching.
+                * If multiple add_stripe_bio() calls race here they
+                * must all set STRIPE_BITMAP_PENDING.  So only the first one
+                * to complete "bitmap_startwrite" gets to set
+                * STRIPE_BIT_DELAY.  This is important as once a stripe
+                * is added to a batch, STRIPE_BIT_DELAY cannot be changed
+                * any more.
+                */
+               set_bit(STRIPE_BITMAP_PENDING, &sh->state);
+               spin_unlock_irq(&sh->stripe_lock);
                bitmap_startwrite(conf->mddev->bitmap, sh->sector,
                                  STRIPE_SECTORS, 0);
-               sh->bm_seq = conf->seq_flush+1;
-               set_bit(STRIPE_BIT_DELAY, &sh->state);
+               spin_lock_irq(&sh->stripe_lock);
+               clear_bit(STRIPE_BITMAP_PENDING, &sh->state);
+               if (!sh->batch_head) {
+                       sh->bm_seq = conf->seq_flush+1;
+                       set_bit(STRIPE_BIT_DELAY, &sh->state);
+               }
        }
+       spin_unlock_irq(&sh->stripe_lock);
 
        if (stripe_can_batch(sh))
                stripe_add_to_batch_list(conf, sh);
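The STRIPE_BITMAP_PENDING comment above describes an ordering protocol: the pending flag must be set before the stripe lock is dropped around the sleeping bitmap_startwrite() call, and STRIPE_BIT_DELAY may be set afterwards only if the stripe still has no batch head. A runnable single-threaded sketch of that sequence, using a pthread mutex in place of stripe_lock and hypothetical field names:

    #include <pthread.h>
    #include <stdbool.h>
    #include <stdio.h>

    struct stripe_model {
        pthread_mutex_t lock;
        bool bitmap_pending;   /* models STRIPE_BITMAP_PENDING */
        bool bit_delay;        /* models STRIPE_BIT_DELAY */
        bool in_batch;         /* models sh->batch_head != NULL */
    };

    static void start_bitmap_write(struct stripe_model *sh)
    {
        pthread_mutex_lock(&sh->lock);
        /* 1. Mark the stripe un-batchable before dropping the lock. */
        sh->bitmap_pending = true;
        pthread_mutex_unlock(&sh->lock);

        /* 2. The sleeping call: cannot be made under a spinlock. */
        printf("bitmap_startwrite()\n");

        pthread_mutex_lock(&sh->lock);
        sh->bitmap_pending = false;
        /* 3. Only a stripe that is still not in a batch may take
         *    BIT_DELAY; once batched, the flag must not change. */
        if (!sh->in_batch)
            sh->bit_delay = true;
        pthread_mutex_unlock(&sh->lock);
    }

    int main(void)
    {
        struct stripe_model sh = { .in_batch = false };
        pthread_mutex_init(&sh.lock, NULL);
        start_bitmap_write(&sh);
        printf("bit_delay=%d\n", sh.bit_delay);
        return 0;
    }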
@@ -3278,7 +3317,9 @@ static int need_this_block(struct stripe_head *sh, struct stripe_head_state *s,
                /* reconstruct-write isn't being forced */
                return 0;
        for (i = 0; i < s->failed; i++) {
-               if (!test_bit(R5_UPTODATE, &fdev[i]->flags) &&
+               if (s->failed_num[i] != sh->pd_idx &&
+                   s->failed_num[i] != sh->qd_idx &&
+                   !test_bit(R5_UPTODATE, &fdev[i]->flags) &&
                    !test_bit(R5_OVERWRITE, &fdev[i]->flags))
                        return 1;
        }
@@ -3298,6 +3339,7 @@ static int fetch_block(struct stripe_head *sh, struct stripe_head_state *s,
                 */
                BUG_ON(test_bit(R5_Wantcompute, &dev->flags));
                BUG_ON(test_bit(R5_Wantread, &dev->flags));
+               BUG_ON(sh->batch_head);
                if ((s->uptodate == disks - 1) &&
                    (s->failed && (disk_idx == s->failed_num[0] ||
                                   disk_idx == s->failed_num[1]))) {
@@ -3366,7 +3408,6 @@ static void handle_stripe_fill(struct stripe_head *sh,
 {
        int i;
 
-       BUG_ON(sh->batch_head);
        /* look for blocks to read/compute, skip this if a compute
         * is already in flight, or if the stripe contents are in the
         * midst of changing due to a write
@@ -3379,6 +3420,8 @@ static void handle_stripe_fill(struct stripe_head *sh,
        set_bit(STRIPE_HANDLE, &sh->state);
 }
 
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags);
 /* handle_stripe_clean_event
  * any written block on an uptodate or failed drive can be returned.
  * Note that if we 'wrote' to a failed drive, it will be UPTODATE, but
@@ -3392,7 +3435,6 @@ static void handle_stripe_clean_event(struct r5conf *conf,
        int discard_pending = 0;
        struct stripe_head *head_sh = sh;
        bool do_endio = false;
-       int wakeup_nr = 0;
 
        for (i = disks; i--; )
                if (sh->dev[i].written) {
@@ -3481,44 +3523,8 @@ unhash:
                if (atomic_dec_and_test(&conf->pending_full_writes))
                        md_wakeup_thread(conf->mddev->thread);
 
-       if (!head_sh->batch_head || !do_endio)
-               return;
-       for (i = 0; i < head_sh->disks; i++) {
-               if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
-                       wakeup_nr++;
-       }
-       while (!list_empty(&head_sh->batch_list)) {
-               int i;
-               sh = list_first_entry(&head_sh->batch_list,
-                                     struct stripe_head, batch_list);
-               list_del_init(&sh->batch_list);
-
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
-               sh->check_state = head_sh->check_state;
-               sh->reconstruct_state = head_sh->reconstruct_state;
-               for (i = 0; i < sh->disks; i++) {
-                       if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
-                               wakeup_nr++;
-                       sh->dev[i].flags = head_sh->dev[i].flags;
-               }
-
-               spin_lock_irq(&sh->stripe_lock);
-               sh->batch_head = NULL;
-               spin_unlock_irq(&sh->stripe_lock);
-               if (sh->state & STRIPE_EXPAND_SYNC_FLAG)
-                       set_bit(STRIPE_HANDLE, &sh->state);
-               release_stripe(sh);
-       }
-
-       spin_lock_irq(&head_sh->stripe_lock);
-       head_sh->batch_head = NULL;
-       spin_unlock_irq(&head_sh->stripe_lock);
-       wake_up_nr(&conf->wait_for_overlap, wakeup_nr);
-       if (head_sh->state & STRIPE_EXPAND_SYNC_FLAG)
-               set_bit(STRIPE_HANDLE, &head_sh->state);
+       if (head_sh->batch_head && do_endio)
+               break_stripe_batch_list(head_sh, STRIPE_EXPAND_SYNC_FLAGS);
 }
 
 static void handle_stripe_dirtying(struct r5conf *conf,
@@ -4159,9 +4165,13 @@ static void analyse_stripe(struct stripe_head *sh, struct stripe_head_state *s)
 
 static int clear_batch_ready(struct stripe_head *sh)
 {
+       /* Return '1' if this is a member of a batch, or
+        * '0' if it is a lone stripe or a head which can now be
+        * handled.
+        */
        struct stripe_head *tmp;
        if (!test_and_clear_bit(STRIPE_BATCH_READY, &sh->state))
-               return 0;
+               return (sh->batch_head && sh->batch_head != sh);
        spin_lock(&sh->stripe_lock);
        if (!sh->batch_head) {
                spin_unlock(&sh->stripe_lock);
@@ -4189,46 +4199,65 @@ static int clear_batch_ready(struct stripe_head *sh)
        return 0;
 }
 
-static void check_break_stripe_batch_list(struct stripe_head *sh)
+static void break_stripe_batch_list(struct stripe_head *head_sh,
+                                   unsigned long handle_flags)
 {
-       struct stripe_head *head_sh, *next;
+       struct stripe_head *sh, *next;
        int i;
+       int do_wakeup = 0;
 
-       if (!test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
-               return;
+       list_for_each_entry_safe(sh, next, &head_sh->batch_list, batch_list) {
 
-       head_sh = sh;
-       do {
-               sh = list_first_entry(&sh->batch_list,
-                                     struct stripe_head, batch_list);
-               BUG_ON(sh == head_sh);
-       } while (!test_bit(STRIPE_DEGRADED, &sh->state));
-
-       while (sh != head_sh) {
-               next = list_first_entry(&sh->batch_list,
-                                       struct stripe_head, batch_list);
                list_del_init(&sh->batch_list);
 
-               set_mask_bits(&sh->state, ~STRIPE_EXPAND_SYNC_FLAG,
-                             head_sh->state & ~((1 << STRIPE_ACTIVE) |
-                                                (1 << STRIPE_PREREAD_ACTIVE) |
-                                                (1 << STRIPE_DEGRADED) |
-                                                STRIPE_EXPAND_SYNC_FLAG));
+               WARN_ON_ONCE(sh->state & ((1 << STRIPE_ACTIVE) |
+                                         (1 << STRIPE_SYNCING) |
+                                         (1 << STRIPE_REPLACED) |
+                                         (1 << STRIPE_PREREAD_ACTIVE) |
+                                         (1 << STRIPE_DELAYED) |
+                                         (1 << STRIPE_BIT_DELAY) |
+                                         (1 << STRIPE_FULL_WRITE) |
+                                         (1 << STRIPE_BIOFILL_RUN) |
+                                         (1 << STRIPE_COMPUTE_RUN)  |
+                                         (1 << STRIPE_OPS_REQ_PENDING) |
+                                         (1 << STRIPE_DISCARD) |
+                                         (1 << STRIPE_BATCH_READY) |
+                                         (1 << STRIPE_BATCH_ERR) |
+                                         (1 << STRIPE_BITMAP_PENDING)));
+               WARN_ON_ONCE(head_sh->state & ((1 << STRIPE_DISCARD) |
+                                             (1 << STRIPE_REPLACED)));
+
+               set_mask_bits(&sh->state, ~(STRIPE_EXPAND_SYNC_FLAGS |
+                                           (1 << STRIPE_DEGRADED)),
+                             head_sh->state & (1 << STRIPE_INSYNC));
+
                sh->check_state = head_sh->check_state;
                sh->reconstruct_state = head_sh->reconstruct_state;
-               for (i = 0; i < sh->disks; i++)
+               for (i = 0; i < sh->disks; i++) {
+                       if (test_and_clear_bit(R5_Overlap, &sh->dev[i].flags))
+                               do_wakeup = 1;
                        sh->dev[i].flags = head_sh->dev[i].flags &
                                (~((1 << R5_WriteError) | (1 << R5_Overlap)));
-
+               }
                spin_lock_irq(&sh->stripe_lock);
                sh->batch_head = NULL;
                spin_unlock_irq(&sh->stripe_lock);
-
-               set_bit(STRIPE_HANDLE, &sh->state);
+               if (handle_flags == 0 ||
+                   sh->state & handle_flags)
+                       set_bit(STRIPE_HANDLE, &sh->state);
                release_stripe(sh);
-
-               sh = next;
        }
+       spin_lock_irq(&head_sh->stripe_lock);
+       head_sh->batch_head = NULL;
+       spin_unlock_irq(&head_sh->stripe_lock);
+       for (i = 0; i < head_sh->disks; i++)
+               if (test_and_clear_bit(R5_Overlap, &head_sh->dev[i].flags))
+                       do_wakeup = 1;
+       if (head_sh->state & handle_flags)
+               set_bit(STRIPE_HANDLE, &head_sh->state);
+
+       if (do_wakeup)
+               wake_up(&head_sh->raid_conf->wait_for_overlap);
 }
 
 static void handle_stripe(struct stripe_head *sh)
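The handle_flags argument to the new break_stripe_batch_list() selects which freed members get re-handled: the error paths pass 0, meaning every member is marked STRIPE_HANDLE unconditionally, while handle_stripe_clean_event() passes STRIPE_EXPAND_SYNC_FLAGS so only members with one of those bits set are re-queued. A runnable sketch of just that predicate, with an abbreviated, illustrative flag mask:

    #include <stdio.h>

    /* Models the handle_flags test in break_stripe_batch_list():
     * handle_flags == 0 (the error path) marks every member for
     * handling; otherwise only members whose state overlaps the
     * mask are re-handled.  The flag value here is illustrative. */
    #define STRIPE_EXPANDING         (1UL << 0)
    #define STRIPE_EXPAND_SYNC_FLAGS (STRIPE_EXPANDING)  /* abbreviated */

    static int needs_handle(unsigned long state, unsigned long handle_flags)
    {
        return handle_flags == 0 || (state & handle_flags);
    }

    int main(void)
    {
        printf("%d\n", needs_handle(0, 0));                        /* 1: error path */
        printf("%d\n", needs_handle(0, STRIPE_EXPAND_SYNC_FLAGS)); /* 0 */
        printf("%d\n", needs_handle(STRIPE_EXPANDING,
                                    STRIPE_EXPAND_SYNC_FLAGS));    /* 1 */
        return 0;
    }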
@@ -4253,7 +4282,8 @@ static void handle_stripe(struct stripe_head *sh)
                return;
        }
 
-       check_break_stripe_batch_list(sh);
+       if (test_and_clear_bit(STRIPE_BATCH_ERR, &sh->state))
+               break_stripe_batch_list(sh, 0);
 
        if (test_bit(STRIPE_SYNC_REQUESTED, &sh->state) && !sh->batch_head) {
                spin_lock(&sh->stripe_lock);
@@ -4307,6 +4337,7 @@ static void handle_stripe(struct stripe_head *sh)
        if (s.failed > conf->max_degraded) {
                sh->check_state = 0;
                sh->reconstruct_state = 0;
+               break_stripe_batch_list(sh, 0);
                if (s.to_read+s.to_write+s.written)
                        handle_failed_stripe(conf, sh, &s, disks, &s.return_bi);
                if (s.syncing + s.replacing)
@@ -6221,8 +6252,11 @@ static int alloc_scratch_buffer(struct r5conf *conf, struct raid5_percpu *percpu
                percpu->spare_page = alloc_page(GFP_KERNEL);
        if (!percpu->scribble)
                percpu->scribble = scribble_alloc(max(conf->raid_disks,
-                       conf->previous_raid_disks), conf->chunk_sectors /
-                       STRIPE_SECTORS, GFP_KERNEL);
+                                                     conf->previous_raid_disks),
+                                                 max(conf->chunk_sectors,
+                                                     conf->prev_chunk_sectors)
+                                                  / STRIPE_SECTORS,
+                                                 GFP_KERNEL);
 
        if (!percpu->scribble || (conf->level == 6 && !percpu->spare_page)) {
                free_scratch_buffer(conf, percpu);
@@ -7198,6 +7232,15 @@ static int check_reshape(struct mddev *mddev)
        if (!check_stripe_cache(mddev))
                return -ENOSPC;
 
+       if (mddev->new_chunk_sectors > mddev->chunk_sectors ||
+           mddev->delta_disks > 0)
+               if (resize_chunks(conf,
+                                 conf->previous_raid_disks
+                                 + max(0, mddev->delta_disks),
+                                 max(mddev->new_chunk_sectors,
+                                     mddev->chunk_sectors)
+                           ) < 0)
+                       return -ENOMEM;
        return resize_stripes(conf, (conf->previous_raid_disks
                                     + mddev->delta_disks));
 }
@@ -7311,6 +7354,7 @@ static int raid5_start_reshape(struct mddev *mddev)
 
        clear_bit(MD_RECOVERY_SYNC, &mddev->recovery);
        clear_bit(MD_RECOVERY_CHECK, &mddev->recovery);
+       clear_bit(MD_RECOVERY_DONE, &mddev->recovery);
        set_bit(MD_RECOVERY_RESHAPE, &mddev->recovery);
        set_bit(MD_RECOVERY_RUNNING, &mddev->recovery);
        mddev->sync_thread = md_register_thread(md_do_sync, mddev,
index 7dc0dd86074b1702276ccb51ba166a38d5d0f7e3..896d603ad0da964d2c45f22039d8b733f0bef26e 100644 (file)
@@ -337,9 +337,12 @@ enum {
        STRIPE_ON_RELEASE_LIST,
        STRIPE_BATCH_READY,
        STRIPE_BATCH_ERR,
+       STRIPE_BITMAP_PENDING,  /* Being added to bitmap, don't add
+                                * to batch yet.
+                                */
 };
 
-#define STRIPE_EXPAND_SYNC_FLAG \
+#define STRIPE_EXPAND_SYNC_FLAGS \
        ((1 << STRIPE_EXPAND_SOURCE) |\
        (1 << STRIPE_EXPAND_READY) |\
        (1 << STRIPE_EXPANDING) |\
index 3ef0f90b128fc5bdf6d5e5dff0d5bbfbd5190d7b..157099243d6152190211b8625ba656d45feae003 100644 (file)
@@ -97,6 +97,7 @@ config MEDIA_CONTROLLER
 config MEDIA_CONTROLLER_DVB
        bool "Enable Media controller for DVB"
        depends on MEDIA_CONTROLLER
+       depends on BROKEN
        ---help---
          Enable the media controller API support for DVB.
 
index dd6ee57e3a4c680da2708151275ce5e453a5892a..6e5867c57305253227096b3e9715bc92b4c761bd 100644 (file)
@@ -57,5 +57,8 @@ config VIDEO_FB_IVTV
          This is used in the Hauppauge PVR-350 card. There is a driver
          homepage at <http://www.ivtvdriver.org>.
 
+         In order to use this module, you will need to boot with PAT disabled
+         on x86 systems, using the nopat kernel parameter.
+
          To compile this driver as a module, choose M here: the
          module will be called ivtvfb.
index 9ff1230192e8308f08df35f0aad0a132bf14993a..4cb365d4ffdcc9c4e12cde82f46d411cb86c7a13 100644 (file)
@@ -44,8 +44,8 @@
 #include <linux/ivtvfb.h>
 #include <linux/slab.h>
 
-#ifdef CONFIG_MTRR
-#include <asm/mtrr.h>
+#ifdef CONFIG_X86_64
+#include <asm/pat.h>
 #endif
 
 #include "ivtv-driver.h"
@@ -155,12 +155,11 @@ struct osd_info {
        /* Buffer size */
        u32 video_buffer_size;
 
-#ifdef CONFIG_MTRR
        /* video_base rounded down as required by hardware MTRRs */
        unsigned long fb_start_aligned_physaddr;
        /* video_base rounded up as required by hardware MTRRs */
        unsigned long fb_end_aligned_physaddr;
-#endif
+       int wc_cookie;
 
        /* Store the buffer offset */
        int set_osd_coords_x;
@@ -1099,6 +1098,8 @@ static int ivtvfb_init_vidmode(struct ivtv *itv)
 static int ivtvfb_init_io(struct ivtv *itv)
 {
        struct osd_info *oi = itv->osd_info;
+       /* Find the largest power of two that maps the whole buffer */
+       int size_shift = 31;
 
        mutex_lock(&itv->serialize_lock);
        if (ivtv_init_on_first_open(itv)) {
@@ -1132,29 +1133,16 @@ static int ivtvfb_init_io(struct ivtv *itv)
                        oi->video_pbase, oi->video_vbase,
                        oi->video_buffer_size / 1024);
 
-#ifdef CONFIG_MTRR
-       {
-               /* Find the largest power of two that maps the whole buffer */
-               int size_shift = 31;
-
-               while (!(oi->video_buffer_size & (1 << size_shift))) {
-                       size_shift--;
-               }
-               size_shift++;
-               oi->fb_start_aligned_physaddr = oi->video_pbase & ~((1 << size_shift) - 1);
-               oi->fb_end_aligned_physaddr = oi->video_pbase + oi->video_buffer_size;
-               oi->fb_end_aligned_physaddr += (1 << size_shift) - 1;
-               oi->fb_end_aligned_physaddr &= ~((1 << size_shift) - 1);
-               if (mtrr_add(oi->fb_start_aligned_physaddr,
-                       oi->fb_end_aligned_physaddr - oi->fb_start_aligned_physaddr,
-                            MTRR_TYPE_WRCOMB, 1) < 0) {
-                       IVTVFB_INFO("disabled mttr\n");
-                       oi->fb_start_aligned_physaddr = 0;
-                       oi->fb_end_aligned_physaddr = 0;
-               }
-       }
-#endif
-
+       while (!(oi->video_buffer_size & (1 << size_shift)))
+               size_shift--;
+       size_shift++;
+       oi->fb_start_aligned_physaddr = oi->video_pbase & ~((1 << size_shift) - 1);
+       oi->fb_end_aligned_physaddr = oi->video_pbase + oi->video_buffer_size;
+       oi->fb_end_aligned_physaddr += (1 << size_shift) - 1;
+       oi->fb_end_aligned_physaddr &= ~((1 << size_shift) - 1);
+       oi->wc_cookie = arch_phys_wc_add(oi->fb_start_aligned_physaddr,
+                                        oi->fb_end_aligned_physaddr -
+                                        oi->fb_start_aligned_physaddr);
        /* Blank the entire osd. */
        memset_io(oi->video_vbase, 0, oi->video_buffer_size);
 
@@ -1172,14 +1160,7 @@ static void ivtvfb_release_buffers (struct ivtv *itv)
 
        /* Release pseudo palette */
        kfree(oi->ivtvfb_info.pseudo_palette);
-
-#ifdef CONFIG_MTRR
-       if (oi->fb_end_aligned_physaddr) {
-               mtrr_del(-1, oi->fb_start_aligned_physaddr,
-                       oi->fb_end_aligned_physaddr - oi->fb_start_aligned_physaddr);
-       }
-#endif
-
+       arch_phys_wc_del(oi->wc_cookie);
        kfree(oi);
        itv->osd_info = NULL;
 }
@@ -1284,6 +1265,13 @@ static int __init ivtvfb_init(void)
        int registered = 0;
        int err;
 
+#ifdef CONFIG_X86_64
+       if (WARN(pat_enabled(),
+                "ivtvfb needs PAT disabled, boot with nopat kernel parameter\n")) {
+               return -ENODEV;
+       }
+#endif
+
        if (ivtvfb_card_id < -1 || ivtvfb_card_id >= IVTV_MAX_CARDS) {
                printk(KERN_ERR "ivtvfb:  ivtvfb_card_id parameter is out of range (valid range: -1 - %d)\n",
                     IVTV_MAX_CARDS - 1);
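The size_shift computation kept in ivtvfb_init_io() finds the highest set bit of the buffer size and goes one bit above it, yielding a power-of-two alignment guaranteed to cover the whole buffer; the base is then rounded down and the end rounded up to that alignment before being handed to arch_phys_wc_add(). A runnable sketch of the arithmetic with assumed example values (a 5 MiB buffer at an arbitrary base):

    #include <stdio.h>

    int main(void)
    {
        /* Assumed values, for illustration only. */
        unsigned long video_pbase = 0x0d500000UL;
        unsigned long video_buffer_size = 0x500000UL;  /* 5 MiB */

        /* Find the highest set bit, then go one above it. */
        int size_shift = 31;
        while (!(video_buffer_size & (1UL << size_shift)))
            size_shift--;          /* lands on bit 22 here */
        size_shift++;              /* 23 -> alignment of 8 MiB */

        unsigned long start = video_pbase & ~((1UL << size_shift) - 1);
        unsigned long end = video_pbase + video_buffer_size;
        end += (1UL << size_shift) - 1;
        end &= ~((1UL << size_shift) - 1);

        printf("align=%lu start=%#lx end=%#lx\n",
               1UL << size_shift, start, end);
        return 0;
    }

With these inputs the alignment comes out to 8 MiB, so the requested write-combining range spans 0x0d000000 to 0x0e000000.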
index ae498b53ee4042ef3e39e6f77a7272cffe4abe74..46e3840c7a37392402deb53a7a9eb2cb7b8b27b6 100644 (file)
@@ -431,6 +431,10 @@ int da9052_adc_read_temp(struct da9052 *da9052)
 EXPORT_SYMBOL_GPL(da9052_adc_read_temp);
 
 static const struct mfd_cell da9052_subdev_info[] = {
+       {
+               .name = "da9052-regulator",
+               .id = 0,
+       },
        {
                .name = "da9052-regulator",
                .id = 1,
@@ -483,10 +487,6 @@ static const struct mfd_cell da9052_subdev_info[] = {
                .name = "da9052-regulator",
                .id = 13,
        },
-       {
-               .name = "da9052-regulator",
-               .id = 14,
-       },
        {
                .name = "da9052-onkey",
        },
index 2c25271f8c417e21982e43b2d163e65556e6aca1..60f7141a6b02e66c23b59404ea59ba7d716c057f 100644 (file)
@@ -1029,6 +1029,18 @@ static inline void mmc_blk_reset_success(struct mmc_blk_data *md, int type)
        md->reset_done &= ~type;
 }
 
+int mmc_access_rpmb(struct mmc_queue *mq)
+{
+       struct mmc_blk_data *md = mq->data;
+       /*
+        * If this is an RPMB partition access, return true
+        */
+       if (md && md->part_type == EXT_CSD_PART_CONFIG_ACC_RPMB)
+               return true;
+
+       return false;
+}
+
 static int mmc_blk_issue_discard_rq(struct mmc_queue *mq, struct request *req)
 {
        struct mmc_blk_data *md = mq->data;
index 236d194c28835e87adb9bc108c55f5e122353c68..8efa3684aef849174ccef4053e049e3c95d8646f 100644 (file)
@@ -38,7 +38,7 @@ static int mmc_prep_request(struct request_queue *q, struct request *req)
                return BLKPREP_KILL;
        }
 
-       if (mq && mmc_card_removed(mq->card))
+       if (mq && (mmc_card_removed(mq->card) || mmc_access_rpmb(mq)))
                return BLKPREP_KILL;
 
        req->cmd_flags |= REQ_DONTPREP;
index 5752d50049a34c2a9ee8f6585105990f1e018d05..99e6521e61696202c036dfb00fe6bdcd96c0f613 100644 (file)
@@ -73,4 +73,6 @@ extern void mmc_queue_bounce_post(struct mmc_queue_req *);
 extern int mmc_packed_init(struct mmc_queue *, struct mmc_card *);
 extern void mmc_packed_clean(struct mmc_queue *);
 
+extern int mmc_access_rpmb(struct mmc_queue *);
+
 #endif
index c296bc098fe23684f4be66195b2342d7cd0159ef..92e7671426ebc214ce2d1ff2c35df50e68f02ee0 100644 (file)
@@ -2651,6 +2651,7 @@ int mmc_pm_notify(struct notifier_block *notify_block,
        switch (mode) {
        case PM_HIBERNATION_PREPARE:
        case PM_SUSPEND_PREPARE:
+       case PM_RESTORE_PREPARE:
                spin_lock_irqsave(&host->lock, flags);
                host->rescan_disable = 1;
                spin_unlock_irqrestore(&host->lock, flags);
index 03d7c7521d9712e051cd83579ef26d1938368469..9a39e0b7e583625e7fa8a3f24dab0179e3da880a 100644 (file)
@@ -1304,7 +1304,7 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
 
        if (ios->clock) {
                unsigned int clock_min = ~0U;
-               u32 clkdiv;
+               int clkdiv;
 
                spin_lock_bh(&host->lock);
                if (!host->mode_reg) {
@@ -1328,7 +1328,12 @@ static void atmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
                /* Calculate clock divider */
                if (host->caps.has_odd_clk_div) {
                        clkdiv = DIV_ROUND_UP(host->bus_hz, clock_min) - 2;
-                       if (clkdiv > 511) {
+                       if (clkdiv < 0) {
+                               dev_warn(&mmc->class_dev,
+                                        "clock %u too fast; using %lu\n",
+                                        clock_min, host->bus_hz / 2);
+                               clkdiv = 0;
+                       } else if (clkdiv > 511) {
                                dev_warn(&mmc->class_dev,
                                         "clock %u too slow; using %lu\n",
                                         clock_min, host->bus_hz / (511 + 2));
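The u32-to-int change for clkdiv matters because DIV_ROUND_UP(bus_hz, clock_min) - 2 goes negative whenever the requested clock is more than half the bus clock; with an unsigned clkdiv the value wraps and the old "> 511" test fires the misleading "too slow" branch. A runnable demonstration with assumed example rates:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        /* Assumed rates, for illustration. */
        unsigned long bus_hz = 50000000;     /* 50 MHz */
        unsigned int clock_min = 50000000;   /* caller wants bus_hz */

        unsigned int u_clkdiv = DIV_ROUND_UP(bus_hz, clock_min) - 2;
        int s_clkdiv = (int)DIV_ROUND_UP(bus_hz, clock_min) - 2;

        /* Unsigned: 1 - 2 wraps to 4294967295, tripping the "too
         * slow" branch; signed: -1 is caught and clamped to 0,
         * reporting "too fast" as intended. */
        printf("unsigned clkdiv = %u\n", u_clkdiv);
        printf("signed   clkdiv = %d -> clamped to %d\n",
               s_clkdiv, s_clkdiv < 0 ? 0 : s_clkdiv);
        return 0;
    }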
index 38b29265cc7c7625484db40b4d38f2148a2c1aa6..5f5adafb253afec16429c3bb357e15be8d07bbb7 100644 (file)
@@ -589,9 +589,11 @@ static int dw_mci_idmac_init(struct dw_mci *host)
                host->ring_size = PAGE_SIZE / sizeof(struct idmac_desc);
 
                /* Forward link the descriptor list */
-               for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++)
+               for (i = 0, p = host->sg_cpu; i < host->ring_size - 1; i++, p++) {
                        p->des3 = cpu_to_le32(host->sg_dma +
                                        (sizeof(struct idmac_desc) * (i + 1)));
+                       p->des1 = 0;
+               }
 
                /* Set the last descriptor as the end-of-ring descriptor */
                p->des3 = cpu_to_le32(host->sg_dma);
@@ -1300,7 +1302,8 @@ static int dw_mci_get_cd(struct mmc_host *mmc)
        int gpio_cd = mmc_gpio_get_cd(mmc);
 
        /* Use platform get_cd function, else try onboard card detect */
-       if (brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION)
+       if ((brd->quirks & DW_MCI_QUIRK_BROKEN_CARD_DETECTION) ||
+           (mmc->caps & MMC_CAP_NONREMOVABLE))
                present = 1;
        else if (!IS_ERR_VALUE(gpio_cd))
                present = gpio_cd;
index 2b6ef6bd5d5fee1c3277c4970549da294b4706da..7eff087cf515edfd1e4d51a68989197d82749744 100644 (file)
@@ -1408,7 +1408,7 @@ static int sh_mmcif_probe(struct platform_device *pdev)
        host            = mmc_priv(mmc);
        host->mmc       = mmc;
        host->addr      = reg;
-       host->timeout   = msecs_to_jiffies(1000);
+       host->timeout   = msecs_to_jiffies(10000);
        host->ccs_enable = !pd || !pd->ccs_unsupported;
        host->clk_ctrl2_enable = pd && pd->clk_ctrl2_present;
 
index 7c8b1694a134da91cbeb5b4764922c8e7d8d06c2..3af137f49ac9aa49c0c58ad59977b53b9d81f358 100644 (file)
@@ -223,7 +223,7 @@ static int m25p_probe(struct spi_device *spi)
         */
        if (data && data->type)
                flash_name = data->type;
-       else if (!strcmp(spi->modalias, "nor-jedec"))
+       else if (!strcmp(spi->modalias, "spi-nor"))
                flash_name = NULL; /* auto-detect */
        else
                flash_name = spi->modalias;
@@ -255,7 +255,7 @@ static int m25p_remove(struct spi_device *spi)
  * since most of these flash are compatible to some extent, and their
  * differences can often be differentiated by the JEDEC read-ID command, we
  * encourage new users to add support to the spi-nor library, and simply bind
- * against a generic string here (e.g., "nor-jedec").
+ * against a generic string here (e.g., "jedec,spi-nor").
  *
  * Many flash names are kept here in this list (as well as in spi-nor.c) to
  * keep them available as module aliases for existing platforms.
@@ -305,7 +305,7 @@ static const struct spi_device_id m25p_ids[] = {
         * Generic support for SPI NOR that can be identified by the JEDEC READ
         * ID opcode (0x9F). Use this, if possible.
         */
-       {"nor-jedec"},
+       {"spi-nor"},
        { },
 };
 MODULE_DEVICE_TABLE(spi, m25p_ids);
index a3196b750a220663b22f866fbf8173ca6fe81d4a..58df07acdbdb5f4c397511834265c233c3a7f62d 100644 (file)
@@ -191,9 +191,11 @@ static int __init mtd_readtest_init(void)
                                err = ret;
                }
 
-               err = mtdtest_relax();
-               if (err)
+               ret = mtdtest_relax();
+               if (ret) {
+                       err = ret;
                        goto out;
+               }
        }
 
        if (err)
index db2c05b6fe7facc9f07a565d40ad6ee507e3da6e..c9eb78f10a0db829ca396539ccf49331c9f06a1e 100644 (file)
@@ -310,6 +310,8 @@ static void ubiblock_do_work(struct work_struct *work)
        blk_rq_map_sg(req->q, req, pdu->usgl.sg);
 
        ret = ubiblock_read(pdu);
+       rq_flush_dcache_pages(req);
+
        blk_mq_end_request(req, ret);
 }
 
index 4df28943d2229035166d2bb4a72ec11c8f9671c5..e8d3c1d35453d1e182cd4b82388978f580abe946 100644 (file)
@@ -624,7 +624,7 @@ int __bond_opt_set(struct bonding *bond,
 out:
        if (ret)
                bond_opt_error_interpret(bond, opt, ret, val);
-       else
+       else if (bond->dev->reg_state == NETREG_REGISTERED)
                call_netdevice_notifiers(NETDEV_CHANGEINFODATA, bond->dev);
 
        return ret;
index 6bddfe062b516467b6cc2be75a53c17c2040b2d7..fc55e8e0351dfe5f3ca436d8aed64bf6d660db84 100644 (file)
@@ -509,10 +509,11 @@ static int xcan_rx(struct net_device *ndev)
                        cf->can_id |= CAN_RTR_FLAG;
        }
 
-       if (!(id_xcan & XCAN_IDR_SRR_MASK)) {
-               data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
-               data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
+       /* DW1/DW2 must always be read to remove message from RXFIFO */
+       data[0] = priv->read_reg(priv, XCAN_RXFIFO_DW1_OFFSET);
+       data[1] = priv->read_reg(priv, XCAN_RXFIFO_DW2_OFFSET);
 
+       if (!(cf->can_id & CAN_RTR_FLAG)) {
                /* Change Xilinx CAN data format to socketCAN data format */
                if (cf->can_dlc > 0)
                        *(__be32 *)(cf->data) = cpu_to_be32(data[0]);
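The hoisted DW1/DW2 reads reflect the hardware property the comment records: reading the data-word registers is what pops the message out of the RX FIFO, so the reads must happen even for RTR frames that carry no payload. A toy, runnable model of that behaviour with stub register reads (all names hypothetical):

    #include <stdio.h>

    /* Stub register reads standing in for priv->read_reg(); in this
     * model the DW2 read is what advances the RX FIFO. */
    static int fifo_depth = 1;
    static unsigned int read_dw1(void) { return 0xdeadbeef; }
    static unsigned int read_dw2(void) { fifo_depth--; return 0xcafef00d; }

    static void rx_one(int is_rtr)
    {
        unsigned int data[2];

        /* DW1/DW2 must always be read to advance the FIFO, even for
         * remote frames that carry no data. */
        data[0] = read_dw1();
        data[1] = read_dw2();

        if (!is_rtr)
            printf("payload %08x %08x\n", data[0], data[1]);
    }

    int main(void)
    {
        rx_one(1); /* RTR frame: no payload, but the FIFO still advances */
        printf("fifo_depth=%d\n", fifo_depth);
        return 0;
    }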
index af639ab4c55b64fd886df0413d090afd2fe618ba..cf309aa92802623ec0532b56b1dd10a88a234af1 100644 (file)
@@ -1469,6 +1469,9 @@ static void __exit mv88e6xxx_cleanup(void)
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6171)
        unregister_switch_driver(&mv88e6171_switch_driver);
 #endif
+#if IS_ENABLED(CONFIG_NET_DSA_MV88E6352)
+       unregister_switch_driver(&mv88e6352_switch_driver);
+#endif
 #if IS_ENABLED(CONFIG_NET_DSA_MV88E6123_61_65)
        unregister_switch_driver(&mv88e6123_61_65_switch_driver);
 #endif
index 089c269637b725da7876db7e4988baa77b5ec171..426916036151649ca3a2cef4e69eb00735826c05 100644 (file)
@@ -180,6 +180,7 @@ config SUNLANCE
 config AMD_XGBE
        tristate "AMD 10GbE Ethernet driver"
        depends on (OF_NET || ACPI) && HAS_IOMEM && HAS_DMA
+       depends on ARM64 || COMPILE_TEST
        select PHYLIB
        select AMD_XGBE_PHY
        select BITREVERSE
index db84ddcfec8464191a3edcccfd87c869ac1c5a7c..9fd6c69a8bac3c77d1c0c6e99eb4f3644561f78a 100644 (file)
@@ -423,7 +423,7 @@ static void xgbe_tx_timer(unsigned long data)
        if (napi_schedule_prep(napi)) {
                /* Disable Tx and Rx interrupts */
                if (pdata->per_channel_irq)
-                       disable_irq(channel->dma_irq);
+                       disable_irq_nosync(channel->dma_irq);
                else
                        xgbe_disable_rx_tx_ints(pdata);
 
index f4054d242f3c7ac6e52437c5cb795cac8e3da2dd..19e38afbc5ee3f5a015fd55ecba813c26a9bca1a 100644 (file)
@@ -1,6 +1,7 @@
 config NET_XGENE
        tristate "APM X-Gene SoC Ethernet Driver"
        depends on HAS_DMA
+       depends on ARCH_XGENE || COMPILE_TEST
        select PHYLIB
        help
          This is the Ethernet driver for the on-chip ethernet interface on the
index 77363d6805321534a582e579552f46e254737e25..a3b1c07ae0af0935f3026ba8a56e21512e238e36 100644 (file)
@@ -2464,6 +2464,7 @@ err_out_powerdown:
        ssb_bus_may_powerdown(sdev->bus);
 
 err_out_free_dev:
+       netif_napi_del(&bp->napi);
        free_netdev(dev);
 
 out:
@@ -2480,6 +2481,7 @@ static void b44_remove_one(struct ssb_device *sdev)
                b44_unregister_phy_one(bp);
        ssb_device_disable(sdev, 0);
        ssb_bus_may_powerdown(sdev->bus);
+       netif_napi_del(&bp->napi);
        free_netdev(dev);
        ssb_pcihost_set_power_state(sdev, PCI_D3hot);
        ssb_set_drvdata(sdev, NULL);
index a3b0f7a0c61e0d6ffeefcd88ae81ab751554e085..1f82a04ce01a8468e7d8dde208babdea4220ab88 100644 (file)
@@ -1774,7 +1774,7 @@ struct bnx2x {
        int                     stats_state;
 
        /* used for synchronization of concurrent threads statistics handling */
-       struct mutex            stats_lock;
+       struct semaphore        stats_lock;
 
        /* used by dmae command loader */
        struct dmae_command     stats_dmae;
index a8bb8f664d3d7f9a031158d5cac82820fbed7d6d..ec56a9b65dc3a313e1b0571e8a58047c161f6507 100644 (file)
@@ -4786,6 +4786,11 @@ int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
 {
        struct bnx2x *bp = netdev_priv(dev);
 
+       if (pci_num_vf(bp->pdev)) {
+               DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
+               return -EPERM;
+       }
+
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                BNX2X_ERR("Can't perform change MTU during parity recovery\n");
                return -EAGAIN;
@@ -4938,11 +4943,6 @@ int bnx2x_resume(struct pci_dev *pdev)
        }
        bp = netdev_priv(dev);
 
-       if (pci_num_vf(bp->pdev)) {
-               DP(BNX2X_MSG_IOV, "VFs are enabled, can not change MTU\n");
-               return -EPERM;
-       }
-
        if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
                BNX2X_ERR("Handling parity error recovery. Try again later\n");
                return -EAGAIN;
index 556dcc162a6252a36fe550110b4abbb3d1bbcf60..33501bcddc48eb1f6157a08e3e3d1e08dc087c25 100644 (file)
@@ -12054,7 +12054,7 @@ static int bnx2x_init_bp(struct bnx2x *bp)
        mutex_init(&bp->port.phy_mutex);
        mutex_init(&bp->fw_mb_mutex);
        mutex_init(&bp->drv_info_mutex);
-       mutex_init(&bp->stats_lock);
+       sema_init(&bp->stats_lock, 1);
        bp->drv_info_mng_owner = false;
 
        INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
@@ -13371,8 +13371,13 @@ static int bnx2x_init_one(struct pci_dev *pdev,
        /* Management FW 'remembers' living interfaces. Allow it some time
         * to forget previously living interfaces, allowing a proper re-load.
         */
-       if (is_kdump_kernel())
-               msleep(5000);
+       if (is_kdump_kernel()) {
+               ktime_t now = ktime_get_boottime();
+               ktime_t fw_ready_time = ktime_set(5, 0);
+
+               if (ktime_before(now, fw_ready_time))
+                       msleep(ktime_ms_delta(fw_ready_time, now));
+       }
 
        /* An estimated maximum supported CoS number according to the chip
         * version.
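The kdump change above converts a fixed 5-second sleep into a sleep for whatever remains of a 5-second window measured from boot, so time already spent booting the crash kernel is credited. A runnable userspace equivalent of the computation, using the Linux-specific CLOCK_BOOTTIME clock:

    #include <stdio.h>
    #include <time.h>

    int main(void)
    {
        /* Sleep only for whatever remains of a 5 s "firmware ready"
         * window measured from boot, instead of a fixed 5 s. */
        struct timespec now;
        clock_gettime(CLOCK_BOOTTIME, &now);

        long long now_ms = now.tv_sec * 1000LL + now.tv_nsec / 1000000;
        long long fw_ready_ms = 5000;

        if (now_ms < fw_ready_ms)
            printf("would sleep %lld ms\n", fw_ready_ms - now_ms);
        else
            printf("firmware window already elapsed, no sleep\n");
        return 0;
    }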
@@ -13685,9 +13690,10 @@ static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
        cancel_delayed_work_sync(&bp->sp_task);
        cancel_delayed_work_sync(&bp->period_task);
 
-       mutex_lock(&bp->stats_lock);
-       bp->stats_state = STATS_STATE_DISABLED;
-       mutex_unlock(&bp->stats_lock);
+       if (!down_timeout(&bp->stats_lock, HZ / 10)) {
+               bp->stats_state = STATS_STATE_DISABLED;
+               up(&bp->stats_lock);
+       }
 
        bnx2x_save_statistics(bp);
 
index 266b055c2360af759c7f78395636d541210e5b9d..69d699f0730a3bd4d8980607e0a36cd8da461f1e 100644 (file)
@@ -1372,19 +1372,23 @@ void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
         * that context in case someone is in the middle of a transition.
         * For other events, wait a bit until lock is taken.
         */
-       if (!mutex_trylock(&bp->stats_lock)) {
+       if (down_trylock(&bp->stats_lock)) {
                if (event == STATS_EVENT_UPDATE)
                        return;
 
                DP(BNX2X_MSG_STATS,
                   "Unlikely stats' lock contention [event %d]\n", event);
-               mutex_lock(&bp->stats_lock);
+               if (unlikely(down_timeout(&bp->stats_lock, HZ / 10))) {
+                       BNX2X_ERR("Failed to take stats lock [event %d]\n",
+                                 event);
+                       return;
+               }
        }
 
        bnx2x_stats_stm[state][event].action(bp);
        bp->stats_state = bnx2x_stats_stm[state][event].next_state;
 
-       mutex_unlock(&bp->stats_lock);
+       up(&bp->stats_lock);
 
        if ((event != STATS_EVENT_UPDATE) || netif_msg_timer(bp))
                DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
@@ -1970,7 +1974,11 @@ int bnx2x_stats_safe_exec(struct bnx2x *bp,
        /* Wait for statistics to end [while blocking further requests],
         * then run supplied function 'safely'.
         */
-       mutex_lock(&bp->stats_lock);
+       rc = down_timeout(&bp->stats_lock, HZ / 10);
+       if (unlikely(rc)) {
+               BNX2X_ERR("Failed to take statistics lock for safe execution\n");
+               goto out_no_lock;
+       }
 
        bnx2x_stats_comp(bp);
        while (bp->stats_pending && cnt--)
@@ -1988,7 +1996,7 @@ out:
        /* No need to restart statistics - if they're enabled, the timer
         * will restart the statistics.
         */
-       mutex_unlock(&bp->stats_lock);
-
+       up(&bp->stats_lock);
+out_no_lock:
        return rc;
 }
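The mutex-to-semaphore switch for stats_lock lets the driver attempt the lock without blocking (down_trylock) and otherwise wait only a bounded time (down_timeout with HZ/10 here) instead of sleeping indefinitely, so a wedged statistics state can no longer hang unload or error recovery. A runnable userspace model of the same try-then-bounded-wait shape using a POSIX semaphore (names and the 100 ms budget are illustrative):

    #include <semaphore.h>
    #include <stdio.h>
    #include <time.h>

    static sem_t stats_lock;

    /* Models bnx2x_stats_handle(): try the lock first; a periodic
     * UPDATE event simply gives up, anything else waits a bounded
     * 100 ms and bails out with an error rather than blocking. */
    static int handle_event(int is_update)
    {
        if (sem_trywait(&stats_lock) == 0)
            goto locked;
        if (is_update)
            return -1;          /* drop periodic update, try next tick */

        struct timespec deadline;
        clock_gettime(CLOCK_REALTIME, &deadline);
        deadline.tv_nsec += 100 * 1000 * 1000;   /* +100 ms */
        if (deadline.tv_nsec >= 1000000000) {
            deadline.tv_sec++;
            deadline.tv_nsec -= 1000000000;
        }
        if (sem_timedwait(&stats_lock, &deadline) != 0)
            return -1;          /* could not take the lock in time */

    locked:
        /* ... run the state machine ... */
        sem_post(&stats_lock);
        return 0;
    }

    int main(void)
    {
        sem_init(&stats_lock, 0, 1);
        printf("event handled: %d\n", handle_event(0));
        sem_destroy(&stats_lock);
        return 0;
    }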
index e7651b3c6c5767f7609115ef0430c13aac8d17a9..420949cc55aab6349b75c33f0c4f061aa384d537 100644 (file)
@@ -299,9 +299,6 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                        phy_name = "external RGMII (no delay)";
                else
                        phy_name = "external RGMII (TX delay)";
-               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
-               reg |= RGMII_MODE_EN | id_mode_dis;
-               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
                bcmgenet_sys_writel(priv,
                                    PORT_MODE_EXT_GPHY, SYS_PORT_CTRL);
                break;
@@ -310,6 +307,15 @@ int bcmgenet_mii_config(struct net_device *dev, bool init)
                return -EINVAL;
        }
 
+       /* This is an external PHY (xMII), so we need to enable the RGMII
+        * block for the interface to work
+        */
+       if (priv->ext_phy) {
+               reg = bcmgenet_ext_readl(priv, EXT_RGMII_OOB_CTRL);
+               reg |= RGMII_MODE_EN | id_mode_dis;
+               bcmgenet_ext_writel(priv, reg, EXT_RGMII_OOB_CTRL);
+       }
+
        if (init)
                dev_info(kdev, "configuring instance for %s\n", phy_name);
 
index 594a2ab36d3175de2633490eec1e0395dbb74e59..68f3c13c9ef6d992ac7eadde882c16b51375d6e8 100644 (file)
@@ -2414,7 +2414,7 @@ bfa_ioc_boot(struct bfa_ioc *ioc, enum bfi_fwboot_type boot_type,
        if (status == BFA_STATUS_OK)
                bfa_ioc_lpu_start(ioc);
        else
-               bfa_nw_iocpf_timeout(ioc);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
 
        return status;
 }
@@ -3029,7 +3029,7 @@ bfa_ioc_poll_fwinit(struct bfa_ioc *ioc)
        }
 
        if (ioc->iocpf.poll_time >= BFA_IOC_TOV) {
-               bfa_nw_iocpf_timeout(ioc);
+               bfa_fsm_send_event(&ioc->iocpf, IOCPF_E_TIMEOUT);
        } else {
                ioc->iocpf.poll_time += BFA_IOC_POLL_TOV;
                mod_timer(&ioc->iocpf_timer, jiffies +
index 37072a83f9d6d0afb29de683051e13af94a78fd8..caae6cb2bc1a4528f4d97bd8e1e11adf074bc81e 100644 (file)
@@ -3701,10 +3701,6 @@ bnad_pci_probe(struct pci_dev *pdev,
        setup_timer(&bnad->bna.ioceth.ioc.sem_timer, bnad_iocpf_sem_timeout,
                                ((unsigned long)bnad));
 
-       /* Now start the timer before calling IOC */
-       mod_timer(&bnad->bna.ioceth.ioc.iocpf_timer,
-                 jiffies + msecs_to_jiffies(BNA_IOC_TIMER_FREQ));
-
        /*
         * Start the chip
         * If the call back comes with error, we bail out.
index ebf462d8082f79373c1ea234e4f3034a16c53e73..badea368bdc89621927101dc0a79504765b87248 100644 (file)
@@ -30,6 +30,7 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
                        u32 *bfi_image_size, char *fw_name)
 {
        const struct firmware *fw;
+       u32 n;
 
        if (request_firmware(&fw, fw_name, &pdev->dev)) {
                pr_alert("Can't locate firmware %s\n", fw_name);
@@ -40,6 +41,12 @@ cna_read_firmware(struct pci_dev *pdev, u32 **bfi_image,
        *bfi_image_size = fw->size/sizeof(u32);
        bfi_fw = fw;
 
+       /* Convert loaded firmware to host order as it is stored in file
+        * as sequence of LE32 integers.
+        */
+       for (n = 0; n < *bfi_image_size; n++)
+               le32_to_cpus(*bfi_image + n);
+
        return *bfi_image;
 error:
        return NULL;
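The new loop converts the firmware image from on-disk little-endian 32-bit words to host order once at load time, so the rest of the driver can treat it as host-order data on both little- and big-endian machines. A runnable sketch of an equivalent in-place conversion (le32_buf_to_host() is a hypothetical stand-in for the kernel's le32_to_cpus() loop):

    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    /* Convert a buffer of little-endian 32-bit words to host order in
     * place: a no-op on little-endian hosts, a byte swap on big-endian
     * ones. */
    static void le32_buf_to_host(uint32_t *words, size_t n)
    {
        for (size_t i = 0; i < n; i++) {
            const uint8_t *b = (const uint8_t *)&words[i];
            words[i] = (uint32_t)b[0] | (uint32_t)b[1] << 8 |
                       (uint32_t)b[2] << 16 | (uint32_t)b[3] << 24;
        }
    }

    int main(void)
    {
        uint32_t img[2];
        uint8_t raw[8] = { 0x78, 0x56, 0x34, 0x12, 0xEF, 0xBE, 0xAD, 0xDE };
        memcpy(img, raw, sizeof(img));
        le32_buf_to_host(img, 2);
        printf("%08x %08x\n", img[0], img[1]);  /* 12345678 deadbeef */
        return 0;
    }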
index 4104d49f005d4a825eb14b99efeb5f158ab456d5..fc646a41d5481406400bb4013ced8f96cf236092 100644 (file)
@@ -350,6 +350,9 @@ static int macb_mii_probe(struct net_device *dev)
        else
                phydev->supported &= PHY_BASIC_FEATURES;
 
+       if (bp->caps & MACB_CAPS_NO_GIGABIT_HALF)
+               phydev->supported &= ~SUPPORTED_1000baseT_Half;
+
        phydev->advertising = phydev->supported;
 
        bp->link = 0;
@@ -981,7 +984,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
        struct macb_queue *queue = dev_id;
        struct macb *bp = queue->bp;
        struct net_device *dev = bp->dev;
-       u32 status;
+       u32 status, ctrl;
 
        status = queue_readl(queue, ISR);
 
@@ -1037,6 +1040,21 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
                 * add that if/when we get our hands on a full-blown MII PHY.
                 */
 
+               /* There is a hardware issue under heavy load where DMA can
+                * stop, this causes endless "used buffer descriptor read"
+                * interrupts but it can be cleared by re-enabling RX. See
+                * the at91 manual, section 41.3.1 or the Zynq manual
+                * section 16.7.4 for details.
+                */
+               if (status & MACB_BIT(RXUBR)) {
+                       ctrl = macb_readl(bp, NCR);
+                       macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+                       macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+                       if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+                               macb_writel(bp, ISR, MACB_BIT(RXUBR));
+               }
+
                if (status & MACB_BIT(ISR_ROVR)) {
                        /* We missed at least one packet */
                        if (macb_is_gem(bp))
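The RXUBR workaround above restarts receive DMA by clearing and re-setting the receive-enable bit in NCR, then acknowledges the interrupt on controllers that clear ISR on write. A toy, runnable register model of that toggle (the bit position is illustrative, not the real NCR layout):

    #include <stdio.h>

    /* Toy register model: clearing and re-setting the receive-enable
     * bit restarts RX DMA after the stall described above. */
    #define NCR_RE   (1u << 2)          /* illustrative bit position */

    static unsigned int ncr;
    static unsigned int readl_ncr(void) { return ncr; }
    static void writel_ncr(unsigned int v)
    {
        if (!(ncr & NCR_RE) && (v & NCR_RE))
            printf("RX DMA restarted\n");
        ncr = v;
    }

    int main(void)
    {
        ncr = NCR_RE;                   /* receiver running but wedged */
        unsigned int ctrl = readl_ncr();
        writel_ncr(ctrl & ~NCR_RE);     /* stop RX */
        writel_ncr(ctrl | NCR_RE);      /* restart RX, clearing the stall */
        return 0;
    }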
@@ -2684,6 +2702,14 @@ static const struct macb_config emac_config = {
        .init = at91ether_init,
 };
 
+static const struct macb_config zynq_config = {
+       .caps = MACB_CAPS_SG_DISABLED | MACB_CAPS_GIGABIT_MODE_AVAILABLE |
+               MACB_CAPS_NO_GIGABIT_HALF,
+       .dma_burst_length = 16,
+       .clk_init = macb_clk_init,
+       .init = macb_init,
+};
+
 static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "cdns,at32ap7000-macb" },
        { .compatible = "cdns,at91sam9260-macb", .data = &at91sam9260_config },
@@ -2694,6 +2720,7 @@ static const struct of_device_id macb_dt_ids[] = {
        { .compatible = "atmel,sama5d4-gem", .data = &sama5d4_config },
        { .compatible = "cdns,at91rm9200-emac", .data = &emac_config },
        { .compatible = "cdns,emac", .data = &emac_config },
+       { .compatible = "cdns,zynq-gem", .data = &zynq_config },
        { /* sentinel */ }
 };
 MODULE_DEVICE_TABLE(of, macb_dt_ids);
index eb7d76f7bf6aaf983e97408ced9b359b54c8ddc3..24b1d9bcd8654d5aba2401b7b6a85b563f09d9cc 100644 (file)
 #define MACB_CAPS_ISR_CLEAR_ON_WRITE           0x00000001
 #define MACB_CAPS_USRIO_HAS_CLKEN              0x00000002
 #define MACB_CAPS_USRIO_DEFAULT_IS_MII         0x00000004
+#define MACB_CAPS_NO_GIGABIT_HALF              0x00000008
 #define MACB_CAPS_FIFO_MODE                    0x10000000
 #define MACB_CAPS_GIGABIT_MODE_AVAILABLE       0x20000000
 #define MACB_CAPS_SG_DISABLED                  0x40000000
index 28d9ca675a274f9876473bcce7e6995a14e1289e..68d47b196daec3d3c5d0b8af19f8d167735e1e79 100644 (file)
@@ -131,8 +131,15 @@ static void enic_get_drvinfo(struct net_device *netdev,
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_devcmd_fw_info *fw_info;
+       int err;
 
-       enic_dev_fw_info(enic, &fw_info);
+       err = enic_dev_fw_info(enic, &fw_info);
+       /* return only when pci_zalloc_consistent fails in vnic_dev_fw_info().
+        * For other failures, like devcmd failure, we return previously
+        * recorded info.
+        */
+       if (err == -ENOMEM)
+               return;
 
        strlcpy(drvinfo->driver, DRV_NAME, sizeof(drvinfo->driver));
        strlcpy(drvinfo->version, DRV_VERSION, sizeof(drvinfo->version));
@@ -181,8 +188,15 @@ static void enic_get_ethtool_stats(struct net_device *netdev,
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *vstats;
        unsigned int i;
-
-       enic_dev_stats_dump(enic, &vstats);
+       int err;
+
+       err = enic_dev_stats_dump(enic, &vstats);
+       /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump().
+        * For other failures, like devcmd failure, we return previously
+        * recorded stats.
+        */
+       if (err == -ENOMEM)
+               return;
 
        for (i = 0; i < enic_n_tx_stats; i++)
                *(data++) = ((u64 *)&vstats->tx)[enic_tx_stats[i].index];
index 204bd182473bceaaabaa5b1eba5ed618de751808..eadae1b412c652974dde24a9a76c5d74a8c3fa29 100644 (file)
@@ -615,8 +615,15 @@ static struct rtnl_link_stats64 *enic_get_stats(struct net_device *netdev,
 {
        struct enic *enic = netdev_priv(netdev);
        struct vnic_stats *stats;
+       int err;
 
-       enic_dev_stats_dump(enic, &stats);
+       err = enic_dev_stats_dump(enic, &stats);
+       /* return only when pci_zalloc_consistent fails in vnic_dev_stats_dump().
+        * For other failures, like devcmd failure, we return previously
+        * recorded stats.
+        */
+       if (err == -ENOMEM)
+               return net_stats;
 
        net_stats->tx_packets = stats->tx.tx_frames_ok;
        net_stats->tx_bytes = stats->tx.tx_bytes_ok;
@@ -1407,6 +1414,7 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                 */
                enic_calc_int_moderation(enic, &enic->rq[rq]);
 
+       enic_poll_unlock_napi(&enic->rq[rq]);
        if (work_done < work_to_do) {
 
                /* Some work done, but not enough to stay in polling,
@@ -1418,7 +1426,6 @@ static int enic_poll_msix_rq(struct napi_struct *napi, int budget)
                        enic_set_int_moderation(enic, &enic->rq[rq]);
                vnic_intr_unmask(&enic->intr[intr]);
        }
-       enic_poll_unlock_napi(&enic->rq[rq]);
 
        return work_done;
 }
index 36a2ed606c911f21355360fad81eb39b18162c59..c4b2183bf352fb2a1881001777df91857c2d1f79 100644 (file)
@@ -188,16 +188,15 @@ void vnic_rq_clean(struct vnic_rq *rq,
        struct vnic_rq_buf *buf;
        u32 fetch_index;
        unsigned int count = rq->ring.desc_count;
+       int i;
 
        buf = rq->to_clean;
 
-       while (vnic_rq_desc_used(rq) > 0) {
-
+       for (i = 0; i < rq->ring.desc_count; i++) {
                (*buf_clean)(rq, buf);
-
-               buf = rq->to_clean = buf->next;
-               rq->ring.desc_avail++;
+               buf = buf->next;
        }
+       rq->ring.desc_avail = rq->ring.desc_count - 1;
 
        /* Use current fetch_index as the ring starting point */
        fetch_index = ioread32(&rq->ctrl->fetch_index);
index fb140faeafb1cbda612cd11a9a1aac04e936c4a3..c5e1d0ac75f909f843dd0397ad41b85eeb26a164 100644 (file)
@@ -1720,9 +1720,9 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
        total_size = buf_len;
 
        get_fat_cmd.size = sizeof(struct be_cmd_req_get_fat) + 60*1024;
-       get_fat_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                             get_fat_cmd.size,
-                                             &get_fat_cmd.dma);
+       get_fat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            get_fat_cmd.size,
+                                            &get_fat_cmd.dma, GFP_ATOMIC);
        if (!get_fat_cmd.va) {
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while reading FAT data\n");
@@ -1767,8 +1767,8 @@ int be_cmd_get_regs(struct be_adapter *adapter, u32 buf_len, void *buf)
                log_offset += buf_size;
        }
 err:
-       pci_free_consistent(adapter->pdev, get_fat_cmd.size,
-                           get_fat_cmd.va, get_fat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_fat_cmd.size,
+                         get_fat_cmd.va, get_fat_cmd.dma);
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
 }
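The conversions in this file all apply the same substitution, shown schematically below. pci_alloc_consistent() implies GFP_ATOMIC, so spelling the call as dma_zalloc_coherent(..., GFP_ATOMIC) keeps the allocation safe under the mcc_lock spinlock while making the constraint explicit, and the zeroing variant also absorbs the separate memset() that some call sites carried. Kernel API, shown for reference only, not standalone-compilable:

    /* Before (zeroing done by hand where needed): */
    va = pci_alloc_consistent(pdev, size, &dma);
    memset(va, 0, size);

    /* After (explicit GFP, zeroing folded in): */
    va = dma_zalloc_coherent(&pdev->dev, size, &dma, GFP_ATOMIC);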
@@ -2215,12 +2215,12 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
                return -EINVAL;
 
        cmd.size = sizeof(struct be_cmd_resp_port_type);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failed\n");
                return -ENOMEM;
        }
-       memset(cmd.va, 0, cmd.size);
 
        spin_lock_bh(&adapter->mcc_lock);
 
@@ -2245,7 +2245,7 @@ int be_cmd_read_port_transceiver_data(struct be_adapter *adapter,
        }
 err:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
        return status;
 }
 
@@ -2720,7 +2720,8 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                goto err;
        }
        cmd.size = sizeof(struct be_cmd_req_get_phy_info);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -2754,7 +2755,7 @@ int be_cmd_get_phy_info(struct be_adapter *adapter)
                                BE_SUPPORTED_SPEED_1GBPS;
                }
        }
-       pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
 err:
        spin_unlock_bh(&adapter->mcc_lock);
        return status;
@@ -2805,8 +2806,9 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 
        memset(&attribs_cmd, 0, sizeof(struct be_dma_mem));
        attribs_cmd.size = sizeof(struct be_cmd_resp_cntl_attribs);
-       attribs_cmd.va = pci_alloc_consistent(adapter->pdev, attribs_cmd.size,
-                                             &attribs_cmd.dma);
+       attribs_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                            attribs_cmd.size,
+                                            &attribs_cmd.dma, GFP_ATOMIC);
        if (!attribs_cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
@@ -2833,8 +2835,8 @@ int be_cmd_get_cntl_attributes(struct be_adapter *adapter)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (attribs_cmd.va)
-               pci_free_consistent(adapter->pdev, attribs_cmd.size,
-                                   attribs_cmd.va, attribs_cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, attribs_cmd.size,
+                                 attribs_cmd.va, attribs_cmd.dma);
        return status;
 }
 
@@ -2972,9 +2974,10 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
        memset(&get_mac_list_cmd, 0, sizeof(struct be_dma_mem));
        get_mac_list_cmd.size = sizeof(struct be_cmd_resp_get_mac_list);
-       get_mac_list_cmd.va = pci_alloc_consistent(adapter->pdev,
-                                                  get_mac_list_cmd.size,
-                                                  &get_mac_list_cmd.dma);
+       get_mac_list_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                                 get_mac_list_cmd.size,
+                                                 &get_mac_list_cmd.dma,
+                                                 GFP_ATOMIC);
 
        if (!get_mac_list_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -3047,8 +3050,8 @@ int be_cmd_get_mac_from_list(struct be_adapter *adapter, u8 *mac,
 
 out:
        spin_unlock_bh(&adapter->mcc_lock);
-       pci_free_consistent(adapter->pdev, get_mac_list_cmd.size,
-                           get_mac_list_cmd.va, get_mac_list_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, get_mac_list_cmd.size,
+                         get_mac_list_cmd.va, get_mac_list_cmd.dma);
        return status;
 }
 
@@ -3101,8 +3104,8 @@ int be_cmd_set_mac_list(struct be_adapter *adapter, u8 *mac_array,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_mac_list);
-       cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size,
-                                   &cmd.dma, GFP_KERNEL);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_KERNEL);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3291,7 +3294,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_acpi_wol_magic_config_v1);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory allocation failure\n");
                status = -ENOMEM;
@@ -3326,7 +3330,8 @@ int be_cmd_get_acpi_wol_cap(struct be_adapter *adapter)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 
 }
@@ -3340,8 +3345,9 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
        if (!extfat_cmd.va)
                return -ENOMEM;
 
@@ -3363,8 +3369,8 @@ int be_cmd_set_fw_log_level(struct be_adapter *adapter, u32 level)
 
        status = be_cmd_set_ext_fat_capabilites(adapter, &extfat_cmd, cfgs);
 err:
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
        return status;
 }
 
@@ -3377,8 +3383,9 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
 
        memset(&extfat_cmd, 0, sizeof(struct be_dma_mem));
        extfat_cmd.size = sizeof(struct be_cmd_resp_get_ext_fat_caps);
-       extfat_cmd.va = pci_alloc_consistent(adapter->pdev, extfat_cmd.size,
-                                            &extfat_cmd.dma);
+       extfat_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           extfat_cmd.size, &extfat_cmd.dma,
+                                           GFP_ATOMIC);
 
        if (!extfat_cmd.va) {
                dev_err(&adapter->pdev->dev, "%s: Memory allocation failure\n",
@@ -3396,8 +3403,8 @@ int be_cmd_get_fw_log_level(struct be_adapter *adapter)
                                level = cfgs->module[0].trace_lvl[j].dbg_lvl;
                }
        }
-       pci_free_consistent(adapter->pdev, extfat_cmd.size, extfat_cmd.va,
-                           extfat_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, extfat_cmd.size, extfat_cmd.va,
+                         extfat_cmd.dma);
 err:
        return level;
 }
@@ -3595,7 +3602,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_func_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va) {
                dev_err(&adapter->pdev->dev, "Memory alloc failure\n");
                status = -ENOMEM;
@@ -3635,7 +3643,8 @@ int be_cmd_get_func_config(struct be_adapter *adapter, struct be_resources *res)
 err:
        mutex_unlock(&adapter->mbox_lock);
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
@@ -3656,7 +3665,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_resp_get_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3702,7 +3712,8 @@ int be_cmd_get_profile_config(struct be_adapter *adapter,
                res->vf_if_cap_flags = vf_res->cap_flags;
 err:
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
@@ -3717,7 +3728,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
 
        memset(&cmd, 0, sizeof(struct be_dma_mem));
        cmd.size = sizeof(struct be_cmd_req_set_profile_config);
-       cmd.va = pci_alloc_consistent(adapter->pdev, cmd.size, &cmd.dma);
+       cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
+                                    GFP_ATOMIC);
        if (!cmd.va)
                return -ENOMEM;
 
@@ -3733,7 +3745,8 @@ static int be_cmd_set_profile_config(struct be_adapter *adapter, void *desc,
        status = be_cmd_notify_wait(adapter, &wrb);
 
        if (cmd.va)
-               pci_free_consistent(adapter->pdev, cmd.size, cmd.va, cmd.dma);
+               dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
+                                 cmd.dma);
        return status;
 }
 
index b765c24625bf523fd7932be17f6dfa22840a8e46..2835dee5dc3930cc5d1d09ec958bd2557228a2cd 100644 (file)
@@ -264,8 +264,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
        int status = 0;
 
        read_cmd.size = LANCER_READ_FILE_CHUNK;
-       read_cmd.va = pci_alloc_consistent(adapter->pdev, read_cmd.size,
-                                          &read_cmd.dma);
+       read_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev, read_cmd.size,
+                                         &read_cmd.dma, GFP_ATOMIC);
 
        if (!read_cmd.va) {
                dev_err(&adapter->pdev->dev,
@@ -289,8 +289,8 @@ static int lancer_cmd_read_file(struct be_adapter *adapter, u8 *file_name,
                        break;
                }
        }
-       pci_free_consistent(adapter->pdev, read_cmd.size, read_cmd.va,
-                           read_cmd.dma);
+       dma_free_coherent(&adapter->pdev->dev, read_cmd.size, read_cmd.va,
+                         read_cmd.dma);
 
        return status;
 }
@@ -818,8 +818,9 @@ static int be_test_ddr_dma(struct be_adapter *adapter)
        };
 
        ddrdma_cmd.size = sizeof(struct be_cmd_req_ddrdma_test);
-       ddrdma_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, ddrdma_cmd.size,
-                                          &ddrdma_cmd.dma, GFP_KERNEL);
+       ddrdma_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           ddrdma_cmd.size, &ddrdma_cmd.dma,
+                                           GFP_KERNEL);
        if (!ddrdma_cmd.va)
                return -ENOMEM;
 
@@ -941,8 +942,9 @@ static int be_read_eeprom(struct net_device *netdev,
 
        memset(&eeprom_cmd, 0, sizeof(struct be_dma_mem));
        eeprom_cmd.size = sizeof(struct be_cmd_req_seeprom_read);
-       eeprom_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, eeprom_cmd.size,
-                                          &eeprom_cmd.dma, GFP_KERNEL);
+       eeprom_cmd.va = dma_zalloc_coherent(&adapter->pdev->dev,
+                                           eeprom_cmd.size, &eeprom_cmd.dma,
+                                           GFP_KERNEL);
 
        if (!eeprom_cmd.va)
                return -ENOMEM;
index a6dcbf850c1fd4e09462d40f5f0e7cc08cfb2088..e43cc8a73ea7e85a927443c077c18ce6c673751a 100644 (file)
@@ -2358,11 +2358,11 @@ static int be_evt_queues_create(struct be_adapter *adapter)
                                    adapter->cfg_num_qs);
 
        for_all_evt_queues(adapter, eqo, i) {
+               int numa_node = dev_to_node(&adapter->pdev->dev);
                if (!zalloc_cpumask_var(&eqo->affinity_mask, GFP_KERNEL))
                        return -ENOMEM;
-               cpumask_set_cpu_local_first(i, dev_to_node(&adapter->pdev->dev),
-                                           eqo->affinity_mask);
-
+               cpumask_set_cpu(cpumask_local_spread(i, numa_node),
+                               eqo->affinity_mask);
                netif_napi_add(adapter->netdev, &eqo->napi, be_poll,
                               BE_NAPI_WEIGHT);
                napi_hash_add(&eqo->napi);
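
The removed cpumask_set_cpu_local_first() returned an error code the caller had to handle; its replacement, cpumask_local_spread(i, node), simply maps index i to the i-th CPU counting NUMA-local CPUs first and cannot fail. A reduced sketch of the new affinity-hint setup, with init_ring_affinity() as a hypothetical helper:

    #include <linux/cpumask.h>

    static int init_ring_affinity(cpumask_var_t *mask, int idx, int node)
    {
            if (!zalloc_cpumask_var(mask, GFP_KERNEL))
                    return -ENOMEM;
            /* cpumask_local_spread() always returns a valid CPU */
            cpumask_set_cpu(cpumask_local_spread(idx, node), *mask);
            return 0;
    }

The same substitution shows up again below in the mlx4_en RX and TX ring setup.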
@@ -4605,8 +4605,8 @@ static int lancer_fw_download(struct be_adapter *adapter,
 
        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
-       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size,
-                                         &flash_cmd.dma, GFP_KERNEL);
+       flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size,
+                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;
 
@@ -4739,8 +4739,8 @@ static int be_fw_download(struct be_adapter *adapter, const struct firmware* fw)
        }
 
        flash_cmd.size = sizeof(struct be_cmd_write_flashrom);
-       flash_cmd.va = dma_alloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
-                                         GFP_KERNEL);
+       flash_cmd.va = dma_zalloc_coherent(dev, flash_cmd.size, &flash_cmd.dma,
+                                          GFP_KERNEL);
        if (!flash_cmd.va)
                return -ENOMEM;
 
@@ -5291,16 +5291,15 @@ static int be_drv_init(struct be_adapter *adapter)
        int status = 0;
 
        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
-       mbox_mem_alloc->va = dma_alloc_coherent(dev, mbox_mem_alloc->size,
-                                               &mbox_mem_alloc->dma,
-                                               GFP_KERNEL);
+       mbox_mem_alloc->va = dma_zalloc_coherent(dev, mbox_mem_alloc->size,
+                                                &mbox_mem_alloc->dma,
+                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va)
                return -ENOMEM;
 
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
-       memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));
 
        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_zalloc_coherent(dev, rx_filter->size,
index de79193221903edee02810fe657ac44815e877ca..b9df0cbd0a3833321d1f73bc74258b50b137f225 100644 (file)
@@ -2084,12 +2084,8 @@ static void emac_ethtool_get_pauseparam(struct net_device *ndev,
 
 static int emac_get_regs_len(struct emac_instance *dev)
 {
-       if (emac_has_feature(dev, EMAC_FTR_EMAC4))
-               return sizeof(struct emac_ethtool_regs_subhdr) +
-                       EMAC4_ETHTOOL_REGS_SIZE(dev);
-       else
                return sizeof(struct emac_ethtool_regs_subhdr) +
-                       EMAC_ETHTOOL_REGS_SIZE(dev);
+                       sizeof(struct emac_regs);
 }
 
 static int emac_ethtool_get_regs_len(struct net_device *ndev)
@@ -2114,15 +2110,15 @@ static void *emac_dump_regs(struct emac_instance *dev, void *buf)
        struct emac_ethtool_regs_subhdr *hdr = buf;
 
        hdr->index = dev->cell_index;
-       if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
+       if (emac_has_feature(dev, EMAC_FTR_EMAC4SYNC)) {
+               hdr->version = EMAC4SYNC_ETHTOOL_REGS_VER;
+       } else if (emac_has_feature(dev, EMAC_FTR_EMAC4)) {
                hdr->version = EMAC4_ETHTOOL_REGS_VER;
-               memcpy_fromio(hdr + 1, dev->emacp, EMAC4_ETHTOOL_REGS_SIZE(dev));
-               return (void *)(hdr + 1) + EMAC4_ETHTOOL_REGS_SIZE(dev);
        } else {
                hdr->version = EMAC_ETHTOOL_REGS_VER;
-               memcpy_fromio(hdr + 1, dev->emacp, EMAC_ETHTOOL_REGS_SIZE(dev));
-               return (void *)(hdr + 1) + EMAC_ETHTOOL_REGS_SIZE(dev);
        }
+       memcpy_fromio(hdr + 1, dev->emacp, sizeof(struct emac_regs));
+       return (void *)(hdr + 1) + sizeof(struct emac_regs);
 }
 
 static void emac_ethtool_get_regs(struct net_device *ndev,
index 67f342a9f65e46fe8dd015b921fd144e30db286b..28df37420da963d5d8f3b3234e4f584442537121 100644 (file)
@@ -461,10 +461,7 @@ struct emac_ethtool_regs_subhdr {
 };
 
 #define EMAC_ETHTOOL_REGS_VER          0
-#define EMAC_ETHTOOL_REGS_SIZE(dev)    ((dev)->rsrc_regs.end - \
-                                        (dev)->rsrc_regs.start + 1)
-#define EMAC4_ETHTOOL_REGS_VER         1
-#define EMAC4_ETHTOOL_REGS_SIZE(dev)   ((dev)->rsrc_regs.end - \
-                                        (dev)->rsrc_regs.start + 1)
+#define EMAC4_ETHTOOL_REGS_VER         1
+#define EMAC4SYNC_ETHTOOL_REGS_VER     2
 
 #endif /* __IBM_NEWEMAC_CORE_H */
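
With the per-variant *_REGS_SIZE() macros gone, every EMAC flavour dumps a fixed sizeof(struct emac_regs) image and the subheader's version field alone identifies the layout. A sketch of how a hypothetical decoder of the ethtool register blob would branch on it (decode_regs() is assumed, not part of the driver):

    switch (hdr->version) {
    case EMAC_ETHTOOL_REGS_VER:       /* 0: EMAC      */
    case EMAC4_ETHTOOL_REGS_VER:      /* 1: EMAC4     */
    case EMAC4SYNC_ETHTOOL_REGS_VER:  /* 2: EMAC4SYNC */
            decode_regs(hdr->version, (void *)(hdr + 1),
                        sizeof(struct emac_regs));
            break;
    default:
            return -EINVAL;           /* unknown layout */
    }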
index 5d9ceb17b4cbad4f7e89cf0bb050e915f5b8d285..0abc942c966e4a377af222c1d876af6983edb91a 100644 (file)
@@ -40,6 +40,7 @@
 #include <linux/ptp_classify.h>
 #include <linux/mii.h>
 #include <linux/mdio.h>
+#include <linux/pm_qos.h>
 #include "hw.h"
 
 struct e1000_info;
index 1b0661e3573b78d73804ed59177420a4d6fde147..c754b2027281f8a2c0b18c079b31c2b7420eedbf 100644 (file)
@@ -610,7 +610,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
        unsigned int total_bytes = 0, total_packets = 0;
        u16 cleaned_count = fm10k_desc_unused(rx_ring);
 
-       do {
+       while (likely(total_packets < budget)) {
                union fm10k_rx_desc *rx_desc;
 
                /* return some buffers to hardware, one at a time is too slow */
@@ -659,7 +659,7 @@ static bool fm10k_clean_rx_irq(struct fm10k_q_vector *q_vector,
 
                /* update budget accounting */
                total_packets++;
-       } while (likely(total_packets < budget));
+       }
 
        /* place incomplete frames back on ring for completion */
        rx_ring->skb = skb;
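
The loop conversion is not cosmetic: a do { } while (total_packets < budget) body always cleans at least one packet, whereas the while form cleans none when budget is 0, which is what callers that poll with a zero RX budget (netpoll, for instance) expect. The difference in a nutshell:

    int total_packets = 0;

    /* old: body runs once even when budget == 0 */
    do {
            total_packets++;
    } while (likely(total_packets < budget));

    /* new: honors budget == 0 by processing nothing */
    while (likely(total_packets < budget))
            total_packets++;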
index 33c35d3b7420fa9ae545aea4ebd5160036914718..5d47307121abbe413cd259ff74f9aa2ee68e6c45 100644 (file)
@@ -317,6 +317,7 @@ struct i40e_pf {
 #endif
 #define I40E_FLAG_PORT_ID_VALID                (u64)(1 << 28)
 #define I40E_FLAG_DCB_CAPABLE                  (u64)(1 << 29)
+#define I40E_FLAG_VEB_MODE_ENABLED             BIT_ULL(40)
 
        /* tracks features that get auto disabled by errors */
        u64 auto_disable_flags;
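
Note the switch away from the (u64)(1 << n) style used by the neighbouring flags: 1 << 40 shifts a plain int past its width, which is undefined behavior before the cast to u64 is ever applied, so flag bits above 31 must be built with BIT_ULL(). A minimal illustration (EXAMPLE_FLAG is illustrative only):

    #include <linux/bitops.h>
    #include <linux/types.h>

    #define EXAMPLE_FLAG   BIT_ULL(40)   /* expands to (1ULL << 40) */
    /* (u64)(1 << 40) would be UB: the int shift overflows before
     * the cast to u64 takes effect */

    static void set_example_flag(u64 *flags, bool on)
    {
            if (on)
                    *flags |= EXAMPLE_FLAG;
            else
                    *flags &= ~EXAMPLE_FLAG;
    }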
index 34170eabca7da939ba1c8b9b5fad14dc2f54370d..da0faf478af076199e4281b0f3da57ad92c5e62b 100644 (file)
@@ -1021,6 +1021,15 @@ static ssize_t i40e_dbg_command_write(struct file *filp,
                        goto command_write_done;
                }
 
+               /* By default we are in VEPA mode. If this is the first
+                * VF/VMDq VSI to be added, switch to VEB mode.
+                */
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
+
                vsi = i40e_vsi_setup(pf, I40E_VSI_VMDQ2, vsi_seid, 0);
                if (vsi)
                        dev_info(&pf->pdev->dev, "added VSI %d to relay %d\n",
index a54c14491e3b6a4dbc168980dd44d399b6766487..5b5bea159bd53c8684d0a69b310e492bc797c8b6 100644 (file)
@@ -6097,6 +6097,10 @@ static int i40e_reconstitute_veb(struct i40e_veb *veb)
        if (ret)
                goto end_reconstitute;
 
+       if (pf->flags & I40E_FLAG_VEB_MODE_ENABLED)
+               veb->bridge_mode = BRIDGE_MODE_VEB;
+       else
+               veb->bridge_mode = BRIDGE_MODE_VEPA;
        i40e_config_bridge_mode(veb);
 
        /* create the remaining VSIs attached to this VEB */
@@ -8031,7 +8035,12 @@ static int i40e_ndo_bridge_setlink(struct net_device *dev,
                } else if (mode != veb->bridge_mode) {
                        /* Existing HW bridge but different mode needs reset */
                        veb->bridge_mode = mode;
-                       i40e_do_reset(pf, (1 << __I40E_PF_RESET_REQUESTED));
+                       /* TODO: If no VFs or VMDq VSIs, disallow VEB mode */
+                       if (mode == BRIDGE_MODE_VEB)
+                               pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       else
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
                        break;
                }
        }
@@ -8343,11 +8352,12 @@ static int i40e_add_vsi(struct i40e_vsi *vsi)
                ctxt.uplink_seid = vsi->uplink_seid;
                ctxt.connection_type = I40E_AQ_VSI_CONN_TYPE_NORMAL;
                ctxt.flags = I40E_AQ_VSI_TYPE_PF;
-               if (i40e_is_vsi_uplink_mode_veb(vsi)) {
+               if ((pf->flags & I40E_FLAG_VEB_MODE_ENABLED) &&
+                   (i40e_is_vsi_uplink_mode_veb(vsi))) {
                        ctxt.info.valid_sections |=
-                               cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
+                            cpu_to_le16(I40E_AQ_VSI_PROP_SWITCH_VALID);
                        ctxt.info.switch_id =
-                               cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
+                          cpu_to_le16(I40E_AQ_VSI_SW_ID_FLAG_ALLOW_LB);
                }
                i40e_vsi_setup_queue_map(vsi, &ctxt, enabled_tc, true);
                break;
@@ -8746,6 +8756,14 @@ struct i40e_vsi *i40e_vsi_setup(struct i40e_pf *pf, u8 type,
                                         __func__);
                                return NULL;
                        }
+                       /* We come up in VEPA mode by default; if SRIOV is
+                        * already enabled, we can't force VEPA mode.
+                        */
+                       if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                               veb->bridge_mode = BRIDGE_MODE_VEPA;
+                               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+                       }
                        i40e_config_bridge_mode(veb);
                }
                for (i = 0; i < I40E_MAX_VEB && !veb; i++) {
@@ -9856,6 +9874,15 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
                goto err_switch_setup;
        }
 
+#ifdef CONFIG_PCI_IOV
+       /* prep for VF support */
+       if ((pf->flags & I40E_FLAG_SRIOV_ENABLED) &&
+           (pf->flags & I40E_FLAG_MSIX_ENABLED) &&
+           !test_bit(__I40E_BAD_EEPROM, &pf->state)) {
+               if (pci_num_vf(pdev))
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+       }
+#endif
        err = i40e_setup_pf_switch(pf, false);
        if (err) {
                dev_info(&pdev->dev, "setup_pf_switch failed: %d\n", err);
index 4bd3a80aba82998bba343a1870b2d21f59bca4e0..9d95042d5a0f5805824d53ecc847ff76a9909444 100644 (file)
@@ -2410,14 +2410,12 @@ static int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                              const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
        struct skb_frag_struct *frag;
        bool linearize = false;
@@ -2429,7 +2427,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
        gso_segs = skb_shinfo(skb)->gso_segs;
 
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
 
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
@@ -2440,21 +2438,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               }
-                               j = 1;
-                               size -= skb_shinfo(skb)->gso_size;
-                               if (size)
-                                       j++;
-                               size += hdr_len;
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
@@ -2724,7 +2719,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        if (tsyn)
                tx_flags |= I40E_TX_FLAGS_TSYN;
 
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;
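
The reworked check drops hdr_len from the running total and simply walks the fragment list, counting how many hardware buffers each gso_size-sized segment would consume and requesting skb_linearize() as soon as any segment would need more than the 8 the hardware can chain. A stand-alone sketch of the same counting logic (frag_sz and nfrags stand in for the skb fragment array; an illustration, not the driver function):

    #include <stdbool.h>
    #include <stddef.h>

    #define MAX_BUFFER_TXD 8  /* HW scatter-gather limit per segment */

    static bool needs_linearize(const size_t *frag_sz, int nfrags,
                                size_t gso_size)
    {
            size_t size = 0;
            int i, j = 0;

            for (i = 0; i < nfrags; i++) {
                    size += frag_sz[i];
                    j++;
                    /* a full segment fits: restart the count with the
                     * bytes spilling into the next segment */
                    if (size >= gso_size && j < MAX_BUFFER_TXD) {
                            size %= gso_size;
                            j = size ? 1 : 0;
                    }
                    if (j == MAX_BUFFER_TXD)
                            return true;  /* 9+ buffers in one segment */
            }
            return false;
    }

The identical rework is applied to the i40evf copy of the function below.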
 
index 78d1c4ff565e8853473b70c3827e6a727ff3ce1c..4e9376da051829969de7750c2dc7a66acc5e5f40 100644 (file)
@@ -1018,11 +1018,19 @@ int i40e_pci_sriov_configure(struct pci_dev *pdev, int num_vfs)
 {
        struct i40e_pf *pf = pci_get_drvdata(pdev);
 
-       if (num_vfs)
+       if (num_vfs) {
+               if (!(pf->flags & I40E_FLAG_VEB_MODE_ENABLED)) {
+                       pf->flags |= I40E_FLAG_VEB_MODE_ENABLED;
+                       i40e_do_reset_safe(pf,
+                                          BIT_ULL(__I40E_PF_RESET_REQUESTED));
+               }
                return i40e_pci_sriov_enable(pdev, num_vfs);
+       }
 
        if (!pci_vfs_assigned(pf->pdev)) {
                i40e_free_vfs(pf);
+               pf->flags &= ~I40E_FLAG_VEB_MODE_ENABLED;
+               i40e_do_reset_safe(pf, BIT_ULL(__I40E_PF_RESET_REQUESTED));
        } else {
                dev_warn(&pdev->dev, "Unable to free VFs because some are assigned to VMs.\n");
                return -EINVAL;
index b077e02a0cc7ac8f67ad90560cf990f8f7a66277..458fbb421090772d0bbc1620277624339e0cd757 100644 (file)
@@ -1619,14 +1619,12 @@ static void i40e_create_tx_ctx(struct i40e_ring *tx_ring,
  * i40e_chk_linearize - Check if there are more than 8 fragments per packet
  * @skb:      send buffer
  * @tx_flags: collected send information
- * @hdr_len:  size of the packet header
  *
  * Note: Our HW can't scatter-gather more than 8 fragments to build
  * a packet on the wire and so we need to figure out the cases where we
  * need to linearize the skb.
  **/
-static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
-                              const u8 hdr_len)
+static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags)
 {
        struct skb_frag_struct *frag;
        bool linearize = false;
@@ -1638,7 +1636,7 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
        gso_segs = skb_shinfo(skb)->gso_segs;
 
        if (tx_flags & (I40E_TX_FLAGS_TSO | I40E_TX_FLAGS_FSO)) {
-               u16 j = 1;
+               u16 j = 0;
 
                if (num_frags < (I40E_MAX_BUFFER_TXD))
                        goto linearize_chk_done;
@@ -1649,21 +1647,18 @@ static bool i40e_chk_linearize(struct sk_buff *skb, u32 tx_flags,
                        goto linearize_chk_done;
                }
                frag = &skb_shinfo(skb)->frags[0];
-               size = hdr_len;
                /* we might still have more fragments per segment */
                do {
                        size += skb_frag_size(frag);
                        frag++; j++;
+                       if ((size >= skb_shinfo(skb)->gso_size) &&
+                           (j < I40E_MAX_BUFFER_TXD)) {
+                               size = (size % skb_shinfo(skb)->gso_size);
+                               j = (size) ? 1 : 0;
+                       }
                        if (j == I40E_MAX_BUFFER_TXD) {
-                               if (size < skb_shinfo(skb)->gso_size) {
-                                       linearize = true;
-                                       break;
-                               }
-                               j = 1;
-                               size -= skb_shinfo(skb)->gso_size;
-                               if (size)
-                                       j++;
-                               size += hdr_len;
+                               linearize = true;
+                               break;
                        }
                        num_frags--;
                } while (num_frags);
@@ -1950,7 +1945,7 @@ static netdev_tx_t i40e_xmit_frame_ring(struct sk_buff *skb,
        else if (tso)
                tx_flags |= I40E_TX_FLAGS_TSO;
 
-       if (i40e_chk_linearize(skb, tx_flags, hdr_len))
+       if (i40e_chk_linearize(skb, tx_flags))
                if (skb_linearize(skb))
                        goto out_drop;
 
index 8457d0306e3a76107c18ed524a3000d47b3ead6e..a0a9b1fcb5e8efcf4f7ebfe980459f64056e896f 100644 (file)
@@ -1036,7 +1036,7 @@ static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
                adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
 
        if (q_vector->rx.ring)
-               adapter->tx_ring[q_vector->rx.ring->queue_index] = NULL;
+               adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
 
        netif_napi_del(&q_vector->napi);
 
@@ -1207,6 +1207,8 @@ static int igb_alloc_q_vector(struct igb_adapter *adapter,
        q_vector = adapter->q_vector[v_idx];
        if (!q_vector)
                q_vector = kzalloc(size, GFP_KERNEL);
+       else
+               memset(q_vector, 0, size);
        if (!q_vector)
                return -ENOMEM;
 
index e3b9b63ad01083cb987429f57c9ebef84d86f4db..c3a9392cbc192229f4178c913fad8ab64d8c44c3 100644 (file)
@@ -538,8 +538,8 @@ static int igb_ptp_feature_enable_i210(struct ptp_clock_info *ptp,
                        igb->perout[i].start.tv_nsec = rq->perout.start.nsec;
                        igb->perout[i].period.tv_sec = ts.tv_sec;
                        igb->perout[i].period.tv_nsec = ts.tv_nsec;
-                       wr32(trgttiml, rq->perout.start.sec);
-                       wr32(trgttimh, rq->perout.start.nsec);
+                       wr32(trgttimh, rq->perout.start.sec);
+                       wr32(trgttiml, rq->perout.start.nsec);
                        tsauxc |= tsauxc_mask;
                        tsim |= tsim_mask;
                } else {
index a16d267fbce4b0f6883c8cf5d43db1ad3bf7e458..e71cdde9cb017aecab834d2f2d9c5d4821c3d42e 100644 (file)
@@ -3612,7 +3612,7 @@ static int ixgbevf_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
        u8 *dst_mac = skb_header_pointer(skb, 0, 0, NULL);
 
        if (!dst_mac || is_link_local_ether_addr(dst_mac)) {
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
                return NETDEV_TX_OK;
        }
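
ndo_start_xmit can be entered in contexts where plain dev_kfree_skb() is unsafe (netpoll runs it with interrupts disabled); the _any() variants check the calling context and defer the free to softirq when needed. The related consume/drop distinction, which the r8169 hunk below also applies, in an illustrative helper:

    #include <linux/skbuff.h>

    static void tx_free_skb(struct sk_buff *skb, bool dropped)
    {
            if (dropped)
                    dev_kfree_skb_any(skb);    /* counted as a drop by
                                                * tracing/drop monitors */
            else
                    dev_consume_skb_any(skb);  /* normal completion */
    }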
 
index 4f7dc044601e2751ad625e4c011aa3a1c328e62f..529ef0594b902ebaf2838cf478ef914a0b69d5b7 100644 (file)
@@ -714,8 +714,13 @@ static int mlx4_cmd_wait(struct mlx4_dev *dev, u64 in_param, u64 *out_param,
                                         msecs_to_jiffies(timeout))) {
                mlx4_warn(dev, "command 0x%x timed out (go bit not cleared)\n",
                          op);
-               err = -EIO;
-               goto out_reset;
+               if (op == MLX4_CMD_NOP) {
+                       err = -EBUSY;
+                       goto out;
+               } else {
+                       err = -EIO;
+                       goto out_reset;
+               }
        }
 
        err = context->result;
index 32f5ec7374723d1315f4234f77b12ffbe5adcfe0..cf467a9f6cc78c0c8a53b9120cec2795888f4904 100644 (file)
@@ -1501,17 +1501,13 @@ static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
 {
        struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
        int numa_node = priv->mdev->dev->numa_node;
-       int ret = 0;
 
        if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
                return -ENOMEM;
 
-       ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
-                                         ring->affinity_mask);
-       if (ret)
-               free_cpumask_var(ring->affinity_mask);
-
-       return ret;
+       cpumask_set_cpu(cpumask_local_spread(ring_idx, numa_node),
+                       ring->affinity_mask);
+       return 0;
 }
 
 static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
index 54f0e5ab2e55ca87dc66a2ef8b4e27062a634ce4..0a56f010c8468d0734c3afd791605843d373b91a 100644 (file)
@@ -139,7 +139,7 @@ static unsigned long en_stats_adder(__be64 *start, __be64 *next, int num)
        int i;
        int offset = next - start;
 
-       for (i = 0; i <= num; i++) {
+       for (i = 0; i < num; i++) {
                ret += be64_to_cpu(*curr);
                curr += offset;
        }
index f7bf312fb44311b1c436c4eb3706341db92c1db0..7bed3a88579fa9db92d7e42ad7d43265bd8a3d41 100644 (file)
@@ -144,9 +144,9 @@ int mlx4_en_create_tx_ring(struct mlx4_en_priv *priv,
        ring->queue_index = queue_index;
 
        if (queue_index < priv->num_tx_rings_p_up)
-               cpumask_set_cpu_local_first(queue_index,
-                                           priv->mdev->dev->numa_node,
-                                           &ring->affinity_mask);
+               cpumask_set_cpu(cpumask_local_spread(queue_index,
+                                                    priv->mdev->dev->numa_node),
+                               &ring->affinity_mask);
 
        *pring = ring;
        return 0;
index c7f28bf4b8e21436cc927c8212c5cc6b57706e51..bafe2180cf0c413c4d971f8043e401a018dc8100 100644 (file)
@@ -2845,7 +2845,7 @@ int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
 {
        int err;
        int eqn = vhcr->in_modifier;
-       int res_id = (slave << 8) | eqn;
+       int res_id = (slave << 10) | eqn;
        struct mlx4_eq_context *eqc = inbox->buf;
        int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
        int mtt_size = eq_get_mtt_size(eqc);
@@ -3051,7 +3051,7 @@ int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_cmd_info *cmd)
 {
        int eqn = vhcr->in_modifier;
-       int res_id = eqn | (slave << 8);
+       int res_id = eqn | (slave << 10);
        struct res_eq *eq;
        int err;
 
@@ -3108,7 +3108,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
                return 0;
 
        mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
-       res_id = (slave << 8) | event_eq->eqn;
+       res_id = (slave << 10) | event_eq->eqn;
        err = get_res(dev, slave, res_id, RES_EQ, &req);
        if (err)
                goto unlock;
@@ -3131,7 +3131,7 @@ int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
 
        memcpy(mailbox->buf, (u8 *) eqe, 28);
 
-       in_modifier = (slave & 0xff) | ((event_eq->eqn & 0xff) << 16);
+       in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
 
        err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
                       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
@@ -3157,7 +3157,7 @@ int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
                          struct mlx4_cmd_info *cmd)
 {
        int eqn = vhcr->in_modifier;
-       int res_id = eqn | (slave << 8);
+       int res_id = eqn | (slave << 10);
        struct res_eq *eq;
        int err;
 
@@ -3187,7 +3187,7 @@ int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
        int cqn = vhcr->in_modifier;
        struct mlx4_cq_context *cqc = inbox->buf;
        int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
-       struct res_cq *cq;
+       struct res_cq *cq = NULL;
        struct res_mtt *mtt;
 
        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
@@ -3223,7 +3223,7 @@ int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
 {
        int err;
        int cqn = vhcr->in_modifier;
-       struct res_cq *cq;
+       struct res_cq *cq = NULL;
 
        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
        if (err)
@@ -3362,7 +3362,7 @@ int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
        int err;
        int srqn = vhcr->in_modifier;
        struct res_mtt *mtt;
-       struct res_srq *srq;
+       struct res_srq *srq = NULL;
        struct mlx4_srq_context *srqc = inbox->buf;
        int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
 
@@ -3406,7 +3406,7 @@ int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
 {
        int err;
        int srqn = vhcr->in_modifier;
-       struct res_srq *srq;
+       struct res_srq *srq = NULL;
 
        err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
        if (err)
@@ -4714,13 +4714,13 @@ static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
                                        break;
 
                                case RES_EQ_HW:
-                                       err = mlx4_cmd(dev, slave, eqn & 0xff,
+                                       err = mlx4_cmd(dev, slave, eqn & 0x3ff,
                                                       1, MLX4_CMD_HW2SW_EQ,
                                                       MLX4_CMD_TIME_CLASS_A,
                                                       MLX4_CMD_NATIVE);
                                        if (err)
                                                mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
-                                                        slave, eqn);
+                                                        slave, eqn & 0x3ff);
                                        atomic_dec(&eq->mtt->ref_count);
                                        state = RES_EQ_RESERVED;
                                        break;
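
All of the resource-tracker changes in this file are one widening: res_id packs (slave, eqn), and once event-queue numbers can exceed 8 bits the old slave << 8 packing would let an EQN spill into the slave field, so the slave index moves up to bit 10 and every EQN mask grows from 0xff to 0x3ff. The packing, reduced to a hypothetical helper:

    #define MLX4_EQN_MASK  0x3ff             /* EQ numbers: 10 bits */

    static inline int eq_res_id(int slave, int eqn)
    {
            /* slave lives above the widest possible EQN */
            return (slave << 10) | (eqn & MLX4_EQN_MASK);
    }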
index 8da7c3faf8178c05576c74833fc828f197104f52..7b43a3b4abdcbc7bc1cdfd4d13c611563e2760a2 100644 (file)
@@ -1764,7 +1764,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
        int done = 0;
        struct nx_host_tx_ring *tx_ring = adapter->tx_ring;
 
-       if (!spin_trylock(&adapter->tx_clean_lock))
+       if (!spin_trylock_bh(&adapter->tx_clean_lock))
                return 1;
 
        sw_consumer = tx_ring->sw_consumer;
@@ -1819,7 +1819,7 @@ int netxen_process_cmd_ring(struct netxen_adapter *adapter)
         */
        hw_consumer = le32_to_cpu(*(tx_ring->hw_consumer));
        done = (sw_consumer == hw_consumer);
-       spin_unlock(&adapter->tx_clean_lock);
+       spin_unlock_bh(&adapter->tx_clean_lock);
 
        return done;
 }
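
tx_clean_lock is taken both from process context and from the NAPI poll path, which runs in softirq context. The _bh variants keep bottom halves disabled while the lock is held, so the poll path can no longer interrupt a same-CPU holder and spin against it. The hazard being closed, sketched:

    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(lock);  /* stand-in for adapter->tx_clean_lock */

    static void reclaim(void)
    {
            /* With plain spin_lock() here, a NAPI softirq firing on
             * this CPU could contend the lock and spin forever against
             * its own interrupted holder. _bh keeps softirqs disabled
             * for the critical section, closing that window. */
            spin_lock_bh(&lock);
            /* ... walk the TX ring shared with the poll routine ... */
            spin_unlock_bh(&lock);
    }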
index e0c31e3947d1091371bfa742fbea5cee9743002d..6409a06bbdf633b0ce440bf817aabfe69311dd1e 100644 (file)
@@ -3025,9 +3025,9 @@ netxen_sysfs_read_dimm(struct file *filp, struct kobject *kobj,
        u8 dw, rows, cols, banks, ranks;
        u32 val;
 
-       if (size != sizeof(struct netxen_dimm_cfg)) {
+       if (size < attr->size) {
                netdev_err(netdev, "Invalid size\n");
-               return -1;
+               return -EINVAL;
        }
 
        memset(&dimm, 0, sizeof(struct netxen_dimm_cfg));
@@ -3137,7 +3137,7 @@ out:
 
 static struct bin_attribute bin_attr_dimm = {
        .attr = { .name = "dimm", .mode = (S_IRUGO | S_IWUSR) },
-       .size = 0,
+       .size = sizeof(struct netxen_dimm_cfg),
        .read = netxen_sysfs_read_dimm,
 };
 
index f66641d961e3bc18ae15a301323497ff9fb265e8..6af028d5f9bcbdcc3aae303a114fd64d87986eed 100644 (file)
@@ -912,6 +912,8 @@ qca_spi_probe(struct spi_device *spi_device)
        qca->spi_dev = spi_device;
        qca->legacy_mode = legacy_mode;
 
+       spi_set_drvdata(spi_device, qcaspi_devs);
+
        mac = of_get_mac_address(spi_device->dev.of_node);
 
        if (mac)
@@ -944,8 +946,6 @@ qca_spi_probe(struct spi_device *spi_device)
                return -EFAULT;
        }
 
-       spi_set_drvdata(spi_device, qcaspi_devs);
-
        qcaspi_init_device_debugfs(qca);
 
        return 0;
index c70ab40d86989974d54c9161bf7acd8558d93c74..3df51faf18ae3ba8ce6bb7f49e6f51e4da1be738 100644 (file)
@@ -6884,7 +6884,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
                        rtl8169_start_xmit(nskb, tp->dev);
                } while (segs);
 
-               dev_kfree_skb(skb);
+               dev_consume_skb_any(skb);
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (skb_checksum_help(skb) < 0)
                        goto drop;
@@ -6896,7 +6896,7 @@ static void r8169_csum_workaround(struct rtl8169_private *tp,
 drop:
                stats = &tp->dev->stats;
                stats->tx_dropped++;
-               dev_kfree_skb(skb);
+               dev_kfree_skb_any(skb);
        }
 }
 
index ec251531bd9f8ecd1e64295b4f1f5c35fe475b79..cf98cc9bbc8dc9d57545bbbe25592f6878fcf324 100644 (file)
@@ -2921,10 +2921,11 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
        struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);
        int err = 0;
 
-       if (!n)
+       if (!n) {
                n = neigh_create(&arp_tbl, &ip_addr, dev);
-       if (!n)
-               return -ENOMEM;
+               if (IS_ERR(n))
+                       return PTR_ERR(n);
+       }
 
        /* If the neigh is already resolved, then go ahead and
         * install the entry, otherwise start the ARP process to
@@ -2936,6 +2937,7 @@ static int rocker_port_ipv4_resolve(struct rocker_port *rocker_port,
        else
                neigh_event_send(n, NULL);
 
+       neigh_release(n);
        return err;
 }
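
Both __ipv4_neigh_lookup() and a successful neigh_create() hand back a referenced neighbour; neigh_create() returns an ERR_PTR on failure, so the error path must propagate PTR_ERR(n) (IS_ERR() merely evaluates to 1). Holding a reference in both cases is also why the function can now release it unconditionally on the way out. The pattern, reduced:

    struct neighbour *n = __ipv4_neigh_lookup(dev, (__force u32)ip_addr);

    if (!n) {
            n = neigh_create(&arp_tbl, &ip_addr, dev);
            if (IS_ERR(n))
                    return PTR_ERR(n);  /* propagate the real errno */
    }
    /* ... install the entry or kick off ARP ... */
    neigh_release(n);                   /* drop the lookup/create ref */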
 
index 4b00545a3ace7784b3b3e668ccd68b4334c1500b..65944dd8bf6b11239e4945b6be7e91fdc547254a 100644 (file)
@@ -1304,7 +1304,7 @@ static unsigned int efx_wanted_parallelism(struct efx_nic *efx)
                        if (!cpumask_test_cpu(cpu, thread_mask)) {
                                ++count;
                                cpumask_or(thread_mask, thread_mask,
-                                          topology_thread_cpumask(cpu));
+                                          topology_sibling_cpumask(cpu));
                        }
                }
 
index c0ad95d2f63d9a12cd300aa0420ddda661ccaed1..809ea4610a77e774af0413d896e8ec802946d8fa 100644 (file)
@@ -224,12 +224,17 @@ static void efx_unmap_rx_buffer(struct efx_nic *efx,
        }
 }
 
-static void efx_free_rx_buffer(struct efx_rx_buffer *rx_buf)
+static void efx_free_rx_buffers(struct efx_rx_queue *rx_queue,
+                               struct efx_rx_buffer *rx_buf,
+                               unsigned int num_bufs)
 {
-       if (rx_buf->page) {
-               put_page(rx_buf->page);
-               rx_buf->page = NULL;
-       }
+       do {
+               if (rx_buf->page) {
+                       put_page(rx_buf->page);
+                       rx_buf->page = NULL;
+               }
+               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
+       } while (--num_bufs);
 }
 
 /* Attempt to recycle the page if there is an RX recycle ring; the page can
@@ -278,7 +283,7 @@ static void efx_fini_rx_buffer(struct efx_rx_queue *rx_queue,
        /* If this is the last buffer in a page, unmap and free it. */
        if (rx_buf->flags & EFX_RX_BUF_LAST_IN_PAGE) {
                efx_unmap_rx_buffer(rx_queue->efx, rx_buf);
-               efx_free_rx_buffer(rx_buf);
+               efx_free_rx_buffers(rx_queue, rx_buf, 1);
        }
        rx_buf->page = NULL;
 }
@@ -304,10 +309,7 @@ static void efx_discard_rx_packet(struct efx_channel *channel,
 
        efx_recycle_rx_pages(channel, rx_buf, n_frags);
 
-       do {
-               efx_free_rx_buffer(rx_buf);
-               rx_buf = efx_rx_buf_next(rx_queue, rx_buf);
-       } while (--n_frags);
+       efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
 }
 
 /**
@@ -431,11 +433,10 @@ efx_rx_packet_gro(struct efx_channel *channel, struct efx_rx_buffer *rx_buf,
 
        skb = napi_get_frags(napi);
        if (unlikely(!skb)) {
-               while (n_frags--) {
-                       put_page(rx_buf->page);
-                       rx_buf->page = NULL;
-                       rx_buf = efx_rx_buf_next(&channel->rx_queue, rx_buf);
-               }
+               struct efx_rx_queue *rx_queue;
+
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
 
@@ -622,7 +623,10 @@ static void efx_rx_deliver(struct efx_channel *channel, u8 *eh,
 
        skb = efx_rx_mk_skb(channel, rx_buf, n_frags, eh, hdr_len);
        if (unlikely(skb == NULL)) {
-               efx_free_rx_buffer(rx_buf);
+               struct efx_rx_queue *rx_queue;
+
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf, n_frags);
                return;
        }
        skb_record_rx_queue(skb, channel->rx_queue.core_index);
@@ -661,8 +665,12 @@ void __efx_rx_packet(struct efx_channel *channel)
         * loopback layer, and free the rx_buf here
         */
        if (unlikely(efx->loopback_selftest)) {
+               struct efx_rx_queue *rx_queue;
+
                efx_loopback_rx_packet(efx, eh, rx_buf->len);
-               efx_free_rx_buffer(rx_buf);
+               rx_queue = efx_channel_get_rx_queue(channel);
+               efx_free_rx_buffers(rx_queue, rx_buf,
+                                   channel->rx_pkt_n_frags);
                goto out;
        }
 
index 14b363a25c023c70f13b73e9c485bf28e9d533e2..630f0b7800e47e085c5ffb2db6ac23efef9ad08c 100644 (file)
@@ -2238,9 +2238,10 @@ static int smc_drv_probe(struct platform_device *pdev)
        const struct of_device_id *match = NULL;
        struct smc_local *lp;
        struct net_device *ndev;
-       struct resource *res, *ires;
+       struct resource *res;
        unsigned int __iomem *addr;
        unsigned long irq_flags = SMC_IRQ_FLAGS;
+       unsigned long irq_resflags;
        int ret;
 
        ndev = alloc_etherdev(sizeof(struct smc_local));
@@ -2332,16 +2333,19 @@ static int smc_drv_probe(struct platform_device *pdev)
                goto out_free_netdev;
        }
 
-       ires = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!ires) {
+       ndev->irq = platform_get_irq(pdev, 0);
+       if (ndev->irq <= 0) {
                ret = -ENODEV;
                goto out_release_io;
        }
-
-       ndev->irq = ires->start;
-
-       if (irq_flags == -1 || ires->flags & IRQF_TRIGGER_MASK)
-               irq_flags = ires->flags & IRQF_TRIGGER_MASK;
+       /*
+        * If this platform does not specify any special irqflags, or if
+        * the resource supplies a trigger, override the irqflags with
+        * the trigger flags from the resource.
+        */
+       irq_resflags = irqd_get_trigger_type(irq_get_irq_data(ndev->irq));
+       if (irq_flags == -1 || irq_resflags & IRQF_TRIGGER_MASK)
+               irq_flags = irq_resflags & IRQF_TRIGGER_MASK;
 
        ret = smc_request_attrib(pdev, ndev);
        if (ret)
index 41047c9143d0a66cde1441311fb5feb3ce0796d0..959aeeade0c97b8cbf5ee27024ecda185c6258b2 100644 (file)
@@ -2418,9 +2418,9 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        struct net_device *dev;
        struct smsc911x_data *pdata;
        struct smsc911x_platform_config *config = dev_get_platdata(&pdev->dev);
-       struct resource *res, *irq_res;
+       struct resource *res;
        unsigned int intcfg = 0;
-       int res_size, irq_flags;
+       int res_size, irq, irq_flags;
        int retval;
 
        res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
@@ -2434,8 +2434,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        }
        res_size = resource_size(res);
 
-       irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
-       if (!irq_res) {
+       irq = platform_get_irq(pdev, 0);
+       if (irq <= 0) {
                pr_warn("Could not allocate irq resource\n");
                retval = -ENODEV;
                goto out_0;
@@ -2455,8 +2455,8 @@ static int smsc911x_drv_probe(struct platform_device *pdev)
        SET_NETDEV_DEV(dev, &pdev->dev);
 
        pdata = netdev_priv(dev);
-       dev->irq = irq_res->start;
-       irq_flags = irq_res->flags & IRQF_TRIGGER_MASK;
+       dev->irq = irq;
+       irq_flags = irq_get_trigger_type(irq);
        pdata->ioaddr = ioremap_nocache(res->start, res_size);
 
        pdata->dev = dev;
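
Both this driver and smc91x above stop digging the IRQ out of an IORESOURCE_IRQ resource and call platform_get_irq(), which also performs the irqdomain mapping that DT platforms may need; the trigger type is then read back from the IRQ descriptor instead of from resource flags. The resulting probe pattern, reduced (example_probe() is illustrative only):

    #include <linux/platform_device.h>
    #include <linux/irq.h>

    static int example_probe(struct platform_device *pdev)
    {
            unsigned long irq_flags;
            int irq = platform_get_irq(pdev, 0);

            if (irq <= 0)
                    return -ENODEV;
            /* the IRQF_TRIGGER_* bits configured for this irq line */
            irq_flags = irq_get_trigger_type(irq);
            /* ... request_irq(irq, handler, irq_flags, name, priv) ... */
            return 0;
    }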
index 2ac9552d1fa385953e261ff3797c74b8d5ad4add..73bab983edd96a47169bf4b1957e5fd13c28a3a0 100644 (file)
@@ -117,6 +117,12 @@ struct stmmac_priv {
        int use_riwt;
        int irq_wake;
        spinlock_t ptp_lock;
+
+#ifdef CONFIG_DEBUG_FS
+       struct dentry *dbgfs_dir;
+       struct dentry *dbgfs_rings_status;
+       struct dentry *dbgfs_dma_cap;
+#endif
 };
 
 int stmmac_mdio_unregister(struct net_device *ndev);
index 05c146f718a36551c4fe4ada4871f2612f16571d..2c5ce2baca8712790d51096a53868b84466f7dde 100644 (file)
@@ -118,7 +118,7 @@ static irqreturn_t stmmac_interrupt(int irq, void *dev_id);
 
 #ifdef CONFIG_DEBUG_FS
 static int stmmac_init_fs(struct net_device *dev);
-static void stmmac_exit_fs(void);
+static void stmmac_exit_fs(struct net_device *dev);
 #endif
 
 #define STMMAC_COAL_TIMER(x) (jiffies + usecs_to_jiffies(x))
@@ -1916,7 +1916,7 @@ static int stmmac_release(struct net_device *dev)
        netif_carrier_off(dev);
 
 #ifdef CONFIG_DEBUG_FS
-       stmmac_exit_fs();
+       stmmac_exit_fs(dev);
 #endif
 
        stmmac_release_ptp(priv);
@@ -2508,8 +2508,6 @@ static int stmmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
 
 #ifdef CONFIG_DEBUG_FS
 static struct dentry *stmmac_fs_dir;
-static struct dentry *stmmac_rings_status;
-static struct dentry *stmmac_dma_cap;
 
 static void sysfs_display_ring(void *head, int size, int extend_desc,
                               struct seq_file *seq)
@@ -2648,36 +2646,39 @@ static const struct file_operations stmmac_dma_cap_fops = {
 
 static int stmmac_init_fs(struct net_device *dev)
 {
-       /* Create debugfs entries */
-       stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       /* Create per netdev entries */
+       priv->dbgfs_dir = debugfs_create_dir(dev->name, stmmac_fs_dir);
 
-       if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
-               pr_err("ERROR %s, debugfs create directory failed\n",
-                      STMMAC_RESOURCE_NAME);
+       if (!priv->dbgfs_dir || IS_ERR(priv->dbgfs_dir)) {
+               pr_err("ERROR %s/%s, debugfs create directory failed\n",
+                      STMMAC_RESOURCE_NAME, dev->name);
 
                return -ENOMEM;
        }
 
        /* Entry to report DMA RX/TX rings */
-       stmmac_rings_status = debugfs_create_file("descriptors_status",
-                                                 S_IRUGO, stmmac_fs_dir, dev,
-                                                 &stmmac_rings_status_fops);
+       priv->dbgfs_rings_status =
+               debugfs_create_file("descriptors_status", S_IRUGO,
+                                   priv->dbgfs_dir, dev,
+                                   &stmmac_rings_status_fops);
 
-       if (!stmmac_rings_status || IS_ERR(stmmac_rings_status)) {
+       if (!priv->dbgfs_rings_status || IS_ERR(priv->dbgfs_rings_status)) {
                pr_info("ERROR creating stmmac ring debugfs file\n");
-               debugfs_remove(stmmac_fs_dir);
+               debugfs_remove_recursive(priv->dbgfs_dir);
 
                return -ENOMEM;
        }
 
        /* Entry to report the DMA HW features */
-       stmmac_dma_cap = debugfs_create_file("dma_cap", S_IRUGO, stmmac_fs_dir,
-                                            dev, &stmmac_dma_cap_fops);
+       priv->dbgfs_dma_cap = debugfs_create_file("dma_cap", S_IRUGO,
+                                           priv->dbgfs_dir,
+                                           dev, &stmmac_dma_cap_fops);
 
-       if (!stmmac_dma_cap || IS_ERR(stmmac_dma_cap)) {
+       if (!priv->dbgfs_dma_cap || IS_ERR(priv->dbgfs_dma_cap)) {
                pr_info("ERROR creating stmmac MMC debugfs file\n");
-               debugfs_remove(stmmac_rings_status);
-               debugfs_remove(stmmac_fs_dir);
+               debugfs_remove_recursive(priv->dbgfs_dir);
 
                return -ENOMEM;
        }
@@ -2685,11 +2686,11 @@ static int stmmac_init_fs(struct net_device *dev)
        return 0;
 }
 
-static void stmmac_exit_fs(void)
+static void stmmac_exit_fs(struct net_device *dev)
 {
-       debugfs_remove(stmmac_rings_status);
-       debugfs_remove(stmmac_dma_cap);
-       debugfs_remove(stmmac_fs_dir);
+       struct stmmac_priv *priv = netdev_priv(dev);
+
+       debugfs_remove_recursive(priv->dbgfs_dir);
 }
 #endif /* CONFIG_DEBUG_FS */
 
@@ -3149,6 +3150,35 @@ err:
 __setup("stmmaceth=", stmmac_cmdline_opt);
 #endif /* MODULE */
 
+static int __init stmmac_init(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       /* Create debugfs main directory if it doesn't exist yet */
+       if (!stmmac_fs_dir) {
+               stmmac_fs_dir = debugfs_create_dir(STMMAC_RESOURCE_NAME, NULL);
+
+               if (!stmmac_fs_dir || IS_ERR(stmmac_fs_dir)) {
+                       pr_err("ERROR %s, debugfs create directory failed\n",
+                              STMMAC_RESOURCE_NAME);
+
+                       return -ENOMEM;
+               }
+       }
+#endif
+
+       return 0;
+}
+
+static void __exit stmmac_exit(void)
+{
+#ifdef CONFIG_DEBUG_FS
+       debugfs_remove_recursive(stmmac_fs_dir);
+#endif
+}
+
+module_init(stmmac_init)
+module_exit(stmmac_exit)
+
 MODULE_DESCRIPTION("STMMAC 10/100/1000 Ethernet device driver");
 MODULE_AUTHOR("Giuseppe Cavallaro <peppe.cavallaro@st.com>");
 MODULE_LICENSE("GPL");
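
With the top-level directory now created once at module init and a per-netdev subdirectory created in stmmac_init_fs(), several probed interfaces no longer collide on the same debugfs entries. Assuming STMMAC_RESOURCE_NAME is "stmmaceth" and two interfaces have probed, the resulting layout would be:

    /sys/kernel/debug/stmmaceth/eth0/descriptors_status
    /sys/kernel/debug/stmmaceth/eth0/dma_cap
    /sys/kernel/debug/stmmaceth/eth1/descriptors_status
    /sys/kernel/debug/stmmaceth/eth1/dma_cap

debugfs_remove_recursive() on the per-device directory (on error or release) or on the top-level directory (at module exit) then tears down exactly one subtree.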
index 705bbdf9394058944927fd575d1dfc47cdb283d5..68aec5c460db46c1378cdf122c3ad8f4eba15e86 100644 (file)
@@ -23,6 +23,7 @@
 *******************************************************************************/
 
 #include <linux/platform_device.h>
+#include <linux/module.h>
 #include <linux/io.h>
 #include <linux/of.h>
 #include <linux/of_net.h>
index 690a4c36b3166c76b4d8ed63f9d21574ecac8bb3..af2694dc6f90146fc2afe9073a0dde7058731f59 100644 (file)
@@ -707,8 +707,8 @@ static int temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 
        cur_p->app0 |= STS_CTRL_APP0_SOP;
        cur_p->len = skb_headlen(skb);
-       cur_p->phys = dma_map_single(ndev->dev.parent, skb->data, skb->len,
-                                    DMA_TO_DEVICE);
+       cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
+                                    skb_headlen(skb), DMA_TO_DEVICE);
        cur_p->app4 = (unsigned long)skb;
 
        for (ii = 0; ii < num_frag; ii++) {
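
The old code mapped skb->len bytes starting at skb->data, but for a fragmented skb only skb_headlen() bytes live in the linear area; the paged fragments are mapped one by one in the loop that follows, so the oversized mapping covered memory past the linear buffer. The correct split, sketched (head/fdma are placeholder names):

    dma_addr_t head, fdma;
    int i;

    /* linear part: exactly the head, not the whole packet */
    head = dma_map_single(dev, skb->data, skb_headlen(skb),
                          DMA_TO_DEVICE);
    for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
            skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

            fdma = skb_frag_dma_map(dev, frag, 0, skb_frag_size(frag),
                                    DMA_TO_DEVICE);
    }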
index 2d9ef533cc4837c5bd7b46c96958f9f1b9eda323..1e09243d5449d4f85b9c6e1eb9b0812c04c241f8 100644 (file)
@@ -28,6 +28,7 @@
 #include <linux/slab.h>
 #include <linux/netdevice.h>
 #include <linux/if_ether.h>
+#include <linux/vmalloc.h>
 #include <asm/sync_bitops.h>
 
 #include "hyperv_net.h"
@@ -826,7 +827,6 @@ int netvsc_send(struct hv_device *device,
        u16 q_idx = packet->q_idx;
        u32 pktlen = packet->total_data_buflen, msd_len = 0;
        unsigned int section_index = NETVSC_INVALID_INDEX;
-       struct sk_buff *skb = NULL;
        unsigned long flag;
        struct multi_send_data *msdp;
        struct hv_netvsc_packet *msd_send = NULL, *cur_send = NULL;
@@ -924,12 +924,8 @@ int netvsc_send(struct hv_device *device,
        if (cur_send)
                ret = netvsc_send_pkt(cur_send, net_device);
 
-       if (ret != 0) {
-               if (section_index != NETVSC_INVALID_INDEX)
-                       netvsc_free_send_slot(net_device, section_index);
-       } else if (skb) {
-               dev_kfree_skb_any(skb);
-       }
+       if (ret != 0 && section_index != NETVSC_INVALID_INDEX)
+               netvsc_free_send_slot(net_device, section_index);
 
        return ret;
 }
index 9118cea918821cb6bbe83a2f97a71134a58fd5dd..35a482d526d9c5d0860e5a55e0a65f107e89ebd3 100644 (file)
@@ -27,6 +27,7 @@
 #include <linux/netdevice.h>
 #include <linux/if_vlan.h>
 #include <linux/nls.h>
+#include <linux/vmalloc.h>
 
 #include "hyperv_net.h"
 
index 38026650c0387ecb101d085e24273bf24ded2783..67d00fbc2e0e29e7bd426ed8f7d21b22bf6772fc 100644 (file)
@@ -85,6 +85,7 @@ struct at86rf230_local {
        struct ieee802154_hw *hw;
        struct at86rf2xx_chip_data *data;
        struct regmap *regmap;
+       int slp_tr;
 
        struct completion state_complete;
        struct at86rf230_state_change state;
@@ -95,163 +96,164 @@ struct at86rf230_local {
        unsigned long cal_timeout;
        s8 max_frame_retries;
        bool is_tx;
+       bool is_tx_from_off;
        u8 tx_retry;
        struct sk_buff *tx_skb;
        struct at86rf230_state_change tx;
 };
 
-#define        RG_TRX_STATUS   (0x01)
-#define        SR_TRX_STATUS           0x01, 0x1f, 0
-#define        SR_RESERVED_01_3        0x01, 0x20, 5
-#define        SR_CCA_STATUS           0x01, 0x40, 6
-#define        SR_CCA_DONE             0x01, 0x80, 7
-#define        RG_TRX_STATE    (0x02)
-#define        SR_TRX_CMD              0x02, 0x1f, 0
-#define        SR_TRAC_STATUS          0x02, 0xe0, 5
-#define        RG_TRX_CTRL_0   (0x03)
-#define        SR_CLKM_CTRL            0x03, 0x07, 0
-#define        SR_CLKM_SHA_SEL         0x03, 0x08, 3
-#define        SR_PAD_IO_CLKM          0x03, 0x30, 4
-#define        SR_PAD_IO               0x03, 0xc0, 6
-#define        RG_TRX_CTRL_1   (0x04)
-#define        SR_IRQ_POLARITY         0x04, 0x01, 0
-#define        SR_IRQ_MASK_MODE        0x04, 0x02, 1
-#define        SR_SPI_CMD_MODE         0x04, 0x0c, 2
-#define        SR_RX_BL_CTRL           0x04, 0x10, 4
-#define        SR_TX_AUTO_CRC_ON       0x04, 0x20, 5
-#define        SR_IRQ_2_EXT_EN         0x04, 0x40, 6
-#define        SR_PA_EXT_EN            0x04, 0x80, 7
-#define        RG_PHY_TX_PWR   (0x05)
-#define        SR_TX_PWR               0x05, 0x0f, 0
-#define        SR_PA_LT                0x05, 0x30, 4
-#define        SR_PA_BUF_LT            0x05, 0xc0, 6
-#define        RG_PHY_RSSI     (0x06)
-#define        SR_RSSI                 0x06, 0x1f, 0
-#define        SR_RND_VALUE            0x06, 0x60, 5
-#define        SR_RX_CRC_VALID         0x06, 0x80, 7
-#define        RG_PHY_ED_LEVEL (0x07)
-#define        SR_ED_LEVEL             0x07, 0xff, 0
-#define        RG_PHY_CC_CCA   (0x08)
-#define        SR_CHANNEL              0x08, 0x1f, 0
-#define        SR_CCA_MODE             0x08, 0x60, 5
-#define        SR_CCA_REQUEST          0x08, 0x80, 7
-#define        RG_CCA_THRES    (0x09)
-#define        SR_CCA_ED_THRES         0x09, 0x0f, 0
-#define        SR_RESERVED_09_1        0x09, 0xf0, 4
-#define        RG_RX_CTRL      (0x0a)
-#define        SR_PDT_THRES            0x0a, 0x0f, 0
-#define        SR_RESERVED_0a_1        0x0a, 0xf0, 4
-#define        RG_SFD_VALUE    (0x0b)
-#define        SR_SFD_VALUE            0x0b, 0xff, 0
-#define        RG_TRX_CTRL_2   (0x0c)
-#define        SR_OQPSK_DATA_RATE      0x0c, 0x03, 0
-#define        SR_SUB_MODE             0x0c, 0x04, 2
-#define        SR_BPSK_QPSK            0x0c, 0x08, 3
-#define        SR_OQPSK_SUB1_RC_EN     0x0c, 0x10, 4
-#define        SR_RESERVED_0c_5        0x0c, 0x60, 5
-#define        SR_RX_SAFE_MODE         0x0c, 0x80, 7
-#define        RG_ANT_DIV      (0x0d)
-#define        SR_ANT_CTRL             0x0d, 0x03, 0
-#define        SR_ANT_EXT_SW_EN        0x0d, 0x04, 2
-#define        SR_ANT_DIV_EN           0x0d, 0x08, 3
-#define        SR_RESERVED_0d_2        0x0d, 0x70, 4
-#define        SR_ANT_SEL              0x0d, 0x80, 7
-#define        RG_IRQ_MASK     (0x0e)
-#define        SR_IRQ_MASK             0x0e, 0xff, 0
-#define        RG_IRQ_STATUS   (0x0f)
-#define        SR_IRQ_0_PLL_LOCK       0x0f, 0x01, 0
-#define        SR_IRQ_1_PLL_UNLOCK     0x0f, 0x02, 1
-#define        SR_IRQ_2_RX_START       0x0f, 0x04, 2
-#define        SR_IRQ_3_TRX_END        0x0f, 0x08, 3
-#define        SR_IRQ_4_CCA_ED_DONE    0x0f, 0x10, 4
-#define        SR_IRQ_5_AMI            0x0f, 0x20, 5
-#define        SR_IRQ_6_TRX_UR         0x0f, 0x40, 6
-#define        SR_IRQ_7_BAT_LOW        0x0f, 0x80, 7
-#define        RG_VREG_CTRL    (0x10)
-#define        SR_RESERVED_10_6        0x10, 0x03, 0
-#define        SR_DVDD_OK              0x10, 0x04, 2
-#define        SR_DVREG_EXT            0x10, 0x08, 3
-#define        SR_RESERVED_10_3        0x10, 0x30, 4
-#define        SR_AVDD_OK              0x10, 0x40, 6
-#define        SR_AVREG_EXT            0x10, 0x80, 7
-#define        RG_BATMON       (0x11)
-#define        SR_BATMON_VTH           0x11, 0x0f, 0
-#define        SR_BATMON_HR            0x11, 0x10, 4
-#define        SR_BATMON_OK            0x11, 0x20, 5
-#define        SR_RESERVED_11_1        0x11, 0xc0, 6
-#define        RG_XOSC_CTRL    (0x12)
-#define        SR_XTAL_TRIM            0x12, 0x0f, 0
-#define        SR_XTAL_MODE            0x12, 0xf0, 4
-#define        RG_RX_SYN       (0x15)
-#define        SR_RX_PDT_LEVEL         0x15, 0x0f, 0
-#define        SR_RESERVED_15_2        0x15, 0x70, 4
-#define        SR_RX_PDT_DIS           0x15, 0x80, 7
-#define        RG_XAH_CTRL_1   (0x17)
-#define        SR_RESERVED_17_8        0x17, 0x01, 0
-#define        SR_AACK_PROM_MODE       0x17, 0x02, 1
-#define        SR_AACK_ACK_TIME        0x17, 0x04, 2
-#define        SR_RESERVED_17_5        0x17, 0x08, 3
-#define        SR_AACK_UPLD_RES_FT     0x17, 0x10, 4
-#define        SR_AACK_FLTR_RES_FT     0x17, 0x20, 5
-#define        SR_CSMA_LBT_MODE        0x17, 0x40, 6
-#define        SR_RESERVED_17_1        0x17, 0x80, 7
-#define        RG_FTN_CTRL     (0x18)
-#define        SR_RESERVED_18_2        0x18, 0x7f, 0
-#define        SR_FTN_START            0x18, 0x80, 7
-#define        RG_PLL_CF       (0x1a)
-#define        SR_RESERVED_1a_2        0x1a, 0x7f, 0
-#define        SR_PLL_CF_START         0x1a, 0x80, 7
-#define        RG_PLL_DCU      (0x1b)
-#define        SR_RESERVED_1b_3        0x1b, 0x3f, 0
-#define        SR_RESERVED_1b_2        0x1b, 0x40, 6
-#define        SR_PLL_DCU_START        0x1b, 0x80, 7
-#define        RG_PART_NUM     (0x1c)
-#define        SR_PART_NUM             0x1c, 0xff, 0
-#define        RG_VERSION_NUM  (0x1d)
-#define        SR_VERSION_NUM          0x1d, 0xff, 0
-#define        RG_MAN_ID_0     (0x1e)
-#define        SR_MAN_ID_0             0x1e, 0xff, 0
-#define        RG_MAN_ID_1     (0x1f)
-#define        SR_MAN_ID_1             0x1f, 0xff, 0
-#define        RG_SHORT_ADDR_0 (0x20)
-#define        SR_SHORT_ADDR_0         0x20, 0xff, 0
-#define        RG_SHORT_ADDR_1 (0x21)
-#define        SR_SHORT_ADDR_1         0x21, 0xff, 0
-#define        RG_PAN_ID_0     (0x22)
-#define        SR_PAN_ID_0             0x22, 0xff, 0
-#define        RG_PAN_ID_1     (0x23)
-#define        SR_PAN_ID_1             0x23, 0xff, 0
-#define        RG_IEEE_ADDR_0  (0x24)
-#define        SR_IEEE_ADDR_0          0x24, 0xff, 0
-#define        RG_IEEE_ADDR_1  (0x25)
-#define        SR_IEEE_ADDR_1          0x25, 0xff, 0
-#define        RG_IEEE_ADDR_2  (0x26)
-#define        SR_IEEE_ADDR_2          0x26, 0xff, 0
-#define        RG_IEEE_ADDR_3  (0x27)
-#define        SR_IEEE_ADDR_3          0x27, 0xff, 0
-#define        RG_IEEE_ADDR_4  (0x28)
-#define        SR_IEEE_ADDR_4          0x28, 0xff, 0
-#define        RG_IEEE_ADDR_5  (0x29)
-#define        SR_IEEE_ADDR_5          0x29, 0xff, 0
-#define        RG_IEEE_ADDR_6  (0x2a)
-#define        SR_IEEE_ADDR_6          0x2a, 0xff, 0
-#define        RG_IEEE_ADDR_7  (0x2b)
-#define        SR_IEEE_ADDR_7          0x2b, 0xff, 0
-#define        RG_XAH_CTRL_0   (0x2c)
-#define        SR_SLOTTED_OPERATION    0x2c, 0x01, 0
-#define        SR_MAX_CSMA_RETRIES     0x2c, 0x0e, 1
-#define        SR_MAX_FRAME_RETRIES    0x2c, 0xf0, 4
-#define        RG_CSMA_SEED_0  (0x2d)
-#define        SR_CSMA_SEED_0          0x2d, 0xff, 0
-#define        RG_CSMA_SEED_1  (0x2e)
-#define        SR_CSMA_SEED_1          0x2e, 0x07, 0
-#define        SR_AACK_I_AM_COORD      0x2e, 0x08, 3
-#define        SR_AACK_DIS_ACK         0x2e, 0x10, 4
-#define        SR_AACK_SET_PD          0x2e, 0x20, 5
-#define        SR_AACK_FVN_MODE        0x2e, 0xc0, 6
-#define        RG_CSMA_BE      (0x2f)
-#define        SR_MIN_BE               0x2f, 0x0f, 0
-#define        SR_MAX_BE               0x2f, 0xf0, 4
+#define RG_TRX_STATUS  (0x01)
+#define SR_TRX_STATUS          0x01, 0x1f, 0
+#define SR_RESERVED_01_3       0x01, 0x20, 5
+#define SR_CCA_STATUS          0x01, 0x40, 6
+#define SR_CCA_DONE            0x01, 0x80, 7
+#define RG_TRX_STATE   (0x02)
+#define SR_TRX_CMD             0x02, 0x1f, 0
+#define SR_TRAC_STATUS         0x02, 0xe0, 5
+#define RG_TRX_CTRL_0  (0x03)
+#define SR_CLKM_CTRL           0x03, 0x07, 0
+#define SR_CLKM_SHA_SEL                0x03, 0x08, 3
+#define SR_PAD_IO_CLKM         0x03, 0x30, 4
+#define SR_PAD_IO              0x03, 0xc0, 6
+#define RG_TRX_CTRL_1  (0x04)
+#define SR_IRQ_POLARITY                0x04, 0x01, 0
+#define SR_IRQ_MASK_MODE       0x04, 0x02, 1
+#define SR_SPI_CMD_MODE                0x04, 0x0c, 2
+#define SR_RX_BL_CTRL          0x04, 0x10, 4
+#define SR_TX_AUTO_CRC_ON      0x04, 0x20, 5
+#define SR_IRQ_2_EXT_EN                0x04, 0x40, 6
+#define SR_PA_EXT_EN           0x04, 0x80, 7
+#define RG_PHY_TX_PWR  (0x05)
+#define SR_TX_PWR              0x05, 0x0f, 0
+#define SR_PA_LT               0x05, 0x30, 4
+#define SR_PA_BUF_LT           0x05, 0xc0, 6
+#define RG_PHY_RSSI    (0x06)
+#define SR_RSSI                        0x06, 0x1f, 0
+#define SR_RND_VALUE           0x06, 0x60, 5
+#define SR_RX_CRC_VALID                0x06, 0x80, 7
+#define RG_PHY_ED_LEVEL        (0x07)
+#define SR_ED_LEVEL            0x07, 0xff, 0
+#define RG_PHY_CC_CCA  (0x08)
+#define SR_CHANNEL             0x08, 0x1f, 0
+#define SR_CCA_MODE            0x08, 0x60, 5
+#define SR_CCA_REQUEST         0x08, 0x80, 7
+#define RG_CCA_THRES   (0x09)
+#define SR_CCA_ED_THRES                0x09, 0x0f, 0
+#define SR_RESERVED_09_1       0x09, 0xf0, 4
+#define RG_RX_CTRL     (0x0a)
+#define SR_PDT_THRES           0x0a, 0x0f, 0
+#define SR_RESERVED_0a_1       0x0a, 0xf0, 4
+#define RG_SFD_VALUE   (0x0b)
+#define SR_SFD_VALUE           0x0b, 0xff, 0
+#define RG_TRX_CTRL_2  (0x0c)
+#define SR_OQPSK_DATA_RATE     0x0c, 0x03, 0
+#define SR_SUB_MODE            0x0c, 0x04, 2
+#define SR_BPSK_QPSK           0x0c, 0x08, 3
+#define SR_OQPSK_SUB1_RC_EN    0x0c, 0x10, 4
+#define SR_RESERVED_0c_5       0x0c, 0x60, 5
+#define SR_RX_SAFE_MODE                0x0c, 0x80, 7
+#define RG_ANT_DIV     (0x0d)
+#define SR_ANT_CTRL            0x0d, 0x03, 0
+#define SR_ANT_EXT_SW_EN       0x0d, 0x04, 2
+#define SR_ANT_DIV_EN          0x0d, 0x08, 3
+#define SR_RESERVED_0d_2       0x0d, 0x70, 4
+#define SR_ANT_SEL             0x0d, 0x80, 7
+#define RG_IRQ_MASK    (0x0e)
+#define SR_IRQ_MASK            0x0e, 0xff, 0
+#define RG_IRQ_STATUS  (0x0f)
+#define SR_IRQ_0_PLL_LOCK      0x0f, 0x01, 0
+#define SR_IRQ_1_PLL_UNLOCK    0x0f, 0x02, 1
+#define SR_IRQ_2_RX_START      0x0f, 0x04, 2
+#define SR_IRQ_3_TRX_END       0x0f, 0x08, 3
+#define SR_IRQ_4_CCA_ED_DONE   0x0f, 0x10, 4
+#define SR_IRQ_5_AMI           0x0f, 0x20, 5
+#define SR_IRQ_6_TRX_UR                0x0f, 0x40, 6
+#define SR_IRQ_7_BAT_LOW       0x0f, 0x80, 7
+#define RG_VREG_CTRL   (0x10)
+#define SR_RESERVED_10_6       0x10, 0x03, 0
+#define SR_DVDD_OK             0x10, 0x04, 2
+#define SR_DVREG_EXT           0x10, 0x08, 3
+#define SR_RESERVED_10_3       0x10, 0x30, 4
+#define SR_AVDD_OK             0x10, 0x40, 6
+#define SR_AVREG_EXT           0x10, 0x80, 7
+#define RG_BATMON      (0x11)
+#define SR_BATMON_VTH          0x11, 0x0f, 0
+#define SR_BATMON_HR           0x11, 0x10, 4
+#define SR_BATMON_OK           0x11, 0x20, 5
+#define SR_RESERVED_11_1       0x11, 0xc0, 6
+#define RG_XOSC_CTRL   (0x12)
+#define SR_XTAL_TRIM           0x12, 0x0f, 0
+#define SR_XTAL_MODE           0x12, 0xf0, 4
+#define RG_RX_SYN      (0x15)
+#define SR_RX_PDT_LEVEL                0x15, 0x0f, 0
+#define SR_RESERVED_15_2       0x15, 0x70, 4
+#define SR_RX_PDT_DIS          0x15, 0x80, 7
+#define RG_XAH_CTRL_1  (0x17)
+#define SR_RESERVED_17_8       0x17, 0x01, 0
+#define SR_AACK_PROM_MODE      0x17, 0x02, 1
+#define SR_AACK_ACK_TIME       0x17, 0x04, 2
+#define SR_RESERVED_17_5       0x17, 0x08, 3
+#define SR_AACK_UPLD_RES_FT    0x17, 0x10, 4
+#define SR_AACK_FLTR_RES_FT    0x17, 0x20, 5
+#define SR_CSMA_LBT_MODE       0x17, 0x40, 6
+#define SR_RESERVED_17_1       0x17, 0x80, 7
+#define RG_FTN_CTRL    (0x18)
+#define SR_RESERVED_18_2       0x18, 0x7f, 0
+#define SR_FTN_START           0x18, 0x80, 7
+#define RG_PLL_CF      (0x1a)
+#define SR_RESERVED_1a_2       0x1a, 0x7f, 0
+#define SR_PLL_CF_START                0x1a, 0x80, 7
+#define RG_PLL_DCU     (0x1b)
+#define SR_RESERVED_1b_3       0x1b, 0x3f, 0
+#define SR_RESERVED_1b_2       0x1b, 0x40, 6
+#define SR_PLL_DCU_START       0x1b, 0x80, 7
+#define RG_PART_NUM    (0x1c)
+#define SR_PART_NUM            0x1c, 0xff, 0
+#define RG_VERSION_NUM (0x1d)
+#define SR_VERSION_NUM         0x1d, 0xff, 0
+#define RG_MAN_ID_0    (0x1e)
+#define SR_MAN_ID_0            0x1e, 0xff, 0
+#define RG_MAN_ID_1    (0x1f)
+#define SR_MAN_ID_1            0x1f, 0xff, 0
+#define RG_SHORT_ADDR_0        (0x20)
+#define SR_SHORT_ADDR_0                0x20, 0xff, 0
+#define RG_SHORT_ADDR_1        (0x21)
+#define SR_SHORT_ADDR_1                0x21, 0xff, 0
+#define RG_PAN_ID_0    (0x22)
+#define SR_PAN_ID_0            0x22, 0xff, 0
+#define RG_PAN_ID_1    (0x23)
+#define SR_PAN_ID_1            0x23, 0xff, 0
+#define RG_IEEE_ADDR_0 (0x24)
+#define SR_IEEE_ADDR_0         0x24, 0xff, 0
+#define RG_IEEE_ADDR_1 (0x25)
+#define SR_IEEE_ADDR_1         0x25, 0xff, 0
+#define RG_IEEE_ADDR_2 (0x26)
+#define SR_IEEE_ADDR_2         0x26, 0xff, 0
+#define RG_IEEE_ADDR_3 (0x27)
+#define SR_IEEE_ADDR_3         0x27, 0xff, 0
+#define RG_IEEE_ADDR_4 (0x28)
+#define SR_IEEE_ADDR_4         0x28, 0xff, 0
+#define RG_IEEE_ADDR_5 (0x29)
+#define SR_IEEE_ADDR_5         0x29, 0xff, 0
+#define RG_IEEE_ADDR_6 (0x2a)
+#define SR_IEEE_ADDR_6         0x2a, 0xff, 0
+#define RG_IEEE_ADDR_7 (0x2b)
+#define SR_IEEE_ADDR_7         0x2b, 0xff, 0
+#define RG_XAH_CTRL_0  (0x2c)
+#define SR_SLOTTED_OPERATION   0x2c, 0x01, 0
+#define SR_MAX_CSMA_RETRIES    0x2c, 0x0e, 1
+#define SR_MAX_FRAME_RETRIES   0x2c, 0xf0, 4
+#define RG_CSMA_SEED_0 (0x2d)
+#define SR_CSMA_SEED_0         0x2d, 0xff, 0
+#define RG_CSMA_SEED_1 (0x2e)
+#define SR_CSMA_SEED_1         0x2e, 0x07, 0
+#define SR_AACK_I_AM_COORD     0x2e, 0x08, 3
+#define SR_AACK_DIS_ACK                0x2e, 0x10, 4
+#define SR_AACK_SET_PD         0x2e, 0x20, 5
+#define SR_AACK_FVN_MODE       0x2e, 0xc0, 6
+#define RG_CSMA_BE     (0x2f)
+#define SR_MIN_BE              0x2f, 0x0f, 0
+#define SR_MAX_BE              0x2f, 0xf0, 4
 
 #define CMD_REG                0x80
 #define CMD_REG_MASK   0x3f
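
The realigned SR_* defines above are (register address, bit mask, shift) triplets that expand into three arguments at once, so a subregister read is just mask-and-shift. A small standalone model of the trick, assuming that triplet convention, with values taken from the defines above:

#include <stdio.h>

#define SR_TRX_STATUS	0x01, 0x1f, 0
#define SR_TRAC_STATUS	0x02, 0xe0, 5

/* One macro argument expands to the full (addr, mask, shift) triplet. */
static unsigned int read_subreg(unsigned int regval, unsigned int addr,
				unsigned int mask, unsigned int shift)
{
	(void)addr;		/* on real hardware this selects the register */
	return (regval & mask) >> shift;
}

int main(void)
{
	unsigned int raw = 0xa8;	/* pretend TRX_STATE readback */

	printf("TRX_CMD=%u TRAC=%u\n",
	       read_subreg(raw, 0x02, 0x1f, 0),	/* SR_TRX_CMD spelled out */
	       read_subreg(raw, SR_TRAC_STATUS));
	return 0;
}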
@@ -292,6 +294,8 @@ struct at86rf230_local {
 #define STATE_BUSY_RX_AACK_NOCLK 0x1E
 #define STATE_TRANSITION_IN_PROGRESS 0x1F
 
+#define TRX_STATE_MASK         (0x1F)
+
 #define AT86RF2XX_NUMREGS 0x3F
 
 static void
@@ -336,6 +340,14 @@ at86rf230_write_subreg(struct at86rf230_local *lp,
        return regmap_update_bits(lp->regmap, addr, mask, data << shift);
 }
 
+static inline void
+at86rf230_slp_tr_rising_edge(struct at86rf230_local *lp)
+{
+       gpio_set_value(lp->slp_tr, 1);
+       udelay(1);
+       gpio_set_value(lp->slp_tr, 0);
+}
+
 static bool
 at86rf230_reg_writeable(struct device *dev, unsigned int reg)
 {
@@ -509,7 +521,7 @@ at86rf230_async_state_assert(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
        const u8 *buf = ctx->buf;
-       const u8 trx_state = buf[1] & 0x1f;
+       const u8 trx_state = buf[1] & TRX_STATE_MASK;
 
        /* Assert state change */
        if (trx_state != ctx->to_state) {
@@ -609,11 +621,17 @@ at86rf230_async_state_delay(void *context)
                switch (ctx->to_state) {
                case STATE_RX_AACK_ON:
                        tim = ktime_set(0, c->t_off_to_aack * NSEC_PER_USEC);
+                       /* a state change from TRX_OFF to RX_AACK_ON does a
+                        * calibration; we need to reset the timeout for the
+                        * next one.
+                        */
+                       lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
                        goto change;
+               case STATE_TX_ARET_ON:
                case STATE_TX_ON:
                        tim = ktime_set(0, c->t_off_to_tx_on * NSEC_PER_USEC);
-                       /* state change from TRX_OFF to TX_ON to do a
-                        * calibration, we need to reset the timeout for the
+                       /* a state change from TRX_OFF to TX_ON or ARET_ON does
+                        * a calibration; we need to reset the timeout for the
                         * next one.
                         */
                        lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
@@ -667,7 +685,7 @@ at86rf230_async_state_change_start(void *context)
        struct at86rf230_state_change *ctx = context;
        struct at86rf230_local *lp = ctx->lp;
        u8 *buf = ctx->buf;
-       const u8 trx_state = buf[1] & 0x1f;
+       const u8 trx_state = buf[1] & TRX_STATE_MASK;
        int rc;
 
        /* Check for "possible" STATE_TRANSITION_IN_PROGRESS */
@@ -772,16 +790,6 @@ at86rf230_tx_on(void *context)
                                     at86rf230_tx_complete, true);
 }
 
-static void
-at86rf230_tx_trac_error(void *context)
-{
-       struct at86rf230_state_change *ctx = context;
-       struct at86rf230_local *lp = ctx->lp;
-
-       at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-                                    at86rf230_tx_on, true);
-}
-
 static void
 at86rf230_tx_trac_check(void *context)
 {
@@ -791,12 +799,12 @@ at86rf230_tx_trac_check(void *context)
        const u8 trac = (buf[1] & 0xe0) >> 5;
 
        /* If the TRAC status is different from zero we need to do a state change
-        * to STATE_FORCE_TRX_OFF then STATE_TX_ON to recover the transceiver
-        * state to TX_ON.
+        * to STATE_FORCE_TRX_OFF then STATE_RX_AACK_ON to recover the
+        * transceiver.
         */
        if (trac)
                at86rf230_async_state_change(lp, ctx, STATE_FORCE_TRX_OFF,
-                                            at86rf230_tx_trac_error, true);
+                                            at86rf230_tx_on, true);
        else
                at86rf230_tx_on(context);
 }
@@ -941,13 +949,18 @@ at86rf230_write_frame_complete(void *context)
        u8 *buf = ctx->buf;
        int rc;
 
-       buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
-       buf[1] = STATE_BUSY_TX;
        ctx->trx.len = 2;
-       ctx->msg.complete = NULL;
-       rc = spi_async(lp->spi, &ctx->msg);
-       if (rc)
-               at86rf230_async_error(lp, ctx, rc);
+
+       if (gpio_is_valid(lp->slp_tr)) {
+               at86rf230_slp_tr_rising_edge(lp);
+       } else {
+               buf[0] = (RG_TRX_STATE & CMD_REG_MASK) | CMD_REG | CMD_WRITE;
+               buf[1] = STATE_BUSY_TX;
+               ctx->msg.complete = NULL;
+               rc = spi_async(lp->spi, &ctx->msg);
+               if (rc)
+                       at86rf230_async_error(lp, ctx, rc);
+       }
 }
 
 static void
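
The write-frame-complete hunk above picks between two TX triggers: a rising edge on the SLP_TR GPIO when one is wired up, or an SPI write of TRX_STATE otherwise. A toy sketch of that design choice (gpio_pulse() and spi_write_state() are invented stand-ins; the GPIO path saves an SPI transfer on the transmit hot path):

#include <stdbool.h>
#include <stdio.h>

static void gpio_pulse(void)
{
	/* models at86rf230_slp_tr_rising_edge(): high, ~1 us, low */
	printf("SLP_TR rising edge -> frame goes out\n");
}

static void spi_write_state(void)
{
	printf("SPI write TRX_STATE = BUSY_TX -> frame goes out\n");
}

static void trigger_tx(bool have_slp_tr)
{
	if (have_slp_tr)
		gpio_pulse();		/* no SPI round-trip needed */
	else
		spi_write_state();	/* fallback, as before the patch */
}

int main(void)
{
	trigger_tx(true);
	trigger_tx(false);
	return 0;
}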
@@ -993,12 +1006,21 @@ at86rf230_xmit_start(void *context)
         * are in STATE_TX_ON. The path differs here, so we change
         * the complete handler.
         */
-       if (lp->tx_aret)
-               at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
-                                            at86rf230_xmit_tx_on, false);
-       else
+       if (lp->tx_aret) {
+               if (lp->is_tx_from_off) {
+                       lp->is_tx_from_off = false;
+                       at86rf230_async_state_change(lp, ctx, STATE_TX_ARET_ON,
+                                                    at86rf230_xmit_tx_on,
+                                                    false);
+               } else {
+                       at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
+                                                    at86rf230_xmit_tx_on,
+                                                    false);
+               }
+       } else {
                at86rf230_async_state_change(lp, ctx, STATE_TX_ON,
                                             at86rf230_write_frame, false);
+       }
 }
 
 static int
@@ -1017,11 +1039,13 @@ at86rf230_xmit(struct ieee802154_hw *hw, struct sk_buff *skb)
         * to TX_ON, lp->cal_timeout should be reinitialized by the state_delay
         * function so that the next calibration starts within 5 minutes.
         */
-       if (time_is_before_jiffies(lp->cal_timeout))
+       if (time_is_before_jiffies(lp->cal_timeout)) {
+               lp->is_tx_from_off = true;
                at86rf230_async_state_change(lp, ctx, STATE_TRX_OFF,
                                             at86rf230_xmit_start, false);
-       else
+       } else {
                at86rf230_xmit_start(ctx);
+       }
 
        return 0;
 }
@@ -1037,9 +1061,6 @@ at86rf230_ed(struct ieee802154_hw *hw, u8 *level)
 static int
 at86rf230_start(struct ieee802154_hw *hw)
 {
-       struct at86rf230_local *lp = hw->priv;
-
-       lp->cal_timeout = jiffies + AT86RF2XX_CAL_LOOP_TIMEOUT;
        return at86rf230_sync_state_change(hw->priv, STATE_RX_AACK_ON);
 }
 
@@ -1673,6 +1694,7 @@ static int at86rf230_probe(struct spi_device *spi)
        lp = hw->priv;
        lp->hw = hw;
        lp->spi = spi;
+       lp->slp_tr = slp_tr;
        hw->parent = &spi->dev;
        hw->vif_data_size = sizeof(*lp);
        ieee802154_random_extended_addr(&hw->phy->perm_extended_addr);
index b227a13f6473404a5082a0a99d4e7067b3daeaf7..9f59f17dc317a254641bdc48973ce78e089761bc 100644 (file)
@@ -599,10 +599,18 @@ static int macvlan_open(struct net_device *dev)
                        goto del_unicast;
        }
 
+       if (dev->flags & IFF_PROMISC) {
+               err = dev_set_promiscuity(lowerdev, 1);
+               if (err < 0)
+                       goto clear_multi;
+       }
+
 hash_add:
        macvlan_hash_add(vlan);
        return 0;
 
+clear_multi:
+       dev_set_allmulti(lowerdev, -1);
 del_unicast:
        dev_uc_del(lowerdev, dev->dev_addr);
 out:
@@ -638,6 +646,9 @@ static int macvlan_stop(struct net_device *dev)
        if (dev->flags & IFF_ALLMULTI)
                dev_set_allmulti(lowerdev, -1);
 
+       if (dev->flags & IFF_PROMISC)
+               dev_set_promiscuity(lowerdev, -1);
+
        dev_uc_del(lowerdev, dev->dev_addr);
 
 hash_del:
@@ -696,6 +707,10 @@ static void macvlan_change_rx_flags(struct net_device *dev, int change)
        if (dev->flags & IFF_UP) {
                if (change & IFF_ALLMULTI)
                        dev_set_allmulti(lowerdev, dev->flags & IFF_ALLMULTI ? 1 : -1);
+               if (change & IFF_PROMISC)
+                       dev_set_promiscuity(lowerdev,
+                                           dev->flags & IFF_PROMISC ? 1 : -1);
+
        }
 }
 
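
The macvlan hunks above lean on dev_set_promiscuity()'s counted semantics: +1 takes a reference on the lower device's promiscuous mode, -1 drops it, and the mode stays on while the count is non-zero. A toy model of that contract (toy_dev and friends are invented for illustration):

#include <stdio.h>

struct toy_dev { int promisc_count; };

static void toy_set_promiscuity(struct toy_dev *dev, int inc)
{
	dev->promisc_count += inc;
	printf("promisc %s (count=%d)\n",
	       dev->promisc_count > 0 ? "on " : "off", dev->promisc_count);
}

int main(void)
{
	struct toy_dev lower = { 0 };

	toy_set_promiscuity(&lower, 1);	 /* macvlan A opens with IFF_PROMISC */
	toy_set_promiscuity(&lower, 1);	 /* macvlan B on the same lowerdev */
	toy_set_promiscuity(&lower, -1); /* macvlan A stops */
	toy_set_promiscuity(&lower, -1); /* macvlan B stops: mode finally off */
	return 0;
}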
index 8fadaa14b9f0fbd97b689d7bab562eccd30d17bf..70641d2c042957e7e154b4a1d265f39fe3ffa386 100644 (file)
@@ -27,6 +27,7 @@ config AMD_PHY
 config AMD_XGBE_PHY
        tristate "Driver for the AMD 10GbE (amd-xgbe) PHYs"
        depends on (OF || ACPI) && HAS_IOMEM
+       depends on ARM64 || COMPILE_TEST
        ---help---
          Currently supports the AMD 10GbE PHY
 
index fb276f64cd6400cc7617c2586582c378eb2e9c53..34a75cba3b739ce5b4f28e1549915e19502fb4cc 100644 (file)
@@ -755,6 +755,45 @@ static int amd_xgbe_phy_set_mode(struct phy_device *phydev,
        return ret;
 }
 
+static bool amd_xgbe_phy_use_xgmii_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_10000baseKR_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_10000)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool amd_xgbe_phy_use_gmii_2500_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_2500baseX_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_2500)
+                       return true;
+       }
+
+       return false;
+}
+
+static bool amd_xgbe_phy_use_gmii_mode(struct phy_device *phydev)
+{
+       if (phydev->autoneg == AUTONEG_ENABLE) {
+               if (phydev->advertising & ADVERTISED_1000baseKX_Full)
+                       return true;
+       } else {
+               if (phydev->speed == SPEED_1000)
+                       return true;
+       }
+
+       return false;
+}
+
 static int amd_xgbe_phy_set_an(struct phy_device *phydev, bool enable,
                               bool restart)
 {
@@ -1235,11 +1274,11 @@ static int amd_xgbe_phy_config_init(struct phy_device *phydev)
        /* Set initial mode - call the mode setting routines
         * directly to insure we are properly configured
         */
-       if (phydev->advertising & SUPPORTED_10000baseKR_Full)
+       if (amd_xgbe_phy_use_xgmii_mode(phydev))
                ret = amd_xgbe_phy_xgmii_mode(phydev);
-       else if (phydev->advertising & SUPPORTED_1000baseKX_Full)
+       else if (amd_xgbe_phy_use_gmii_mode(phydev))
                ret = amd_xgbe_phy_gmii_mode(phydev);
-       else if (phydev->advertising & SUPPORTED_2500baseX_Full)
+       else if (amd_xgbe_phy_use_gmii_2500_mode(phydev))
                ret = amd_xgbe_phy_gmii_2500_mode(phydev);
        else
                ret = -EINVAL;
index 64c74c6a482806bfc5d2bb4f821b4b1ef085adfd..b5dc59de094eef06838d4601cacd9dbeaba04a6a 100644 (file)
@@ -404,7 +404,7 @@ static struct phy_driver bcm7xxx_driver[] = {
        .name           = "Broadcom BCM7425",
        .features       = PHY_GBIT_FEATURES |
                          SUPPORTED_Pause | SUPPORTED_Asym_Pause,
-       .flags          = 0,
+       .flags          = PHY_IS_INTERNAL,
        .config_init    = bcm7xxx_config_init,
        .config_aneg    = genphy_config_aneg,
        .read_status    = genphy_read_status,
index 496e02f961d37039ff56d5e45a8aa28aa0f44b91..00cb41e713123689803e5dddfa527c3ebaee26ae 100644 (file)
@@ -47,7 +47,7 @@
 #define PSF_TX         0x1000
 #define EXT_EVENT      1
 #define CAL_EVENT      7
-#define CAL_TRIGGER    7
+#define CAL_TRIGGER    1
 #define DP83640_N_PINS 12
 
 #define MII_DP83640_MICR 0x11
@@ -496,7 +496,9 @@ static int ptp_dp83640_enable(struct ptp_clock_info *ptp,
                        else
                                evnt |= EVNT_RISE;
                }
+               mutex_lock(&clock->extreg_lock);
                ext_write(0, phydev, PAGE5, PTP_EVNT, evnt);
+               mutex_unlock(&clock->extreg_lock);
                return 0;
 
        case PTP_CLK_REQ_PEROUT:
@@ -532,6 +534,8 @@ static u8 status_frame_src[6] = { 0x08, 0x00, 0x17, 0x0B, 0x6B, 0x0F };
 
 static void enable_status_frames(struct phy_device *phydev, bool on)
 {
+       struct dp83640_private *dp83640 = phydev->priv;
+       struct dp83640_clock *clock = dp83640->clock;
        u16 cfg0 = 0, ver;
 
        if (on)
@@ -539,9 +543,13 @@ static void enable_status_frames(struct phy_device *phydev, bool on)
 
        ver = (PSF_PTPVER & VERSIONPTP_MASK) << VERSIONPTP_SHIFT;
 
+       mutex_lock(&clock->extreg_lock);
+
        ext_write(0, phydev, PAGE5, PSF_CFG0, cfg0);
        ext_write(0, phydev, PAGE6, PSF_CFG1, ver);
 
+       mutex_unlock(&clock->extreg_lock);
+
        if (!phydev->attached_dev) {
                pr_warn("expected to find an attached netdevice\n");
                return;
@@ -838,7 +846,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
        list_del_init(&rxts->list);
        phy2rxts(phy_rxts, rxts);
 
-       spin_lock_irqsave(&dp83640->rx_queue.lock, flags);
+       spin_lock(&dp83640->rx_queue.lock);
        skb_queue_walk(&dp83640->rx_queue, skb) {
                struct dp83640_skb_info *skb_info;
 
@@ -853,7 +861,7 @@ static void decode_rxts(struct dp83640_private *dp83640,
                        break;
                }
        }
-       spin_unlock_irqrestore(&dp83640->rx_queue.lock, flags);
+       spin_unlock(&dp83640->rx_queue.lock);
 
        if (!shhwtstamps)
                list_add_tail(&rxts->list, &dp83640->rxts);
@@ -1173,11 +1181,18 @@ static int dp83640_config_init(struct phy_device *phydev)
 
        if (clock->chosen && !list_empty(&clock->phylist))
                recalibrate(clock);
-       else
+       else {
+               mutex_lock(&clock->extreg_lock);
                enable_broadcast(phydev, clock->page, 1);
+               mutex_unlock(&clock->extreg_lock);
+       }
 
        enable_status_frames(phydev, true);
+
+       mutex_lock(&clock->extreg_lock);
        ext_write(0, phydev, PAGE4, PTP_CTL, PTP_ENABLE);
+       mutex_unlock(&clock->extreg_lock);
+
        return 0;
 }
 
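
The dp83640 hunks above serialize every ext_write() with clock->extreg_lock because extended-register access is a two-step sequence, select a page then write a register, and an unserialized second thread can retarget the page between someone else's two steps. The shape of the fix, modeled with pthreads (all names invented):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t extreg_lock = PTHREAD_MUTEX_INITIALIZER;
static int selected_page;
static int regs[8][32];

static void toy_ext_write(int page, int reg, int val)
{
	pthread_mutex_lock(&extreg_lock);
	selected_page = page;		/* step 1: page-select write */
	regs[selected_page][reg] = val;	/* step 2: the actual register write */
	pthread_mutex_unlock(&extreg_lock);
}

int main(void)
{
	toy_ext_write(5, 3, 0x1234);	/* e.g. PAGE5 / PTP_EVNT */
	printf("page5 reg3 = 0x%x\n", regs[5][3]);
	return 0;
}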
index c9cb486c753d053c8b6da529a0c078d89f9c2a2f..53d18150f4e291bb4bb047a18a9877d3a82a08f4 100644 (file)
@@ -168,7 +168,10 @@ static struct mii_bus *mdio_gpio_bus_init(struct device *dev,
                if (!new_bus->irq[i])
                        new_bus->irq[i] = PHY_POLL;
 
-       snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
+       if (bus_id != -1)
+               snprintf(new_bus->id, MII_BUS_ID_SIZE, "gpio-%x", bus_id);
+       else
+               strncpy(new_bus->id, "gpio", MII_BUS_ID_SIZE);
 
        if (devm_gpio_request(dev, bitbang->mdc, "mdc"))
                goto out_free_bus;
index 1190fd8f008862bc8f70f271575839d280a8f906..ebdc357c513167515baef710ba56d8b6b8e57cb9 100644 (file)
@@ -548,7 +548,8 @@ static int kszphy_probe(struct phy_device *phydev)
        }
 
        clk = devm_clk_get(&phydev->dev, "rmii-ref");
-       if (!IS_ERR(clk)) {
+       /* NOTE: clk may be NULL if building without CONFIG_HAVE_CLK */
+       if (!IS_ERR_OR_NULL(clk)) {
                unsigned long rate = clk_get_rate(clk);
                bool rmii_ref_clk_sel_25_mhz;
 
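
The micrel hunk above switches to IS_ERR_OR_NULL() because, without CONFIG_HAVE_CLK, devm_clk_get() can return NULL rather than an ERR_PTR. The kernel encodes small negative errnos in the top of the pointer range; a standalone model of the combined check:

#include <stdio.h>

#define MAX_ERRNO 4095

static int toy_is_err_or_null(const void *ptr)
{
	return !ptr || (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
	int backing = 0;
	void *ok   = &backing;
	void *null = NULL;
	void *err  = (void *)(unsigned long)-12;	/* ERR_PTR(-ENOMEM) */

	printf("ok=%d null=%d err=%d\n",
	       toy_is_err_or_null(ok), toy_is_err_or_null(null),
	       toy_is_err_or_null(err));
	return 0;
}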
index 52cd8db2c57daad2767dec72149f4cdabbcf6917..47cd578052fc2328169fcc9df304be79e7af9ac5 100644 (file)
@@ -742,6 +742,9 @@ EXPORT_SYMBOL(phy_stop);
  */
 void phy_start(struct phy_device *phydev)
 {
+       bool do_resume = false;
+       int err = 0;
+
        mutex_lock(&phydev->lock);
 
        switch (phydev->state) {
@@ -752,11 +755,22 @@ void phy_start(struct phy_device *phydev)
                phydev->state = PHY_UP;
                break;
        case PHY_HALTED:
+               /* make sure interrupts are re-enabled for the PHY */
+               err = phy_enable_interrupts(phydev);
+               if (err < 0)
+                       break;
+
                phydev->state = PHY_RESUMING;
+               do_resume = true;
+               break;
        default:
                break;
        }
        mutex_unlock(&phydev->lock);
+
+       /* if phy was suspended, bring the physical link up again */
+       if (do_resume)
+               phy_resume(phydev);
 }
 EXPORT_SYMBOL(phy_start);
 
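
The phy_start() hunk above uses a common locking idiom: decide under the lock (set do_resume while flipping the state to PHY_RESUMING), but run the potentially sleeping phy_resume() only after dropping it. A minimal standalone rendering of that idiom (names invented):

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

enum toy_state { HALTED, RESUMING, UP };

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static enum toy_state state = HALTED;

static void slow_resume(void)	/* may sleep; must not hold the lock */
{
	printf("bringing the physical link back up\n");
}

static void toy_start(void)
{
	bool do_resume = false;

	pthread_mutex_lock(&lock);
	if (state == HALTED) {
		/* interrupts would be re-enabled here, still under the lock */
		state = RESUMING;
		do_resume = true;	/* remember the deferred work */
	}
	pthread_mutex_unlock(&lock);

	if (do_resume)
		slow_resume();		/* outside the lock */
}

int main(void)
{
	toy_start();
	return 0;
}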
@@ -769,7 +783,7 @@ void phy_state_machine(struct work_struct *work)
        struct delayed_work *dwork = to_delayed_work(work);
        struct phy_device *phydev =
                        container_of(dwork, struct phy_device, state_queue);
-       bool needs_aneg = false, do_suspend = false, do_resume = false;
+       bool needs_aneg = false, do_suspend = false;
        int err = 0;
 
        mutex_lock(&phydev->lock);
@@ -888,14 +902,6 @@ void phy_state_machine(struct work_struct *work)
                }
                break;
        case PHY_RESUMING:
-               err = phy_clear_interrupt(phydev);
-               if (err)
-                       break;
-
-               err = phy_config_interrupt(phydev, PHY_INTERRUPT_ENABLED);
-               if (err)
-                       break;
-
                if (AUTONEG_ENABLE == phydev->autoneg) {
                        err = phy_aneg_done(phydev);
                        if (err < 0)
@@ -933,7 +939,6 @@ void phy_state_machine(struct work_struct *work)
                        }
                        phydev->adjust_link(phydev->attached_dev);
                }
-               do_resume = true;
                break;
        }
 
@@ -943,8 +948,6 @@ void phy_state_machine(struct work_struct *work)
                err = phy_start_aneg(phydev);
        else if (do_suspend)
                phy_suspend(phydev);
-       else if (do_resume)
-               phy_resume(phydev);
 
        if (err < 0)
                phy_error(phydev);
@@ -1053,13 +1056,14 @@ int phy_init_eee(struct phy_device *phydev, bool clk_stop_enable)
 {
        /* According to 802.3az, EEE is supported only in full-duplex mode.
         * Also EEE feature is active when core is operating with MII, GMII
-        * or RGMII. Internal PHYs are also allowed to proceed and should
-        * return an error if they do not support EEE.
+        * or RGMII (all kinds). Internal PHYs are also allowed to proceed and
+        * should return an error if they do not support EEE.
         */
        if ((phydev->duplex == DUPLEX_FULL) &&
            ((phydev->interface == PHY_INTERFACE_MODE_MII) ||
            (phydev->interface == PHY_INTERFACE_MODE_GMII) ||
-           (phydev->interface == PHY_INTERFACE_MODE_RGMII) ||
+           (phydev->interface >= PHY_INTERFACE_MODE_RGMII &&
+            phydev->interface <= PHY_INTERFACE_MODE_RGMII_TXID) ||
             phy_is_internal(phydev))) {
                int eee_lp, eee_cap, eee_adv;
                u32 lp, cap, adv;
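
The phy_init_eee() hunk above replaces a single RGMII equality test with a range check, which only works because the RGMII variants sit contiguously in the phy_interface_t enum. A sketch of the assumption and the check (enum order mirrored from the kernel's PHY_INTERFACE_MODE_* list; treat it as an assumption here):

#include <stdbool.h>
#include <stdio.h>

enum toy_interface {
	TOY_MII,
	TOY_GMII,
	TOY_RGMII,	/* the four RGMII flavors sit back to back */
	TOY_RGMII_ID,
	TOY_RGMII_RXID,
	TOY_RGMII_TXID,
};

static bool toy_is_rgmii(enum toy_interface mode)
{
	return mode >= TOY_RGMII && mode <= TOY_RGMII_TXID;
}

int main(void)
{
	printf("RGMII_RXID: %d, GMII: %d\n",
	       toy_is_rgmii(TOY_RGMII_RXID), toy_is_rgmii(TOY_GMII));
	return 0;
}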
index aa1dd926623ad622e3a15f905733e3d585aca871..b62a5e3a1c652d27e2bbb0d2a8a88990c3fef027 100644 (file)
@@ -465,6 +465,10 @@ static void pppoe_unbind_sock_work(struct work_struct *work)
        struct sock *sk = sk_pppox(po);
 
        lock_sock(sk);
+       if (po->pppoe_dev) {
+               dev_put(po->pppoe_dev);
+               po->pppoe_dev = NULL;
+       }
        pppox_unbind_sock(sk);
        release_sock(sk);
        sock_put(sk);
index c3e4da9e79ca071a06082e965a3aec5bb206a77e..8067b8fbb0eea42b106cc56ffe6bc216ae01f85a 100644 (file)
@@ -1182,7 +1182,7 @@ cdc_ncm_fill_tx_frame(struct usbnet *dev, struct sk_buff *skb, __le32 sign)
         * payload data instead.
         */
        usbnet_set_skb_tx_stats(skb_out, n,
-                               ctx->tx_curr_frame_payload - skb_out->len);
+                               (long)ctx->tx_curr_frame_payload - skb_out->len);
 
        return skb_out;
 
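
The cdc_ncm one-liner above fixes a classic C pitfall: tx_curr_frame_payload and skb_out->len are unsigned, so when NCM padding makes the frame longer than the payload the subtraction wraps to a huge positive value instead of going negative. Casting to a wider signed type first keeps the intended sign. A demonstration (both operands cast here, for portability to platforms with 32-bit long):

#include <stdio.h>

int main(void)
{
	unsigned int payload = 1400;	/* real payload bytes in the frame */
	unsigned int sent    = 1514;	/* frame length after padding */

	unsigned int wrapped = payload - sent;		/* wraps: huge value */
	long delta = (long)payload - (long)sent;	/* intended: -114 */

	printf("wrapped=%u delta=%ld\n", wrapped, delta);
	return 0;
}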
index ac4d03b328b130ab918175b1fa5c8fe55a0cbc7b..aafa1a1898e43de0d3d06e7d8367751473f25142 100644 (file)
@@ -4116,6 +4116,7 @@ static struct usb_device_id rtl8152_table[] = {
        {REALTEK_USB_DEVICE(VENDOR_ID_REALTEK, 0x8153)},
        {REALTEK_USB_DEVICE(VENDOR_ID_SAMSUNG, 0xa101)},
        {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x7205)},
+       {REALTEK_USB_DEVICE(VENDOR_ID_LENOVO,  0x304f)},
        {}
 };
 
index 733f4feb2ef3c5f11bbf99af962ecbb77253314b..3c86b107275a899f3748e4cfee82ab2bb43aff6b 100644 (file)
@@ -1285,7 +1285,7 @@ netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
                                     struct net_device *net)
 {
        struct usbnet           *dev = netdev_priv(net);
-       int                     length;
+       unsigned int                    length;
        struct urb              *urb = NULL;
        struct skb_data         *entry;
        struct driver_info      *info = dev->driver_info;
@@ -1413,7 +1413,7 @@ not_drop:
                }
        } else
                netif_dbg(dev, tx_queued, dev->net,
-                         "> tx, len %d, type 0x%x\n", length, skb->protocol);
+                         "> tx, len %u, type 0x%x\n", length, skb->protocol);
 #ifdef CONFIG_PM
 deferred:
 #endif
index 27a5f954f8e999cc809dc5ed0bad08d2e06e0f18..21a0fbf1ed947a83506de920f7f61501457bfe68 100644 (file)
@@ -2961,7 +2961,7 @@ static void __net_exit vxlan_exit_net(struct net *net)
                 * to the list by the previous loop.
                 */
                if (!net_eq(dev_net(vxlan->dev), net))
-                       unregister_netdevice_queue(dev, &list);
+                       unregister_netdevice_queue(vxlan->dev, &list);
        }
 
        unregister_netdevice_many(&list);
index 0acd079ba96bd3d2f60602ebf5f889f36da9f908..3ad79bb4f2c21c94b6c41c526a7e033e0937ed77 100644 (file)
@@ -1103,28 +1103,14 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
        struct sk_buff *skb;
        struct ath_frame_info *fi;
        struct ieee80211_tx_info *info;
-       struct ieee80211_vif *vif;
        struct ath_hw *ah = sc->sc_ah;
 
        if (sc->tx99_state || !ah->tpc_enabled)
                return MAX_RATE_POWER;
 
        skb = bf->bf_mpdu;
-       info = IEEE80211_SKB_CB(skb);
-       vif = info->control.vif;
-
-       if (!vif) {
-               max_power = sc->cur_chan->cur_txpower;
-               goto out;
-       }
-
-       if (vif->bss_conf.txpower_type != NL80211_TX_POWER_LIMITED) {
-               max_power = min_t(u8, sc->cur_chan->cur_txpower,
-                                 2 * vif->bss_conf.txpower);
-               goto out;
-       }
-
        fi = get_frame_info(skb);
+       info = IEEE80211_SKB_CB(skb);
 
        if (!AR_SREV_9300_20_OR_LATER(ah)) {
                int txpower = fi->tx_power;
@@ -1161,25 +1147,26 @@ static u8 ath_get_rate_txpower(struct ath_softc *sc, struct ath_buf *bf,
                        txpower -= 2;
 
                txpower = max(txpower, 0);
-               max_power = min_t(u8, ah->tx_power[rateidx],
-                                 2 * vif->bss_conf.txpower);
-               max_power = min_t(u8, max_power, txpower);
+               max_power = min_t(u8, ah->tx_power[rateidx], txpower);
+
+               /* XXX: clamp minimum TX power at 1 for AR9160 since if
+                * max_power is set to 0, frames are transmitted at max
+                * TX power
+                */
+               if (!max_power && !AR_SREV_9280_20_OR_LATER(ah))
+                       max_power = 1;
        } else if (!bf->bf_state.bfs_paprd) {
                if (rateidx < 8 && (info->flags & IEEE80211_TX_CTL_STBC))
                        max_power = min_t(u8, ah->tx_power_stbc[rateidx],
-                                         2 * vif->bss_conf.txpower);
+                                         fi->tx_power);
                else
                        max_power = min_t(u8, ah->tx_power[rateidx],
-                                         2 * vif->bss_conf.txpower);
-               max_power = min(max_power, fi->tx_power);
+                                         fi->tx_power);
        } else {
                max_power = ah->paprd_training_power;
        }
-out:
-       /* XXX: clamp minimum TX power at 1 for AR9160 since if max_power
-        * is set to 0, frames are transmitted at max TX power
-        */
-       return (!max_power && !AR_SREV_9280_20_OR_LATER(ah)) ? 1 : max_power;
+
+       return max_power;
 }
 
 static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf,
@@ -2129,6 +2116,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
        struct ath_node *an = NULL;
        enum ath9k_key_type keytype;
        bool short_preamble = false;
+       u8 txpower;
 
        /*
         * We check if Short Preamble is needed for the CTS rate by
@@ -2145,6 +2133,16 @@ static void setup_frame_info(struct ieee80211_hw *hw,
        if (sta)
                an = (struct ath_node *) sta->drv_priv;
 
+       if (tx_info->control.vif) {
+               struct ieee80211_vif *vif = tx_info->control.vif;
+
+               txpower = 2 * vif->bss_conf.txpower;
+       } else {
+               struct ath_softc *sc = hw->priv;
+
+               txpower = sc->cur_chan->cur_txpower;
+       }
+
        memset(fi, 0, sizeof(*fi));
        fi->txq = -1;
        if (hw_key)
@@ -2155,7 +2153,7 @@ static void setup_frame_info(struct ieee80211_hw *hw,
                fi->keyix = ATH9K_TXKEYIX_INVALID;
        fi->keytype = keytype;
        fi->framelen = framelen;
-       fi->tx_power = MAX_RATE_POWER;
+       fi->tx_power = txpower;
 
        if (!rate)
                return;
index 4ec9811f49c87744458ed16cdcec32422432dc3f..65efb146898844510aa489502ee2c9db23906c92 100644 (file)
@@ -511,11 +511,9 @@ static int brcmf_msgbuf_query_dcmd(struct brcmf_pub *drvr, int ifidx,
                                     msgbuf->rx_pktids,
                                     msgbuf->ioctl_resp_pktid);
        if (msgbuf->ioctl_resp_ret_len != 0) {
-               if (!skb) {
-                       brcmf_err("Invalid packet id idx recv'd %d\n",
-                                 msgbuf->ioctl_resp_pktid);
+               if (!skb)
                        return -EBADF;
-               }
+
                memcpy(buf, skb->data, (len < msgbuf->ioctl_resp_ret_len) ?
                                       len : msgbuf->ioctl_resp_ret_len);
        }
@@ -874,10 +872,8 @@ brcmf_msgbuf_process_txstatus(struct brcmf_msgbuf *msgbuf, void *buf)
        flowid -= BRCMF_NROF_H2D_COMMON_MSGRINGS;
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->tx_pktids, idx);
-       if (!skb) {
-               brcmf_err("Invalid packet id idx recv'd %d\n", idx);
+       if (!skb)
                return;
-       }
 
        set_bit(flowid, msgbuf->txstatus_done_map);
        commonring = msgbuf->flowrings[flowid];
@@ -1156,6 +1152,8 @@ brcmf_msgbuf_process_rx_complete(struct brcmf_msgbuf *msgbuf, void *buf)
 
        skb = brcmf_msgbuf_get_pktid(msgbuf->drvr->bus_if->dev,
                                     msgbuf->rx_pktids, idx);
+       if (!skb)
+               return;
 
        if (data_offset)
                skb_pull(skb, data_offset);
index ab019b45551b9ea9bef61a1861feba7601897a5f..f89f446e5c8ae32b5dccc42cae6234ad75283ff8 100644 (file)
@@ -21,6 +21,7 @@ config IWLWIFI
                Intel 7260 Wi-Fi Adapter
                Intel 3160 Wi-Fi Adapter
                Intel 7265 Wi-Fi Adapter
+               Intel 3165 Wi-Fi Adapter
 
 
          This driver uses the kernel's mac80211 subsystem.
index 36e786f0387bd42593fe3c8ec523831694483bea..74ad278116be3feb18b3a2a98e4034aa3145a6a1 100644 (file)
 
 /* Highest firmware API version supported */
 #define IWL7260_UCODE_API_MAX  13
-#define IWL3160_UCODE_API_MAX  13
 
 /* Oldest version we won't warn about */
 #define IWL7260_UCODE_API_OK   12
-#define IWL3160_UCODE_API_OK   12
+#define IWL3165_UCODE_API_OK   13
 
 /* Lowest firmware API version supported */
 #define IWL7260_UCODE_API_MIN  10
-#define IWL3160_UCODE_API_MIN  10
+#define IWL3165_UCODE_API_MIN  13
 
 /* NVM versions */
 #define IWL7260_NVM_VERSION            0x0a1d
 #define IWL3160_FW_PRE "iwlwifi-3160-"
 #define IWL3160_MODULE_FIRMWARE(api) IWL3160_FW_PRE __stringify(api) ".ucode"
 
-#define IWL3165_FW_PRE "iwlwifi-3165-"
-#define IWL3165_MODULE_FIRMWARE(api) IWL3165_FW_PRE __stringify(api) ".ucode"
-
 #define IWL7265_FW_PRE "iwlwifi-7265-"
 #define IWL7265_MODULE_FIRMWARE(api) IWL7265_FW_PRE __stringify(api) ".ucode"
 
@@ -248,8 +244,13 @@ static const struct iwl_ht_params iwl7265_ht_params = {
 
 const struct iwl_cfg iwl3165_2ac_cfg = {
        .name = "Intel(R) Dual Band Wireless AC 3165",
-       .fw_name_pre = IWL3165_FW_PRE,
+       .fw_name_pre = IWL7265D_FW_PRE,
        IWL_DEVICE_7000,
+       /* sparse doesn't like the re-assignment but it is safe */
+#ifndef __CHECKER__
+       .ucode_api_ok = IWL3165_UCODE_API_OK,
+       .ucode_api_min = IWL3165_UCODE_API_MIN,
+#endif
        .ht_params = &iwl7000_ht_params,
        .nvm_ver = IWL3165_NVM_VERSION,
        .nvm_calib_ver = IWL3165_TX_POWER_VERSION,
@@ -325,6 +326,5 @@ const struct iwl_cfg iwl7265d_n_cfg = {
 
 MODULE_FIRMWARE(IWL7260_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL3160_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
-MODULE_FIRMWARE(IWL3165_MODULE_FIRMWARE(IWL3160_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
 MODULE_FIRMWARE(IWL7265D_MODULE_FIRMWARE(IWL7260_UCODE_API_OK));
index 41ff85de73343b0a5686bfd175164807e8dc4684..21302b6f2bfd79a8e8617a345e3771f6608c0145 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -748,6 +750,9 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg,
                return;
        }
 
+       if (data->sku_cap_mimo_disabled)
+               rx_chains = 1;
+
        ht_info->ht_supported = true;
        ht_info->cap = IEEE80211_HT_CAP_DSSSCCK40;
 
index 5234a0bf11e4e3286b740c22518f4a039e224e94..750c8c9ee70d0352e5828049ff4b138e31a3ae6c 100644 (file)
@@ -6,6 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,6 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
+ * Copyright(c) 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -84,6 +86,7 @@ struct iwl_nvm_data {
        bool sku_cap_11ac_enable;
        bool sku_cap_amt_enable;
        bool sku_cap_ipan_enable;
+       bool sku_cap_mimo_disabled;
 
        u16 radio_cfg_type;
        u8 radio_cfg_step;
index bfdf3faa6c470dafbd9a66672b2f55e38b0872bc..62db2e5e45ebd51793c372e54832b21511057c58 100644 (file)
@@ -244,6 +244,7 @@ enum iwl_ucode_tlv_flag {
  *     longer than the passive one, which is essential for fragmented scan.
  * @IWL_UCODE_TLV_API_WIFI_MCC_UPDATE: ucode supports MCC updates with source.
 * @IWL_UCODE_TLV_API_HDC_PHASE_0: ucode supports finer configuration of LTR
+ * @IWL_UCODE_TLV_API_TX_POWER_DEV: new API for tx power.
  * @IWL_UCODE_TLV_API_BASIC_DWELL: use only basic dwell time in scan command,
  *     regardless of the band or the number of the probes. FW will calculate
  *     the actual dwell time.
@@ -260,6 +261,7 @@ enum iwl_ucode_tlv_api {
        IWL_UCODE_TLV_API_FRAGMENTED_SCAN       = BIT(8),
        IWL_UCODE_TLV_API_WIFI_MCC_UPDATE       = BIT(9),
        IWL_UCODE_TLV_API_HDC_PHASE_0           = BIT(10),
+       IWL_UCODE_TLV_API_TX_POWER_DEV          = BIT(11),
        IWL_UCODE_TLV_API_BASIC_DWELL           = BIT(13),
        IWL_UCODE_TLV_API_SCD_CFG               = BIT(15),
        IWL_UCODE_TLV_API_SINGLE_SCAN_EBS       = BIT(16),
index 83903a5025c2e69779554e7bcf980aff48b3d080..8e604a3931ca6db6a1ab0eff59d2787d8562e494 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -116,10 +116,11 @@ enum family_8000_nvm_offsets {
 
 /* SKU Capabilities (actual values from NVM definition) */
 enum nvm_sku_bits {
-       NVM_SKU_CAP_BAND_24GHZ  = BIT(0),
-       NVM_SKU_CAP_BAND_52GHZ  = BIT(1),
-       NVM_SKU_CAP_11N_ENABLE  = BIT(2),
-       NVM_SKU_CAP_11AC_ENABLE = BIT(3),
+       NVM_SKU_CAP_BAND_24GHZ          = BIT(0),
+       NVM_SKU_CAP_BAND_52GHZ          = BIT(1),
+       NVM_SKU_CAP_11N_ENABLE          = BIT(2),
+       NVM_SKU_CAP_11AC_ENABLE         = BIT(3),
+       NVM_SKU_CAP_MIMO_DISABLE        = BIT(5),
 };
 
 /*
@@ -368,6 +369,11 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg,
        if (cfg->ht_params->ldpc)
                vht_cap->cap |= IEEE80211_VHT_CAP_RXLDPC;
 
+       if (data->sku_cap_mimo_disabled) {
+               num_rx_ants = 1;
+               num_tx_ants = 1;
+       }
+
        if (num_tx_ants > 1)
                vht_cap->cap |= IEEE80211_VHT_CAP_TXSTBC;
        else
@@ -465,7 +471,7 @@ static int iwl_get_radio_cfg(const struct iwl_cfg *cfg, const __le16 *nvm_sw,
        if (cfg->device_family != IWL_DEVICE_FAMILY_8000)
                return le16_to_cpup(nvm_sw + RADIO_CFG);
 
-       return le32_to_cpup((__le32 *)(nvm_sw + RADIO_CFG_FAMILY_8000));
+       return le32_to_cpup((__le32 *)(phy_sku + RADIO_CFG_FAMILY_8000));
 
 }
 
@@ -527,6 +533,10 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
        const u8 *hw_addr;
 
        if (mac_override) {
+               static const u8 reserved_mac[] = {
+                       0x02, 0xcc, 0xaa, 0xff, 0xee, 0x00
+               };
+
                hw_addr = (const u8 *)(mac_override +
                                 MAC_ADDRESS_OVERRIDE_FAMILY_8000);
 
@@ -538,7 +548,12 @@ static void iwl_set_hw_address_family_8000(struct device *dev,
                data->hw_addr[4] = hw_addr[5];
                data->hw_addr[5] = hw_addr[4];
 
-               if (is_valid_ether_addr(data->hw_addr))
+               /*
+                * Force the use of the OTP MAC address in case of reserved MAC
+                * address in the NVM, or if address is given but invalid.
+                */
+               if (is_valid_ether_addr(data->hw_addr) &&
+                   memcmp(reserved_mac, hw_addr, ETH_ALEN) != 0)
                        return;
 
                IWL_ERR_DEV(dev,
@@ -610,6 +625,7 @@ iwl_parse_nvm_data(struct device *dev, const struct iwl_cfg *cfg,
                data->sku_cap_11n_enable = false;
        data->sku_cap_11ac_enable = data->sku_cap_11n_enable &&
                                    (sku & NVM_SKU_CAP_11AC_ENABLE);
+       data->sku_cap_mimo_disabled = sku & NVM_SKU_CAP_MIMO_DISABLE;
 
        data->n_hw_addrs = iwl_get_n_hw_addrs(cfg, nvm_sw);
 
index 6dfed1259260f06d23feb544d78ce6484c01fb36..56254a837214ffad421a7b7a14a9eaec3d7dc029 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -421,8 +421,9 @@ struct iwl_trans_txq_scd_cfg {
  *
  * All the handlers MUST be implemented
  *
- * @start_hw: starts the HW- from that point on, the HW can send interrupts
- *     May sleep
+ * @start_hw: starts the HW. If low_power is true, the NIC needs to be taken
+ *     out of a low power state. From that point on, the HW can send
+ *     interrupts. May sleep.
  * @op_mode_leave: Turn off the HW RF kill indication if on
  *     May sleep
  * @start_fw: allocates and inits all the resources for the transport
@@ -432,10 +433,11 @@ struct iwl_trans_txq_scd_cfg {
  *     the SCD base address in SRAM, then provide it here, or 0 otherwise.
  *     May sleep
  * @stop_device: stops the whole device (embedded CPU put to reset) and stops
- *     the HW. From that point on, the HW will be in low power but will still
- *     issue interrupt if the HW RF kill is triggered. This callback must do
- *     the right thing and not crash even if start_hw() was called but not
- *     start_fw(). May sleep
+ *     the HW. If low_power is true, the NIC will be put in low power state.
+ *     From that point on, the HW will be stopped but will still issue an
+ *     interrupt if the HW RF kill switch is triggered.
+ *     This callback must do the right thing and not crash even if start_hw()
+ *     was called but not start_fw(). May sleep.
  * @d3_suspend: put the device into the correct mode for WoWLAN during
  *     suspend. This is optional, if not implemented WoWLAN will not be
  *     supported. This callback may sleep.
@@ -491,14 +493,14 @@ struct iwl_trans_txq_scd_cfg {
  */
 struct iwl_trans_ops {
 
-       int (*start_hw)(struct iwl_trans *iwl_trans);
+       int (*start_hw)(struct iwl_trans *iwl_trans, bool low_power);
        void (*op_mode_leave)(struct iwl_trans *iwl_trans);
        int (*start_fw)(struct iwl_trans *trans, const struct fw_img *fw,
                        bool run_in_rfkill);
        int (*update_sf)(struct iwl_trans *trans,
                         struct iwl_sf_region *st_fwrd_space);
        void (*fw_alive)(struct iwl_trans *trans, u32 scd_addr);
-       void (*stop_device)(struct iwl_trans *trans);
+       void (*stop_device)(struct iwl_trans *trans, bool low_power);
 
        void (*d3_suspend)(struct iwl_trans *trans, bool test);
        int (*d3_resume)(struct iwl_trans *trans, enum iwl_d3_status *status,
@@ -652,11 +654,16 @@ static inline void iwl_trans_configure(struct iwl_trans *trans,
        trans->ops->configure(trans, trans_cfg);
 }
 
-static inline int iwl_trans_start_hw(struct iwl_trans *trans)
+static inline int _iwl_trans_start_hw(struct iwl_trans *trans, bool low_power)
 {
        might_sleep();
 
-       return trans->ops->start_hw(trans);
+       return trans->ops->start_hw(trans, low_power);
+}
+
+static inline int iwl_trans_start_hw(struct iwl_trans *trans)
+{
+       return trans->ops->start_hw(trans, true);
 }
 
 static inline void iwl_trans_op_mode_leave(struct iwl_trans *trans)
@@ -703,15 +710,21 @@ static inline int iwl_trans_update_sf(struct iwl_trans *trans,
        return 0;
 }
 
-static inline void iwl_trans_stop_device(struct iwl_trans *trans)
+static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
+                                         bool low_power)
 {
        might_sleep();
 
-       trans->ops->stop_device(trans);
+       trans->ops->stop_device(trans, low_power);
 
        trans->state = IWL_TRANS_NO_FW;
 }
 
+static inline void iwl_trans_stop_device(struct iwl_trans *trans)
+{
+       _iwl_trans_stop_device(trans, true);
+}
+
 static inline void iwl_trans_d3_suspend(struct iwl_trans *trans, bool test)
 {
        might_sleep();
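
The iwl_trans hunks above add a bool to the ops' signatures while keeping every existing caller source-compatible: the old entry point becomes a thin inline wrapper that supplies the common default. The same shape in miniature (names invented):

#include <stdbool.h>
#include <stdio.h>

static int _start_hw(int id, bool low_power)
{
	printf("start_hw(id=%d, low_power=%s)\n", id, low_power ? "true" : "false");
	return 0;
}

/* legacy-compatible wrapper: existing callers keep the one-argument form */
static inline int start_hw(int id)
{
	return _start_hw(id, true);	/* default: also leave low power state */
}

int main(void)
{
	start_hw(7);		/* old call sites, unchanged */
	_start_hw(7, false);	/* new callers that care about the flag */
	return 0;
}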
index d954591e0be58528d138f8738b2cb2325db1fed3..6ac6de2af9779982231d1efb4c6186fad4442f5d 100644 (file)
@@ -776,7 +776,7 @@ static int iwl_mvm_bt_coex_reduced_txp(struct iwl_mvm *mvm, u8 sta_id,
        struct iwl_host_cmd cmd = {
                .id = BT_CONFIG,
                .len = { sizeof(*bt_cmd), },
-               .dataflags = { IWL_HCMD_DFL_NOCOPY, },
+               .dataflags = { IWL_HCMD_DFL_DUP, },
                .flags = CMD_ASYNC,
        };
        struct iwl_mvm_sta *mvmsta;
index a6c48c7b1e1683fdbdcb99fd0e4f971cf6ad66d2..4310cf102d78ecd4f3e7baffa13570d878153cb4 100644 (file)
@@ -1726,7 +1726,7 @@ iwl_mvm_netdetect_query_results(struct iwl_mvm *mvm,
        results->matched_profiles = le32_to_cpu(query->matched_profiles);
        memcpy(results->matches, query->matches, sizeof(results->matches));
 
-#ifdef CPTCFG_IWLWIFI_DEBUGFS
+#ifdef CONFIG_IWLWIFI_DEBUGFS
        mvm->last_netdetect_scans = le32_to_cpu(query->n_scans_done);
 #endif
 
@@ -1750,8 +1750,10 @@ static void iwl_mvm_query_netdetect_reasons(struct iwl_mvm *mvm,
        int i, j, n_matches, ret;
 
        fw_status = iwl_mvm_get_wakeup_status(mvm, vif);
-       if (!IS_ERR_OR_NULL(fw_status))
+       if (!IS_ERR_OR_NULL(fw_status)) {
                reasons = le32_to_cpu(fw_status->wakeup_reasons);
+               kfree(fw_status);
+       }
 
        if (reasons & IWL_WOWLAN_WAKEUP_BY_RFKILL_DEASSERTED)
                wakeup.rfkill_release = true;
@@ -1868,15 +1870,15 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
        /* get the BSS vif pointer again */
        vif = iwl_mvm_get_bss_vif(mvm);
        if (IS_ERR_OR_NULL(vif))
-               goto out_unlock;
+               goto err;
 
        ret = iwl_trans_d3_resume(mvm->trans, &d3_status, test);
        if (ret)
-               goto out_unlock;
+               goto err;
 
        if (d3_status != IWL_D3_STATUS_ALIVE) {
                IWL_INFO(mvm, "Device was reset during suspend\n");
-               goto out_unlock;
+               goto err;
        }
 
        /* query SRAM first in case we want event logging */
@@ -1902,7 +1904,8 @@ static int __iwl_mvm_resume(struct iwl_mvm *mvm, bool test)
                goto out_iterate;
        }
 
- out_unlock:
+err:
+       iwl_mvm_free_nd(mvm);
        mutex_unlock(&mvm->mutex);
 
 out_iterate:
@@ -1915,6 +1918,14 @@ out:
        /* return 1 to reconfigure the device */
        set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        set_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status);
+
+       /* We always return 1, which causes mac80211 to do a reconfig
+        * with IEEE80211_RECONFIG_TYPE_RESTART.  This type of
+        * reconfig calls iwl_mvm_restart_complete(), where we unref
+        * the IWL_MVM_REF_UCODE_DOWN, so we need to take the
+        * reference here.
+        */
+       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        return 1;
 }
 
@@ -2021,7 +2032,6 @@ static int iwl_mvm_d3_test_release(struct inode *inode, struct file *file)
        __iwl_mvm_resume(mvm, true);
        rtnl_unlock();
        iwl_abort_notification_waits(&mvm->notif_wait);
-       iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
        ieee80211_restart_hw(mvm->hw);
 
        /* wait for restart and disconnect all interfaces */
index 4fc0938b3fb6d6c92464f63b3f206cbc798f92b7..b1baa33cc19b3228a8534af71ab69c7dede40520 100644 (file)
@@ -297,6 +297,40 @@ struct iwl_uapsd_misbehaving_ap_notif {
        u8 reserved[3];
 } __packed;
 
+/**
+ * struct iwl_reduce_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @flags: (reserved for future implementation)
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in dBm.
+ */
+struct iwl_reduce_tx_power_cmd {
+       u8 flags;
+       u8 mac_context_id;
+       __le16 pwr_restriction;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
+
+/**
+ * struct iwl_dev_tx_power_cmd - TX power reduction command
+ * REDUCE_TX_POWER_CMD = 0x9f
+ * @set_mode: 0 - MAC tx power, 1 - device tx power
+ * @mac_context_id: id of the mac ctx for which we are reducing TX power.
+ * @pwr_restriction: TX power restriction in 1/8 dBm units.
+ * @dev_24: device TX power restriction in 1/8 dBm units
+ * @dev_52_low: device TX power restriction upper band - low
+ * @dev_52_high: device TX power restriction upper band - high
+ */
+struct iwl_dev_tx_power_cmd {
+       __le32 set_mode;
+       __le32 mac_context_id;
+       __le16 pwr_restriction;
+       __le16 dev_24;
+       __le16 dev_52_low;
+       __le16 dev_52_high;
+} __packed; /* TX_REDUCED_POWER_API_S_VER_2 */
+
+#define IWL_DEV_MAX_TX_POWER 0x7FFF
+
 /**
  * struct iwl_beacon_filter_cmd
  * REPLY_BEACON_FILTERING_CMD = 0xd2 (command)
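
The v2 command layout above moves pwr_restriction to 1/8 dBm units and adds per-band device limits. A hypothetical host-endian mock of filling it (the real struct uses __le16/__le32 fields and is __packed; treating IWL_DEV_MAX_TX_POWER (0x7FFF) as "unrestricted" is an assumption here):

#include <stdint.h>
#include <stdio.h>

#define TOY_DEV_MAX_TX_POWER 0x7FFF	/* mirrors IWL_DEV_MAX_TX_POWER */

struct toy_dev_tx_power_cmd {		/* host-endian stand-in for the real,
					 * packed, little-endian struct */
	uint32_t set_mode;		/* 0: per-MAC limit, 1: whole device */
	uint32_t mac_context_id;
	uint16_t pwr_restriction;	/* 1/8 dBm units */
	uint16_t dev_24;
	uint16_t dev_52_low;
	uint16_t dev_52_high;
};

int main(void)
{
	struct toy_dev_tx_power_cmd cmd = {
		.set_mode = 0,
		.mac_context_id = 0,
		.pwr_restriction = 22 * 8,	/* limit this MAC to 22 dBm */
		.dev_24 = TOY_DEV_MAX_TX_POWER,	/* leave the bands alone */
		.dev_52_low = TOY_DEV_MAX_TX_POWER,
		.dev_52_high = TOY_DEV_MAX_TX_POWER,
	};

	printf("restriction = %u (1/8 dBm) = %u dBm\n",
	       cmd.pwr_restriction, cmd.pwr_restriction / 8);
	return 0;
}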
index 4f81dcf57a736e7409087f4b3193809c3570db6f..d6cced47d561b9601a59166296a229facfd9014e 100644 (file)
@@ -122,46 +122,6 @@ enum iwl_scan_complete_status {
        SCAN_COMP_STATUS_ERR_ALLOC_TE = 0x0C,
 };
 
-/**
- * struct iwl_scan_results_notif - scan results for one channel
- * ( SCAN_RESULTS_NOTIFICATION = 0x83 )
- * @channel: which channel the results are from
- * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
- * @probe_status: SCAN_PROBE_STATUS_*, indicates success of probe request
- * @num_probe_not_sent: # of request that weren't sent due to not enough time
- * @duration: duration spent in channel, in usecs
- * @statistics: statistics gathered for this channel
- */
-struct iwl_scan_results_notif {
-       u8 channel;
-       u8 band;
-       u8 probe_status;
-       u8 num_probe_not_sent;
-       __le32 duration;
-       __le32 statistics[SCAN_RESULTS_STATISTICS];
-} __packed; /* SCAN_RESULT_NTF_API_S_VER_2 */
-
-/**
- * struct iwl_scan_complete_notif - notifies end of scanning (all channels)
- * ( SCAN_COMPLETE_NOTIFICATION = 0x84 )
- * @scanned_channels: number of channels scanned (and number of valid results)
- * @status: one of SCAN_COMP_STATUS_*
- * @bt_status: BT on/off status
- * @last_channel: last channel that was scanned
- * @tsf_low: TSF timer (lower half) in usecs
- * @tsf_high: TSF timer (higher half) in usecs
- * @results: array of scan results, only "scanned_channels" of them are valid
- */
-struct iwl_scan_complete_notif {
-       u8 scanned_channels;
-       u8 status;
-       u8 bt_status;
-       u8 last_channel;
-       __le32 tsf_low;
-       __le32 tsf_high;
-       struct iwl_scan_results_notif results[];
-} __packed; /* SCAN_COMPLETE_NTF_API_S_VER_2 */
-
 /* scan offload */
 #define IWL_SCAN_MAX_BLACKLIST_LEN     64
 #define IWL_SCAN_SHORT_BLACKLIST_LEN   16
@@ -554,7 +514,7 @@ struct iwl_scan_req_unified_lmac {
 } __packed;
 
 /**
- * struct iwl_lmac_scan_results_notif - scan results for one channel -
+ * struct iwl_scan_results_notif - scan results for one channel -
  *     SCAN_RESULT_NTF_API_S_VER_3
  * @channel: which channel the results are from
  * @band: 0 for 5.2 GHz, 1 for 2.4 GHz
@@ -562,7 +522,7 @@ struct iwl_scan_req_unified_lmac {
  * @num_probe_not_sent: # of request that weren't sent due to not enough time
  * @duration: duration spent in channel, in usecs
  */
-struct iwl_lmac_scan_results_notif {
+struct iwl_scan_results_notif {
        u8 channel;
        u8 band;
        u8 probe_status;
index aab68cbae754d547a9e1fe514c4c88de6877777f..01b1da6ad35977b349fc79336c15238706ca9078 100644 (file)
@@ -281,19 +281,6 @@ struct iwl_tx_ant_cfg_cmd {
        __le32 valid;
 } __packed;
 
-/**
- * struct iwl_reduce_tx_power_cmd - TX power reduction command
- * REDUCE_TX_POWER_CMD = 0x9f
- * @flags: (reserved for future implementation)
- * @mac_context_id: id of the mac ctx for which we are reducing TX power.
- * @pwr_restriction: TX power restriction in dBms.
- */
-struct iwl_reduce_tx_power_cmd {
-       u8 flags;
-       u8 mac_context_id;
-       __le16 pwr_restriction;
-} __packed; /* TX_REDUCED_POWER_API_S_VER_1 */
-
 /*
  * Calibration control struct.
  * Sent as part of the phy configuration command.
index bc5eac4960e18a79a211da2a2bf6492b1a39e570..df869633f4dd976c9404e036ecf8a49a855fd0b4 100644 (file)
@@ -6,7 +6,7 @@
  * GPL LICENSE SUMMARY
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -32,7 +32,7 @@
  * BSD LICENSE
  *
  * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -322,7 +322,7 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
 
        lockdep_assert_held(&mvm->mutex);
 
-       if (WARN_ON_ONCE(mvm->init_ucode_complete || mvm->calibrating))
+       if (WARN_ON_ONCE(mvm->calibrating))
                return 0;
 
        iwl_init_notification_wait(&mvm->notif_wait,
@@ -396,8 +396,6 @@ int iwl_run_init_mvm_ucode(struct iwl_mvm *mvm, bool read_nvm)
         */
        ret = iwl_wait_notification(&mvm->notif_wait, &calib_wait,
                        MVM_UCODE_CALIB_TIMEOUT);
-       if (!ret)
-               mvm->init_ucode_complete = true;
 
        if (ret && iwl_mvm_is_radio_killed(mvm)) {
                IWL_DEBUG_RF_KILL(mvm, "RFKILL while calibrating.\n");
@@ -494,15 +492,6 @@ int iwl_mvm_fw_dbg_collect_desc(struct iwl_mvm *mvm,
 
        mvm->fw_dump_desc = desc;
 
-       /* stop recording */
-       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
-               iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
-       } else {
-               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
-               /* wait before we collect the data till the DBGC stop */
-               udelay(100);
-       }
-
        queue_delayed_work(system_wq, &mvm->fw_dump_wk, delay);
 
        return 0;
@@ -658,25 +647,24 @@ int iwl_mvm_up(struct iwl_mvm *mvm)
         * module loading, load init ucode now
         * (for example, if we were in RFKILL)
         */
-       if (!mvm->init_ucode_complete) {
-               ret = iwl_run_init_mvm_ucode(mvm, false);
-               if (ret && !iwlmvm_mod_params.init_dbg) {
-                       IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
-                       /* this can't happen */
-                       if (WARN_ON(ret > 0))
-                               ret = -ERFKILL;
-                       goto error;
-               }
-               if (!iwlmvm_mod_params.init_dbg) {
-                       /*
-                        * should stop and start HW since that INIT
-                        * image just loaded
-                        */
-                       iwl_trans_stop_device(mvm->trans);
-                       ret = iwl_trans_start_hw(mvm->trans);
-                       if (ret)
-                               return ret;
-               }
+       ret = iwl_run_init_mvm_ucode(mvm, false);
+       if (ret && !iwlmvm_mod_params.init_dbg) {
+               IWL_ERR(mvm, "Failed to run INIT ucode: %d\n", ret);
+               /* this can't happen */
+               if (WARN_ON(ret > 0))
+                       ret = -ERFKILL;
+               goto error;
+       }
+       if (!iwlmvm_mod_params.init_dbg) {
+               /*
+                * Stop and start the transport without entering low power
+                * mode. This will save the state of other components on the
+                * device that are triggered by the INIT firmware (MFUART).
+                */
+               _iwl_trans_stop_device(mvm->trans, false);
+               ret = _iwl_trans_start_hw(mvm->trans, false);
+               if (ret)
+                       return ret;
        }
 
        if (iwlmvm_mod_params.init_dbg)
index 84555170b6f751bb4f0925bf5c85319de76293b6..dda9f7b5f3423173e668f507719e47c3540b27d0 100644 (file)
@@ -1322,7 +1322,7 @@ static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
 
        clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
        iwl_mvm_d0i3_enable_tx(mvm, NULL);
-       ret = iwl_mvm_update_quotas(mvm, false, NULL);
+       ret = iwl_mvm_update_quotas(mvm, true, NULL);
        if (ret)
                IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
                        ret);
@@ -1471,8 +1471,8 @@ static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
        return NULL;
 }
 
-static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
-                               s8 tx_power)
+static int iwl_mvm_set_tx_power_old(struct iwl_mvm *mvm,
+                                   struct ieee80211_vif *vif, s8 tx_power)
 {
        /* FW is in charge of regulatory enforcement */
        struct iwl_reduce_tx_power_cmd reduce_txpwr_cmd = {
@@ -1485,6 +1485,26 @@ static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
                                    &reduce_txpwr_cmd);
 }
 
+static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
+                               s16 tx_power)
+{
+       struct iwl_dev_tx_power_cmd cmd = {
+               .set_mode = 0,
+               .mac_context_id =
+                       cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
+               .pwr_restriction = cpu_to_le16(8 * tx_power),
+       };
+
+       if (!(mvm->fw->ucode_capa.api[0] & IWL_UCODE_TLV_API_TX_POWER_DEV))
+               return iwl_mvm_set_tx_power_old(mvm, vif, tx_power);
+
+       if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
+               cmd.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);
+
+       return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0,
+                                   sizeof(cmd), &cmd);
+}
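
The v2 command encodes power limits in units of 1/8 dBm, while mac80211 hands the driver whole dBm, hence the scaling above; the "no restriction" case is mapped to the firmware's sentinel. A worked example, assuming a user limit of 20 dBm:

	s16 tx_power = 20;				 /* dBm, from mac80211 */
	cmd.pwr_restriction = cpu_to_le16(8 * tx_power); /* 160 = 20 dBm in 1/8 dBm units */

	/* IWL_DEFAULT_MAX_TX_POWER means "no user limit": send the
	 * IWL_DEV_MAX_TX_POWER (0x7FFF) sentinel instead of a scaled value. */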
+
 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
                                     struct ieee80211_vif *vif)
 {
@@ -3975,9 +3995,6 @@ static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw,
        if (!iwl_fw_dbg_trigger_enabled(mvm->fw, FW_DBG_TRIGGER_MLME))
                return;
 
-       if (event->u.mlme.status == MLME_SUCCESS)
-               return;
-
        trig = iwl_fw_dbg_get_trigger(mvm->fw, FW_DBG_TRIGGER_MLME);
        trig_mlme = (void *)trig->data;
        if (!iwl_fw_dbg_trigger_check_stop(mvm, vif, trig))
index d5522a16124292cd6cab36159851a8446b5e2d95..cf70f681d1acb7e271717091684ca805749e13ab 100644 (file)
@@ -603,7 +603,6 @@ struct iwl_mvm {
 
        enum iwl_ucode_type cur_ucode;
        bool ucode_loaded;
-       bool init_ucode_complete;
        bool calibrating;
        u32 error_event_table;
        u32 log_event_table;
index a08b03d58d4bf0f3ebd4773a7fdb6e07cc8406b8..2ea01238754eb8d1c2470156f0293a2e15988fd6 100644 (file)
@@ -865,6 +865,16 @@ static void iwl_mvm_fw_error_dump_wk(struct work_struct *work)
                return;
 
        mutex_lock(&mvm->mutex);
+
+       /* stop recording */
+       if (mvm->cfg->device_family == IWL_DEVICE_FAMILY_7000) {
+               iwl_set_bits_prph(mvm->trans, MON_BUFF_SAMPLE_CTL, 0x100);
+       } else {
+               iwl_write_prph(mvm->trans, DBGC_IN_SAMPLE, 0);
+               /* wait for the DBGC to stop before we collect the data */
+               udelay(100);
+       }
+
        iwl_mvm_fw_error_dump(mvm);
 
        /* start recording again if the firmware is not crashed */
@@ -1253,11 +1263,13 @@ static void iwl_mvm_d0i3_exit_work(struct work_struct *wk)
                ieee80211_iterate_active_interfaces(
                        mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
                        iwl_mvm_d0i3_disconnect_iter, mvm);
-
-       iwl_free_resp(&get_status_cmd);
 out:
        iwl_mvm_d0i3_enable_tx(mvm, qos_seq);
 
+       /* qos_seq might point inside resp_pkt, so free it only now */
+       if (get_status_cmd.resp_pkt)
+               iwl_free_resp(&get_status_cmd);
+
        /* the FW might have updated the regdomain */
        iwl_mvm_update_changed_regdom(mvm);
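
The move matters for lifetime, not just tidiness: as the new comment says, qos_seq can point into the response packet owned by get_status_cmd, so freeing the packet before iwl_mvm_d0i3_enable_tx() has consumed it would be a use-after-free. The general shape of the fix, sketched with hypothetical names:

	resp = fetch_response(...);		/* owns the backing buffer */
	seq = parse_qos_seq(resp);		/* borrowed pointer into resp */

	consume(seq);				/* use the borrowed data first */
	free_response(resp);			/* only then release the buffer */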
 
index f9928f2c125f726bbf89474096bd47990bfb86eb..33cd68ae7bf9362539fa1a99e34686e0cca3de2b 100644 (file)
@@ -180,6 +180,9 @@ static bool rs_mimo_allow(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
        if (iwl_mvm_vif_low_latency(mvmvif) && mvmsta->vif->p2p)
                return false;
 
+       if (mvm->nvm_data->sku_cap_mimo_disabled)
+               return false;
+
        return true;
 }
 
index 78ec7db64ba59e886e2a7b18a3df64f70a4ea29c..d6314ddf57b5d9638fcfd2fcf6ba917bb136779f 100644 (file)
@@ -478,6 +478,11 @@ static void iwl_mvm_stat_iterator(void *_data, u8 *mac,
        if (vif->type != NL80211_IFTYPE_STATION)
                return;
 
+       if (sig == 0) {
+               IWL_DEBUG_RX(mvm, "RSSI is 0 - skip signal based decision\n");
+               return;
+       }
+
        mvmvif->bf_data.ave_beacon_signal = sig;
 
        /* BT Coex */
index 74e1c86289dcbcedc1f5c7b963e468095de7cf25..1075a213bd6a87156e44ab410ac566cf18bdacc3 100644 (file)
@@ -319,7 +319,7 @@ int iwl_mvm_rx_scan_offload_iter_complete_notif(struct iwl_mvm *mvm,
                                                struct iwl_device_cmd *cmd)
 {
        struct iwl_rx_packet *pkt = rxb_addr(rxb);
-       struct iwl_scan_complete_notif *notif = (void *)pkt->data;
+       struct iwl_lmac_scan_complete_notif *notif = (void *)pkt->data;
 
        IWL_DEBUG_SCAN(mvm,
                       "Scan offload iteration complete: status=0x%x scanned channels=%d\n",
index 01996c9d98a79b1d62e3a665cd0c720df79ad04e..376b84e54ad7e8bbb48d039d354c03748665451c 100644 (file)
@@ -1,7 +1,7 @@
 /******************************************************************************
  *
- * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * Portions of this file are derived from the ipw3945 project, as well
  * as portions of the ieee80211 subsystem header files.
@@ -320,7 +320,7 @@ struct iwl_trans_pcie {
 
        /*protect hw register */
        spinlock_t reg_lock;
-       bool cmd_in_flight;
+       bool cmd_hold_nic_awake;
        bool ref_cmd_in_flight;
 
        /* protect ref counter */
index 2de8fbfe4edf4d6c6997307fb91052177fd7e6e4..dc179094e6a0d440b2aa29909c05adbc07f3f6b5 100644 (file)
@@ -5,8 +5,8 @@
  *
  * GPL LICENSE SUMMARY
  *
- * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2007 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of version 2 of the GNU General Public License as
@@ -31,8 +31,8 @@
  *
  * BSD LICENSE
  *
- * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
- * Copyright(c) 2013 - 2014 Intel Mobile Communications GmbH
+ * Copyright(c) 2005 - 2015 Intel Corporation. All rights reserved.
+ * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
  * All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
@@ -104,7 +104,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
 static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-       struct page *page;
+       struct page *page = NULL;
        dma_addr_t phys;
        u32 size;
        u8 power;
@@ -131,6 +131,7 @@ static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans)
                                    DMA_FROM_DEVICE);
                if (dma_mapping_error(trans->dev, phys)) {
                        __free_pages(page, order);
+                       page = NULL;
                        continue;
                }
                IWL_INFO(trans,
@@ -1020,7 +1021,7 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
        iwl_pcie_tx_start(trans, scd_addr);
 }
 
-static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
+static void iwl_trans_pcie_stop_device(struct iwl_trans *trans, bool low_power)
 {
        struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
        bool hw_rfkill, was_hw_rfkill;
@@ -1048,9 +1049,11 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
                iwl_pcie_rx_stop(trans);
 
                /* Power-down device's busmaster DMA clocks */
-               iwl_write_prph(trans, APMG_CLK_DIS_REG,
-                              APMG_CLK_VAL_DMA_CLK_RQT);
-               udelay(5);
+               if (trans->cfg->device_family != IWL_DEVICE_FAMILY_8000) {
+                       iwl_write_prph(trans, APMG_CLK_DIS_REG,
+                                      APMG_CLK_VAL_DMA_CLK_RQT);
+                       udelay(5);
+               }
        }
 
        /* Make sure (redundant) we've released our request to stay awake */
@@ -1115,7 +1118,7 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
 void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
 {
        if (iwl_op_mode_hw_rf_kill(trans->op_mode, state))
-               iwl_trans_pcie_stop_device(trans);
+               iwl_trans_pcie_stop_device(trans, true);
 }
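
The new bool threads a low-power decision through the transport ops: true on ordinary shutdown paths such as this RF-kill handler, false when the op mode wants to cycle the device without dropping its power state, as iwl_mvm_up() does after the INIT image above. The wrapper implied by that caller looks roughly like the sketch below; the authoritative declarations live in iwl-trans.h:

	/* sketch only; see iwl-trans.h for the real helper */
	static inline void _iwl_trans_stop_device(struct iwl_trans *trans,
						  bool low_power)
	{
		trans->ops->stop_device(trans, low_power);
	}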
 
 static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -1200,7 +1203,7 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
        return 0;
 }
 
-static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
+static int iwl_trans_pcie_start_hw(struct iwl_trans *trans, bool low_power)
 {
        bool hw_rfkill;
        int err;
@@ -1369,7 +1372,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
 
        spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
 
-       if (trans_pcie->cmd_in_flight)
+       if (trans_pcie->cmd_hold_nic_awake)
                goto out;
 
        /* this bit wakes up the NIC */
@@ -1435,7 +1438,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
         */
        __acquire(&trans_pcie->reg_lock);
 
-       if (trans_pcie->cmd_in_flight)
+       if (trans_pcie->cmd_hold_nic_awake)
                goto out;
 
        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
index 06952aadfd7b5d4dccfff9f9689cd804031ab0eb..5ef8044c2ea3ed7317870902168c71be936cd8df 100644 (file)
@@ -1039,18 +1039,14 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
                iwl_trans_pcie_ref(trans);
        }
 
-       if (trans_pcie->cmd_in_flight)
-               return 0;
-
-       trans_pcie->cmd_in_flight = true;
-
        /*
         * wake up the NIC to make sure that the firmware will see the host
         * command - we will let the NIC sleep once all the host commands
         * returned. This needs to be done only on NICs that have
         * apmg_wake_up_wa set.
         */
-       if (trans->cfg->base_params->apmg_wake_up_wa) {
+       if (trans->cfg->base_params->apmg_wake_up_wa &&
+           !trans_pcie->cmd_hold_nic_awake) {
                __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
                                         CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
                if (trans->cfg->device_family == IWL_DEVICE_FAMILY_8000)
@@ -1064,10 +1060,10 @@ static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
                if (ret < 0) {
                        __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
                                        CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-                       trans_pcie->cmd_in_flight = false;
                        IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
                        return -EIO;
                }
+               trans_pcie->cmd_hold_nic_awake = true;
        }
 
        return 0;
@@ -1085,15 +1081,14 @@ static int iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
                iwl_trans_pcie_unref(trans);
        }
 
-       if (WARN_ON(!trans_pcie->cmd_in_flight))
-               return 0;
-
-       trans_pcie->cmd_in_flight = false;
+       if (trans->cfg->base_params->apmg_wake_up_wa) {
+               if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
+                       return 0;
 
-       if (trans->cfg->base_params->apmg_wake_up_wa)
+               trans_pcie->cmd_hold_nic_awake = false;
                __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
-                                       CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
+                                          CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+       }
        return 0;
 }
 
index f0188c83c79f7d6027bdee6372768d3657347a2a..2721cf89fb160f0d3f4e6106474c5da101707a18 100644 (file)
@@ -126,7 +126,7 @@ static int _usbctrl_vendorreq_sync_read(struct usb_device *udev, u8 request,
 
        do {
                status = usb_control_msg(udev, pipe, request, reqtype, value,
-                                        index, pdata, len, 0); /*max. timeout*/
+                                        index, pdata, len, 1000);
                if (status < 0) {
                        /* firmware download is checksumed, don't retry */
                        if ((value >= FW_8192C_START_ADDRESS &&
index 4de46aa61d958fb9c5a1ae9d1ec3c0a0e48acdd4..0d2594395ffbc797671711603461148270f1a03f 100644 (file)
@@ -1250,7 +1250,7 @@ static void xenvif_tx_build_gops(struct xenvif_queue *queue,
                        netdev_err(queue->vif->dev,
                                   "txreq.offset: %x, size: %u, end: %lu\n",
                                   txreq.offset, txreq.size,
-                                  (txreq.offset&~PAGE_MASK) + txreq.size);
+                                  (unsigned long)(txreq.offset&~PAGE_MASK) + txreq.size);
                        xenvif_fatal_tx_err(queue->vif);
                        break;
                }
index 3d8dbf5f2d396aa8dde8745afe98d8b4f9febd67..968787abf78d454166561e0c79f9f7421dad931d 100644 (file)
@@ -34,6 +34,8 @@ struct backend_info {
        enum xenbus_state frontend_state;
        struct xenbus_watch hotplug_status_watch;
        u8 have_hotplug_status_watch:1;
+
+       const char *hotplug_script;
 };
 
 static int connect_rings(struct backend_info *be, struct xenvif_queue *queue);
@@ -238,6 +240,7 @@ static int netback_remove(struct xenbus_device *dev)
                xenvif_free(be->vif);
                be->vif = NULL;
        }
+       kfree(be->hotplug_script);
        kfree(be);
        dev_set_drvdata(&dev->dev, NULL);
        return 0;
@@ -255,6 +258,7 @@ static int netback_probe(struct xenbus_device *dev,
        struct xenbus_transaction xbt;
        int err;
        int sg;
+       const char *script;
        struct backend_info *be = kzalloc(sizeof(struct backend_info),
                                          GFP_KERNEL);
        if (!be) {
@@ -347,6 +351,15 @@ static int netback_probe(struct xenbus_device *dev,
        if (err)
                pr_debug("Error writing multi-queue-max-queues\n");
 
+       script = xenbus_read(XBT_NIL, dev->nodename, "script", NULL);
+       if (IS_ERR(script)) {
+               err = PTR_ERR(script);
+               xenbus_dev_fatal(dev, err, "reading script");
+               goto fail;
+       }
+
+       be->hotplug_script = script;
+
        err = xenbus_switch_state(dev, XenbusStateInitWait);
        if (err)
                goto fail;
@@ -379,22 +392,14 @@ static int netback_uevent(struct xenbus_device *xdev,
                          struct kobj_uevent_env *env)
 {
        struct backend_info *be = dev_get_drvdata(&xdev->dev);
-       char *val;
 
-       val = xenbus_read(XBT_NIL, xdev->nodename, "script", NULL);
-       if (IS_ERR(val)) {
-               int err = PTR_ERR(val);
-               xenbus_dev_fatal(xdev, err, "reading script");
-               return err;
-       } else {
-               if (add_uevent_var(env, "script=%s", val)) {
-                       kfree(val);
-                       return -ENOMEM;
-               }
-               kfree(val);
-       }
+       if (!be)
+               return 0;
 
-       if (!be || !be->vif)
+       if (add_uevent_var(env, "script=%s", be->hotplug_script))
+               return -ENOMEM;
+
+       if (!be->vif)
                return 0;
 
        return add_uevent_var(env, "vif=%s", be->vif->dev->name);
@@ -793,6 +798,7 @@ static void connect(struct backend_info *be)
                        goto err;
                }
 
+               queue->credit_bytes = credit_bytes;
                queue->remaining_credit = credit_bytes;
                queue->credit_usec = credit_usec;
 
index 3f45afd4382e164053dac1231978e91a5af6dbe0..e031c943286ef3f7765e42640397626d7555607c 100644 (file)
@@ -1698,6 +1698,7 @@ static void xennet_destroy_queues(struct netfront_info *info)
 
                if (netif_running(info->netdev))
                        napi_disable(&queue->napi);
+               del_timer_sync(&queue->rx_refill_timer);
                netif_napi_del(&queue->napi);
        }
 
@@ -2102,9 +2103,6 @@ static const struct attribute_group xennet_dev_group = {
 static int xennet_remove(struct xenbus_device *dev)
 {
        struct netfront_info *info = dev_get_drvdata(&dev->dev);
-       unsigned int num_queues = info->netdev->real_num_tx_queues;
-       struct netfront_queue *queue = NULL;
-       unsigned int i = 0;
 
        dev_dbg(&dev->dev, "%s\n", dev->nodename);
 
@@ -2112,16 +2110,7 @@ static int xennet_remove(struct xenbus_device *dev)
 
        unregister_netdev(info->netdev);
 
-       for (i = 0; i < num_queues; ++i) {
-               queue = &info->queues[i];
-               del_timer_sync(&queue->rx_refill_timer);
-       }
-
-       if (num_queues) {
-               kfree(info->queues);
-               info->queues = NULL;
-       }
-
+       xennet_destroy_queues(info);
        xennet_free_netdev(info->netdev);
 
        return 0;
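
Centralizing the teardown also pins down the ordering: the refill timer must be stopped after NAPI is quiesced but before the NAPI context is unregistered, or a late timer fire could schedule RX work on a dead queue. The per-queue sequence now lives in one place:

	/* per-queue teardown order (from xennet_destroy_queues() above) */
	napi_disable(&queue->napi);		 /* stop polling */
	del_timer_sync(&queue->rx_refill_timer); /* timer cannot re-arm RX work */
	netif_napi_del(&queue->napi);		 /* safe to unregister now */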
index cd29b1038c5e3bf6f4a21659343c65584c44b969..15f9b7c9e4d38e93a52864a953e12d4172602797 100644 (file)
@@ -1660,6 +1660,7 @@ static int ntb_atom_detect(struct ntb_device *ndev)
        u32 ppd;
 
        ndev->hw_type = BWD_HW;
+       ndev->limits.max_mw = BWD_MAX_MW;
 
        rc = pci_read_config_dword(ndev->pdev, NTB_PPD_OFFSET, &ppd);
        if (rc)
@@ -1778,7 +1779,7 @@ static int ntb_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
                        dev_warn(&pdev->dev, "Cannot remap BAR %d\n",
                                 MW_TO_BAR(i));
                        rc = -EIO;
-                       goto err3;
+                       goto err4;
                }
        }
 
index 99764db0875aa0e1b34ca348ca1606c2a8990258..f0650265febf95cc6a37d03dd3d5b38b0d7370af 100644 (file)
@@ -189,7 +189,7 @@ int __of_attach_node_sysfs(struct device_node *np)
        return 0;
 }
 
-static int __init of_init(void)
+void __init of_core_init(void)
 {
        struct device_node *np;
 
@@ -198,7 +198,8 @@ static int __init of_init(void)
        of_kset = kset_create_and_add("devicetree", NULL, firmware_kobj);
        if (!of_kset) {
                mutex_unlock(&of_mutex);
-               return -ENOMEM;
+               pr_err("devicetree: failed to register existing nodes\n");
+               return;
        }
        for_each_of_allnodes(np)
                __of_attach_node_sysfs(np);
@@ -207,10 +208,7 @@ static int __init of_init(void)
        /* Symlink in /proc as required by userspace ABI */
        if (of_root)
                proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base");
-
-       return 0;
 }
-core_initcall(of_init);
 
 static struct property *__of_find_property(const struct device_node *np,
                                           const char *name, int *lenp)
index 3351ef408125d757f52ac772700687ef7f735c06..53826b84e0ec6d46d3699705f46216070a471867 100644 (file)
@@ -225,7 +225,7 @@ void __of_attach_node(struct device_node *np)
        phandle = __of_get_property(np, "phandle", &sz);
        if (!phandle)
                phandle = __of_get_property(np, "linux,phandle", &sz);
-       if (IS_ENABLED(PPC_PSERIES) && !phandle)
+       if (IS_ENABLED(CONFIG_PPC_PSERIES) && !phandle)
                phandle = __of_get_property(np, "ibm,phandle", &sz);
        np->phandle = (phandle && (sz >= 4)) ? be32_to_cpup(phandle) : 0;
 
index 8be2096c842390f5ecefc24186267e9ce4f61e2b..deeaed54422246dceb236f0a604696d5cddb2549 100644 (file)
@@ -348,7 +348,7 @@ int superio_fixup_irq(struct pci_dev *pcidev)
                BUG();
                return -1;
        }
-       printk("superio_fixup_irq(%s) ven 0x%x dev 0x%x from %pf\n",
+       printk(KERN_DEBUG "superio_fixup_irq(%s) ven 0x%x dev 0x%x from %ps\n",
                pci_name(pcidev),
                pcidev->vendor, pcidev->device,
                __builtin_return_address(0));
index a94dd2c4183a0ddc7ac118b1cfc41a7014d2fc4c..7eb4109a3df4eb6941f6c4c8ee435e7bb98f7747 100644 (file)
  */
 static DEFINE_SPINLOCK(ht_irq_lock);
 
-struct ht_irq_cfg {
-       struct pci_dev *dev;
-        /* Update callback used to cope with buggy hardware */
-       ht_irq_update_t *update;
-       unsigned pos;
-       unsigned idx;
-       struct ht_irq_msg msg;
-};
-
-
 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 {
        struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
        unsigned long flags;
+
        spin_lock_irqsave(&ht_irq_lock, flags);
        if (cfg->msg.address_lo != msg->address_lo) {
                pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
@@ -55,6 +46,7 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 {
        struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
+
        *msg = cfg->msg;
 }
 
@@ -86,7 +78,6 @@ void unmask_ht_irq(struct irq_data *data)
  */
 int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
 {
-       struct ht_irq_cfg *cfg;
        int max_irq, pos, irq;
        unsigned long flags;
        u32 data;
@@ -105,29 +96,9 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
        if (idx > max_irq)
                return -EINVAL;
 
-       cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
-       if (!cfg)
-               return -ENOMEM;
-
-       cfg->dev = dev;
-       cfg->update = update;
-       cfg->pos = pos;
-       cfg->idx = 0x10 + (idx * 2);
-       /* Initialize msg to a value that will never match the first write. */
-       cfg->msg.address_lo = 0xffffffff;
-       cfg->msg.address_hi = 0xffffffff;
-
-       irq = irq_alloc_hwirq(dev_to_node(&dev->dev));
-       if (!irq) {
-               kfree(cfg);
-               return -EBUSY;
-       }
-       irq_set_handler_data(irq, cfg);
-
-       if (arch_setup_ht_irq(irq, dev) < 0) {
-               ht_destroy_irq(irq);
-               return -EBUSY;
-       }
+       irq = arch_setup_ht_irq(idx, pos, dev, update);
+       if (irq > 0)
+               dev_dbg(&dev->dev, "irq %d for HT\n", irq);
 
        return irq;
 }
@@ -158,13 +129,6 @@ EXPORT_SYMBOL(ht_create_irq);
  */
 void ht_destroy_irq(unsigned int irq)
 {
-       struct ht_irq_cfg *cfg;
-
-       cfg = irq_get_handler_data(irq);
-       irq_set_chip(irq, NULL);
-       irq_set_handler_data(irq, NULL);
-       irq_free_hwirq(irq);
-
-       kfree(cfg);
+       arch_teardown_ht_irq(irq);
 }
 EXPORT_SYMBOL(ht_destroy_irq);
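
The generic htirq layer now delegates both allocation and teardown to the architecture, so the handler-data bookkeeping above disappears. The two arch hooks, as implied by the call sites (a sketch, not the authoritative header):

	/* prototypes implied by the calls above; the arch headers are
	 * authoritative */
	int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
			      ht_irq_update_t *update);	/* returns irq or -errno */
	void arch_teardown_ht_irq(unsigned int irq);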
index c6dc1dfd25d55ea9ac536634143eafcf196ce2c2..2890ad7cf7c63f0533811e21e67fea2c4cc25585 100644 (file)
@@ -819,13 +819,6 @@ static void quirk_amd_ioapic(struct pci_dev *dev)
        }
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD,     PCI_DEVICE_ID_AMD_VIPER_7410,   quirk_amd_ioapic);
-
-static void quirk_ioapic_rmw(struct pci_dev *dev)
-{
-       if (dev->devfn == 0 && dev->bus->number == 0)
-               sis_apic_bug = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI,      PCI_ANY_ID,                     quirk_ioapic_rmw);
 #endif /* CONFIG_X86_IO_APIC */
 
 /*
index 4fd0cacf7ca0ae0dfaebf5c612f457cdf6fa43f9..508cc56130e3f88d1b01716a7a00fead250fdf1c 100644 (file)
@@ -428,16 +428,19 @@ static void __assign_resources_sorted(struct list_head *head,
                 * consistent.
                 */
                if (add_align > dev_res->res->start) {
+                       resource_size_t r_size = resource_size(dev_res->res);
+
                        dev_res->res->start = add_align;
-                       dev_res->res->end = add_align +
-                                           resource_size(dev_res->res);
+                       dev_res->res->end = add_align + r_size - 1;
 
                        list_for_each_entry(dev_res2, head, list) {
                                align = pci_resource_alignment(dev_res2->dev,
                                                               dev_res2->res);
-                               if (add_align > align)
+                               if (add_align > align) {
                                        list_move_tail(&dev_res->list,
                                                       &dev_res2->list);
+                                       break;
+                               }
                        }
                }
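
The end-address change fixes a classic off-by-one: in the resource API, resource_size() is defined as end - start + 1, so after moving start the end must be start + size - 1. For example, realigning a 0x100-byte window to 0x2000:

	res->start = 0x2000;
	res->end   = 0x2000 + 0x100 - 1;	/* 0x20ff; 0x2100 would add a byte */

The added break likewise stops the walk once the node has been placed before the first entry with smaller alignment, instead of letting later iterations move it again.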
 
index a53bd5b52df97ff48fa921a5009f2fa6937aa377..fc9b9f0ea91e8132b08c85478a592e3f820fc2cc 100644 (file)
@@ -38,7 +38,9 @@ config ARMADA375_USBCLUSTER_PHY
 config PHY_DM816X_USB
        tristate "TI dm816x USB PHY driver"
        depends on ARCH_OMAP2PLUS
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_PHY
        help
          Enable this for dm816x USB to work.
 
@@ -97,8 +99,9 @@ config OMAP_CONTROL_PHY
 config OMAP_USB2
        tristate "OMAP USB2 PHY Driver"
        depends on ARCH_OMAP2PLUS
-       depends on USB_PHY
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_PHY
        select OMAP_CONTROL_PHY
        depends on OMAP_OCP2SCP
        help
@@ -122,8 +125,9 @@ config TI_PIPE3
 config TWL4030_USB
        tristate "TWL4030 USB Transceiver Driver"
        depends on TWL4030_CORE && REGULATOR_TWL4030 && USB_MUSB_OMAP2PLUS
-       depends on USB_PHY
+       depends on USB_SUPPORT
        select GENERIC_PHY
+       select USB_PHY
        help
          Enable this to support the USB OTG transceiver on TWL4030
          family chips (including the TWL5030 and TPS659x0 devices).
@@ -304,7 +308,7 @@ config PHY_STIH41X_USB
 
 config PHY_QCOM_UFS
        tristate "Qualcomm UFS PHY driver"
-       depends on OF && ARCH_MSM
+       depends on OF && ARCH_QCOM
        select GENERIC_PHY
        help
          Support for UFS PHY on QCOM chipsets.
index 3791838f4bd4b14e145dd5718a3030c4b89d9f3b..63bc12d7a73e561a8e967ac4fb7f453c9a0d23ab 100644 (file)
@@ -530,7 +530,7 @@ struct phy *phy_optional_get(struct device *dev, const char *string)
 {
        struct phy *phy = phy_get(dev, string);
 
-       if (PTR_ERR(phy) == -ENODEV)
+       if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
                phy = NULL;
 
        return phy;
@@ -584,7 +584,7 @@ struct phy *devm_phy_optional_get(struct device *dev, const char *string)
 {
        struct phy *phy = devm_phy_get(dev, string);
 
-       if (PTR_ERR(phy) == -ENODEV)
+       if (IS_ERR(phy) && (PTR_ERR(phy) == -ENODEV))
                phy = NULL;
 
        return phy;
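
Strictly, PTR_ERR() is only a cast, so comparing it against -ENODEV on a possibly valid pointer relied on kernel pointers never aliasing small negative values; gating on IS_ERR() first makes the intent explicit and satisfies static checkers. The resulting idiom for optional resources:

	struct phy *phy = phy_get(dev, string);

	/* inspect the errno only once we know this is an error pointer */
	if (IS_ERR(phy) && PTR_ERR(phy) == -ENODEV)
		phy = NULL;	/* an absent PHY is fine for *_optional_get() */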
index 183ef43681016ba0f238edfa98bbbef3684ab543..c1a468686bdc72433b7596512cb70852f3ef2420 100644 (file)
@@ -275,6 +275,7 @@ static int omap_usb2_probe(struct platform_device *pdev)
                phy->wkupclk = devm_clk_get(phy->dev, "usb_phy_cm_clk32k");
                if (IS_ERR(phy->wkupclk)) {
                        dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n");
+                       pm_runtime_disable(phy->dev);
                        return PTR_ERR(phy->wkupclk);
                } else {
                        dev_warn(&pdev->dev,
index 778276aba3aa0092d8e8e7bc2de15eae4f5a5a15..97d45f47d1ade847f9f0d7462d0ae91e91505974 100644 (file)
@@ -23,7 +23,7 @@
 #define USBHS_LPSTS                    0x02
 #define USBHS_UGCTRL                   0x80
 #define USBHS_UGCTRL2                  0x84
-#define USBHS_UGSTS                    0x88    /* The manuals have 0x90 */
+#define USBHS_UGSTS                    0x88    /* From technical update */
 
 /* Low Power Status register (LPSTS) */
 #define USBHS_LPSTS_SUSPM              0x4000
@@ -41,7 +41,7 @@
 #define USBHS_UGCTRL2_USB0SEL_HS_USB   0x00000030
 
 /* USB General status register (UGSTS) */
-#define USBHS_UGSTS_LOCK               0x00000300 /* The manuals have 0x3 */
+#define USBHS_UGSTS_LOCK               0x00000100 /* From technical update */
 
 #define PHYS_PER_CHANNEL       2
 
index 4ad5c1a996e3e906023bbe246c98a258ec3bba23..e406e3d8c1c71713e08ceb440e43900fbbb5b8be 100644 (file)
@@ -643,7 +643,9 @@ static const struct cygnus_gpio_pin_range cygnus_gpio_pintable[] = {
        CYGNUS_PINRANGE(87, 104, 12),
        CYGNUS_PINRANGE(99, 102, 2),
        CYGNUS_PINRANGE(101, 90, 4),
-       CYGNUS_PINRANGE(105, 116, 10),
+       CYGNUS_PINRANGE(105, 116, 6),
+       CYGNUS_PINRANGE(111, 100, 2),
+       CYGNUS_PINRANGE(113, 122, 4),
        CYGNUS_PINRANGE(123, 11, 1),
        CYGNUS_PINRANGE(124, 38, 4),
        CYGNUS_PINRANGE(128, 43, 1),
index 89dca77ca0382e93909188cad63ccd9ef6bff41b..18ee2089df4ae84e7edb6f665146be84ca43b83f 100644 (file)
@@ -1110,7 +1110,7 @@ void devm_pinctrl_put(struct pinctrl *p)
 EXPORT_SYMBOL_GPL(devm_pinctrl_put);
 
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-                        bool dup, bool locked)
+                        bool dup)
 {
        int i, ret;
        struct pinctrl_maps *maps_node;
@@ -1178,11 +1178,9 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
                maps_node->maps = maps;
        }
 
-       if (!locked)
-               mutex_lock(&pinctrl_maps_mutex);
+       mutex_lock(&pinctrl_maps_mutex);
        list_add_tail(&maps_node->node, &pinctrl_maps);
-       if (!locked)
-               mutex_unlock(&pinctrl_maps_mutex);
+       mutex_unlock(&pinctrl_maps_mutex);
 
        return 0;
 }
@@ -1197,7 +1195,7 @@ int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
 int pinctrl_register_mappings(struct pinctrl_map const *maps,
                              unsigned num_maps)
 {
-       return pinctrl_register_map(maps, num_maps, true, false);
+       return pinctrl_register_map(maps, num_maps, true);
 }
 
 void pinctrl_unregister_map(struct pinctrl_map const *map)
index 75476b3d87dafe00c6273931a4e3a137f1509dfa..b24ea846c8677ebea49ffd435c04c3de40f226b9 100644 (file)
@@ -183,7 +183,7 @@ static inline struct pin_desc *pin_desc_get(struct pinctrl_dev *pctldev,
 }
 
 int pinctrl_register_map(struct pinctrl_map const *maps, unsigned num_maps,
-                        bool dup, bool locked);
+                        bool dup);
 void pinctrl_unregister_map(struct pinctrl_map const *map);
 
 extern int pinctrl_force_sleep(struct pinctrl_dev *pctldev);
index eda13de2e7c0d110f5e105a84b5b19da93cadc36..0bbf7d71b2811242a5a69db9fe97389c1acde5e8 100644 (file)
@@ -92,7 +92,7 @@ static int dt_remember_or_free_map(struct pinctrl *p, const char *statename,
        dt_map->num_maps = num_maps;
        list_add_tail(&dt_map->node, &p->dt_maps);
 
-       return pinctrl_register_map(map, num_maps, false, true);
+       return pinctrl_register_map(map, num_maps, false);
 }
 
 struct pinctrl_dev *of_pinctrl_get(struct device_node *np)
index 82f691eeeec4d82cd5e75b7a96be719befbcd57f..732ff757a95fe12fe7b5ddca1714a8e527017e90 100644 (file)
@@ -1292,6 +1292,49 @@ static void chv_gpio_irq_unmask(struct irq_data *d)
        chv_gpio_irq_mask_unmask(d, false);
 }
 
+static unsigned chv_gpio_irq_startup(struct irq_data *d)
+{
+       /*
+        * Check if the interrupt has been requested with a trigger type of
+        * 0. In that case it is assumed that the current values programmed
+        * into the hardware are used (e.g. BIOS-configured defaults).
+        *
+        * Since ->irq_set_type() will never be called in that case, we need
+        * to read back the values from the hardware now, set the correct
+        * flow handler and update the mappings before the interrupt is used.
+        */
+       if (irqd_get_trigger_type(d) == IRQ_TYPE_NONE) {
+               struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
+               struct chv_pinctrl *pctrl = gpiochip_to_pinctrl(gc);
+               unsigned offset = irqd_to_hwirq(d);
+               int pin = chv_gpio_offset_to_pin(pctrl, offset);
+               irq_flow_handler_t handler;
+               unsigned long flags;
+               u32 intsel, value;
+
+               intsel = readl(chv_padreg(pctrl, pin, CHV_PADCTRL0));
+               intsel &= CHV_PADCTRL0_INTSEL_MASK;
+               intsel >>= CHV_PADCTRL0_INTSEL_SHIFT;
+
+               value = readl(chv_padreg(pctrl, pin, CHV_PADCTRL1));
+               if (value & CHV_PADCTRL1_INTWAKECFG_LEVEL)
+                       handler = handle_level_irq;
+               else
+                       handler = handle_edge_irq;
+
+               spin_lock_irqsave(&pctrl->lock, flags);
+               if (!pctrl->intr_lines[intsel]) {
+                       __irq_set_handler_locked(d->irq, handler);
+                       pctrl->intr_lines[intsel] = offset;
+               }
+               spin_unlock_irqrestore(&pctrl->lock, flags);
+       }
+
+       chv_gpio_irq_unmask(d);
+       return 0;
+}
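
The startup hook exists for consumers that request the line with a trigger type of 0 (IRQ_TYPE_NONE), i.e. "keep whatever the firmware programmed": ->irq_set_type() is never invoked for them, so the flow handler has to be deduced from the pad registers at startup. A hypothetical consumer relying on BIOS defaults would simply do:

	/* flags = 0: no trigger type, inherit the BIOS/hardware setup;
	 * my_handler and dev are placeholders */
	ret = request_irq(irq, my_handler, 0, "my-device", dev);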
+
 static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
 {
        struct gpio_chip *gc = irq_data_get_irq_chip_data(d);
@@ -1357,6 +1400,7 @@ static int chv_gpio_irq_type(struct irq_data *d, unsigned type)
 
 static struct irq_chip chv_gpio_irqchip = {
        .name = "chv-gpio",
+       .irq_startup = chv_gpio_irq_startup,
        .irq_ack = chv_gpio_irq_ack,
        .irq_mask = chv_gpio_irq_mask,
        .irq_unmask = chv_gpio_irq_unmask,
index 493294c0ebe6faccf0dc8c0e5f15b98ddf9d76b0..474812e2b0cb97c806402fda486ab2e883398c06 100644 (file)
@@ -881,6 +881,8 @@ static int mtk_gpio_set_debounce(struct gpio_chip *chip, unsigned offset,
        if (!mtk_eint_get_mask(pctl, eint_num)) {
                mtk_eint_mask(d);
                unmask = 1;
+       } else {
+               unmask = 0;
        }
 
        clr_bit = 0xff << eint_offset;
index edcd140e089968e0f7b95fef4ffcd82f157f8294..a70a5fe79d44d343b0e1830ccd36fe7b6384d314 100644 (file)
@@ -569,7 +569,7 @@ static int meson_gpiolib_register(struct meson_pinctrl *pc)
                domain->chip.direction_output = meson_gpio_direction_output;
                domain->chip.get = meson_gpio_get;
                domain->chip.set = meson_gpio_set;
-               domain->chip.base = -1;
+               domain->chip.base = domain->data->pin_base;
                domain->chip.ngpio = domain->data->num_pins;
                domain->chip.can_sleep = false;
                domain->chip.of_node = domain->of_node;
index 2f7ea62298801c2a5b9b87605a3fe5fbb0ca00bd..9677807db364d70ee4512799e26449bccba56a08 100644 (file)
@@ -876,13 +876,13 @@ static struct meson_domain_data meson8b_domain_data[] = {
                .banks          = meson8b_banks,
                .num_banks      = ARRAY_SIZE(meson8b_banks),
                .pin_base       = 0,
-               .num_pins       = 83,
+               .num_pins       = 130,
        },
        {
                .name           = "ao-bank",
                .banks          = meson8b_ao_banks,
                .num_banks      = ARRAY_SIZE(meson8b_ao_banks),
-               .pin_base       = 83,
+               .pin_base       = 130,
                .num_pins       = 16,
        },
 };
index 42f930f70de31e9086d4d7f7fec78a47d83cbfcc..03aa58c4cb85bd04cb4b043f09a1e9aec5f19cc1 100644 (file)
@@ -364,7 +364,7 @@ static struct mvebu_mpp_mode mv88f6710_mpp_modes[] = {
           MPP_FUNCTION(0x5, "audio", "mclk"),
           MPP_FUNCTION(0x6, "uart0", "cts")),
        MPP_MODE(63,
-          MPP_FUNCTION(0x0, "gpo", NULL),
+          MPP_FUNCTION(0x0, "gpio", NULL),
           MPP_FUNCTION(0x1, "spi0", "sck"),
           MPP_FUNCTION(0x2, "tclk", NULL)),
        MPP_MODE(64,
index b2d22218a2582f94b2c5d0274843fa0c2d3d7162..ae4115e4b4efc676c69cb5f19c5a92956fed6507 100644 (file)
@@ -260,6 +260,7 @@ static int pmic_gpio_set_mux(struct pinctrl_dev *pctldev, unsigned function,
                        val = 1;
        }
 
+       val = val << PMIC_GPIO_REG_MODE_DIR_SHIFT;
        val |= pad->function << PMIC_GPIO_REG_MODE_FUNCTION_SHIFT;
        val |= pad->out_value & PMIC_GPIO_REG_MODE_VALUE_SHIFT;
 
@@ -417,7 +418,7 @@ static int pmic_gpio_config_set(struct pinctrl_dev *pctldev, unsigned int pin,
                return ret;
 
        val = pad->buffer_type << PMIC_GPIO_REG_OUT_TYPE_SHIFT;
-       val = pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
+       val |= pad->strength << PMIC_GPIO_REG_OUT_STRENGTH_SHIFT;
 
        ret = pmic_gpio_write(state, pad, PMIC_GPIO_REG_DIG_OUT_CTL, val);
        if (ret < 0)
@@ -466,12 +467,13 @@ static void pmic_gpio_config_dbg_show(struct pinctrl_dev *pctldev,
                seq_puts(s, " ---");
        } else {
 
-               if (!pad->input_enabled) {
+               if (pad->input_enabled) {
                        ret = pmic_gpio_read(state, pad, PMIC_MPP_REG_RT_STS);
-                       if (!ret) {
-                               ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
-                               pad->out_value = ret;
-                       }
+                       if (ret < 0)
+                               return;
+
+                       ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+                       pad->out_value = ret;
                }
 
                seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
index 8f36c5f9194903fd8433736499a03a61afbfb2ac..211b942ad6d544ade10ea2fa91c54e5210efa290 100644 (file)
@@ -370,6 +370,7 @@ static int pmic_mpp_set_mux(struct pinctrl_dev *pctldev, unsigned function,
                }
        }
 
+       val = val << PMIC_MPP_REG_MODE_DIR_SHIFT;
        val |= pad->function << PMIC_MPP_REG_MODE_FUNCTION_SHIFT;
        val |= pad->out_value & PMIC_MPP_REG_MODE_VALUE_MASK;
 
@@ -576,10 +577,11 @@ static void pmic_mpp_config_dbg_show(struct pinctrl_dev *pctldev,
 
                if (pad->input_enabled) {
                        ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
-                       if (!ret) {
-                               ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
-                               pad->out_value = ret;
-                       }
+                       if (ret < 0)
+                               return;
+
+                       ret &= PMIC_MPP_REG_RT_STS_VAL_MASK;
+                       pad->out_value = ret;
                }
 
                seq_printf(s, " %-4s", pad->output_enabled ? "out" : "in");
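
Both qcom hunks fix the same flaw: the register read returns a non-negative value or a negative errno, and the old "if (!ret)" accepted only a read of exactly zero, silently dropping every real value as well as every error. The defensive shape is now:

	ret = pmic_mpp_read(state, pad, PMIC_MPP_REG_RT_STS);
	if (ret < 0)
		return;			/* never mask an errno into "data" */
	pad->out_value = ret & PMIC_MPP_REG_RT_STS_VAL_MASK;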
index 9bb9ad6d4a1b1b1c3600cd283f240d2479a80841..28f328136f0df78fe3253d6996ae9108229f87e5 100644 (file)
@@ -2897,7 +2897,7 @@ static ssize_t hotkey_wakeup_reason_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_wakeup_reason);
 }
 
-static DEVICE_ATTR_RO(hotkey_wakeup_reason);
+static DEVICE_ATTR(wakeup_reason, S_IRUGO, hotkey_wakeup_reason_show, NULL);
 
 static void hotkey_wakeup_reason_notify_change(void)
 {
@@ -2913,7 +2913,8 @@ static ssize_t hotkey_wakeup_hotunplug_complete_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%d\n", hotkey_autosleep_ack);
 }
 
-static DEVICE_ATTR_RO(hotkey_wakeup_hotunplug_complete);
+static DEVICE_ATTR(wakeup_hotunplug_complete, S_IRUGO,
+                  hotkey_wakeup_hotunplug_complete_show, NULL);
 
 static void hotkey_wakeup_hotunplug_complete_notify_change(void)
 {
@@ -2978,8 +2979,8 @@ static struct attribute *hotkey_attributes[] __initdata = {
        &dev_attr_hotkey_enable.attr,
        &dev_attr_hotkey_bios_enabled.attr,
        &dev_attr_hotkey_bios_mask.attr,
-       &dev_attr_hotkey_wakeup_reason.attr,
-       &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
+       &dev_attr_wakeup_reason.attr,
+       &dev_attr_wakeup_hotunplug_complete.attr,
        &dev_attr_hotkey_mask.attr,
        &dev_attr_hotkey_all_mask.attr,
        &dev_attr_hotkey_recommended_mask.attr,
@@ -4393,12 +4394,13 @@ static ssize_t wan_enable_store(struct device *dev,
                        attr, buf, count);
 }
 
-static DEVICE_ATTR_RW(wan_enable);
+static DEVICE_ATTR(wwan_enable, S_IWUSR | S_IRUGO,
+                  wan_enable_show, wan_enable_store);
 
 /* --------------------------------------------------------------------- */
 
 static struct attribute *wan_attributes[] = {
-       &dev_attr_wan_enable.attr,
+       &dev_attr_wwan_enable.attr,
        NULL
 };
 
@@ -8138,7 +8140,8 @@ static ssize_t fan_pwm1_enable_store(struct device *dev,
        return count;
 }
 
-static DEVICE_ATTR_RW(fan_pwm1_enable);
+static DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO,
+                  fan_pwm1_enable_show, fan_pwm1_enable_store);
 
 /* sysfs fan pwm1 ------------------------------------------------------ */
 static ssize_t fan_pwm1_show(struct device *dev,
@@ -8198,7 +8201,7 @@ static ssize_t fan_pwm1_store(struct device *dev,
        return (rc) ? rc : count;
 }
 
-static DEVICE_ATTR_RW(fan_pwm1);
+static DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, fan_pwm1_show, fan_pwm1_store);
 
 /* sysfs fan fan1_input ------------------------------------------------ */
 static ssize_t fan_fan1_input_show(struct device *dev,
@@ -8215,7 +8218,7 @@ static ssize_t fan_fan1_input_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", speed);
 }
 
-static DEVICE_ATTR_RO(fan_fan1_input);
+static DEVICE_ATTR(fan1_input, S_IRUGO, fan_fan1_input_show, NULL);
 
 /* sysfs fan fan2_input ------------------------------------------------ */
 static ssize_t fan_fan2_input_show(struct device *dev,
@@ -8232,7 +8235,7 @@ static ssize_t fan_fan2_input_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%u\n", speed);
 }
 
-static DEVICE_ATTR_RO(fan_fan2_input);
+static DEVICE_ATTR(fan2_input, S_IRUGO, fan_fan2_input_show, NULL);
 
 /* sysfs fan fan_watchdog (hwmon driver) ------------------------------- */
 static ssize_t fan_fan_watchdog_show(struct device_driver *drv,
@@ -8265,8 +8268,8 @@ static DRIVER_ATTR(fan_watchdog, S_IWUSR | S_IRUGO,
 
 /* --------------------------------------------------------------------- */
 static struct attribute *fan_attributes[] = {
-       &dev_attr_fan_pwm1_enable.attr, &dev_attr_fan_pwm1.attr,
-       &dev_attr_fan_fan1_input.attr,
+       &dev_attr_pwm1_enable.attr, &dev_attr_pwm1.attr,
+       &dev_attr_fan1_input.attr,
        NULL, /* for fan2_input */
        NULL
 };
@@ -8400,7 +8403,7 @@ static int __init fan_init(struct ibm_init_struct *iibm)
                if (tp_features.second_fan) {
                        /* attach second fan tachometer */
                        fan_attributes[ARRAY_SIZE(fan_attributes)-2] =
-                                       &dev_attr_fan_fan2_input.attr;
+                                       &dev_attr_fan2_input.attr;
                }
                rc = sysfs_create_group(&tpacpi_sensors_pdev->dev.kobj,
                                         &fan_attr_group);
@@ -8848,7 +8851,7 @@ static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
        return snprintf(buf, PAGE_SIZE, "%s\n", TPACPI_NAME);
 }
 
-static DEVICE_ATTR_RO(thinkpad_acpi_pdev_name);
+static DEVICE_ATTR(name, S_IRUGO, thinkpad_acpi_pdev_name_show, NULL);
 
 /* --------------------------------------------------------------------- */
 
@@ -9390,8 +9393,7 @@ static void thinkpad_acpi_module_exit(void)
                hwmon_device_unregister(tpacpi_hwmon);
 
        if (tp_features.sensors_pdev_attrs_registered)
-               device_remove_file(&tpacpi_sensors_pdev->dev,
-                                  &dev_attr_thinkpad_acpi_pdev_name);
+               device_remove_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
        if (tpacpi_sensors_pdev)
                platform_device_unregister(tpacpi_sensors_pdev);
        if (tpacpi_pdev)
@@ -9512,8 +9514,7 @@ static int __init thinkpad_acpi_module_init(void)
                thinkpad_acpi_module_exit();
                return ret;
        }
-       ret = device_create_file(&tpacpi_sensors_pdev->dev,
-                                &dev_attr_thinkpad_acpi_pdev_name);
+       ret = device_create_file(&tpacpi_sensors_pdev->dev, &dev_attr_name);
        if (ret) {
                pr_err("unable to create sysfs hwmon device attributes\n");
                thinkpad_acpi_module_exit();
index ca1cc5a47eb1e02b9acd38f1aeff2abf93bbf6ac..bd1dbfee2515dda65c54176d9b2e547cb4e3beca 100644 (file)
@@ -1149,6 +1149,7 @@ static struct platform_driver axp288_fuel_gauge_driver = {
 
 module_platform_driver(axp288_fuel_gauge_driver);
 
+MODULE_AUTHOR("Ramakrishna Pallala <ramakrishna.pallala@intel.com>");
 MODULE_AUTHOR("Todd Brandt <todd.e.brandt@linux.intel.com>");
 MODULE_DESCRIPTION("Xpower AXP288 Fuel Gauge Driver");
 MODULE_LICENSE("GPL");
index a57433de5c249fa33e6fb4f59e7c2dbf3190208b..b6b98378faa32b50c0e1900c76ca4e26da3bd6c1 100644 (file)
@@ -1109,6 +1109,14 @@ static void __exit bq27x00_battery_exit(void)
 }
 module_exit(bq27x00_battery_exit);
 
+#ifdef CONFIG_BATTERY_BQ27X00_PLATFORM
+MODULE_ALIAS("platform:bq27000-battery");
+#endif
+
+#ifdef CONFIG_BATTERY_BQ27X00_I2C
+MODULE_ALIAS("i2c:bq27000-battery");
+#endif
+
 MODULE_AUTHOR("Rodolfo Giometti <giometti@linux.it>");
 MODULE_DESCRIPTION("BQ27x00 battery monitor driver");
 MODULE_LICENSE("GPL");
index 2da9ed8ccbb5391f50c7137866b3fad5669dd2d7..8a971b3dbe583f0dc2640ddfa867f33c3e2c0ef4 100644 (file)
@@ -347,7 +347,7 @@ static int collie_bat_probe(struct ucb1x00_dev *dev)
                goto err_psy_reg_main;
        }
 
-       psy_main_cfg.drv_data = &collie_bat_bu;
+       psy_bu_cfg.drv_data = &collie_bat_bu;
        collie_bat_bu.psy = power_supply_register(&dev->ucb->dev,
                                                  &collie_bat_bu_desc,
                                                  &psy_bu_cfg);
index aad9c3318c02a271a864da8866b9b38c4623768c..17d93a73c5136e53ab2955d7aba02a7ef0c85d1f 100644 (file)
@@ -41,6 +41,7 @@ config POWER_RESET_AXXIA
 config POWER_RESET_BRCMSTB
        bool "Broadcom STB reset driver"
        depends on ARM || MIPS || COMPILE_TEST
+       depends on MFD_SYSCON
        default ARCH_BRCMSTB
        help
          This driver provides restart support for Broadcom STB boards.
index 01c7055c4200e3d9a7333ac7c0bcaccc3ac2d4a5..ca461ebc7ae8f73338059d6ce9547b850c269f1b 100644 (file)
@@ -212,9 +212,9 @@ static int at91_reset_platform_probe(struct platform_device *pdev)
                res = platform_get_resource(pdev, IORESOURCE_MEM, idx + 1 );
                at91_ramc_base[idx] = devm_ioremap(&pdev->dev, res->start,
                                                   resource_size(res));
-               if (IS_ERR(at91_ramc_base[idx])) {
+               if (!at91_ramc_base[idx]) {
                        dev_err(&pdev->dev, "Could not map ram controller address\n");
-                       return PTR_ERR(at91_ramc_base[idx]);
+                       return -ENOMEM;
                }
        }
 
index 7ef193b6f7fe81451c504797cec9bb4932879ee8..5f855f99bdfcdde73bef8e039f6820e6c256a92e 100644 (file)
@@ -120,18 +120,7 @@ static enum hrtimer_restart ltc2952_poweroff_timer_wde(struct hrtimer *timer)
 
 static void ltc2952_poweroff_start_wde(struct ltc2952_poweroff *data)
 {
-       if (hrtimer_start(&data->timer_wde, data->wde_interval,
-                         HRTIMER_MODE_REL)) {
-               /*
-                * The device will not toggle the watchdog reset,
-                * thus shut down is only safe if the PowerPath controller
-                * has a long enough time-off before triggering a hardware
-                * power-off.
-                *
-                * Only sending a warning as the system will power-off anyway
-                */
-               dev_err(data->dev, "unable to start the timer\n");
-       }
+       hrtimer_start(&data->timer_wde, data->wde_interval, HRTIMER_MODE_REL);
 }
 
 static enum hrtimer_restart
@@ -165,12 +154,10 @@ static irqreturn_t ltc2952_poweroff_handler(int irq, void *dev_id)
        }
 
        if (gpiod_get_value(data->gpio_trigger)) {
-               if (hrtimer_start(&data->timer_trigger, data->trigger_delay,
-                                 HRTIMER_MODE_REL))
-                       dev_err(data->dev, "unable to start the wait timer\n");
+               hrtimer_start(&data->timer_trigger, data->trigger_delay,
+                             HRTIMER_MODE_REL);
        } else {
                hrtimer_cancel(&data->timer_trigger);
-               /* omitting return value check, timer should have been valid */
        }
        return IRQ_HANDLED;
 }
index 476171a768d61def6d1f1e476ebc6db78c60839b..8a029f9bc18cb0f0c2c95bc7ea4d9167164326be 100644 (file)
@@ -16,6 +16,7 @@
 #include <linux/mfd/syscon.h>
 #include <linux/module.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/pwm.h>
 #include <linux/regmap.h>
 #define PERIP_PWM_PDM_CONTROL_CH_MASK          0x1
 #define PERIP_PWM_PDM_CONTROL_CH_SHIFT(ch)     ((ch) * 4)
 
-#define MAX_TMBASE_STEPS                       65536
+/*
+ * PWM period is specified with a timebase register,
+ * in number of step periods. The PWM duty cycle is also
+ * specified in step periods, in the [0, $timebase] range.
+ * In other words, the timebase imposes the duty cycle
+ * resolution. Therefore, let's constrain the timebase to
+ * a minimum value to allow a sane range of duty cycle values.
+ * Imposing a minimum timebase will impose a maximum PWM frequency.
+ *
+ * The value chosen is completely arbitrary.
+ */
+#define MIN_TMBASE_STEPS                       16
+
+struct img_pwm_soc_data {
+       u32 max_timebase;
+};
 
 struct img_pwm_chip {
        struct device   *dev;
@@ -47,6 +63,9 @@ struct img_pwm_chip {
        struct clk      *sys_clk;
        void __iomem    *base;
        struct regmap   *periph_regs;
+       int             max_period_ns;
+       int             min_period_ns;
+       const struct img_pwm_soc_data   *data;
 };
 
 static inline struct img_pwm_chip *to_img_pwm_chip(struct pwm_chip *chip)
@@ -72,24 +91,31 @@ static int img_pwm_config(struct pwm_chip *chip, struct pwm_device *pwm,
        u32 val, div, duty, timebase;
        unsigned long mul, output_clk_hz, input_clk_hz;
        struct img_pwm_chip *pwm_chip = to_img_pwm_chip(chip);
+       unsigned int max_timebase = pwm_chip->data->max_timebase;
+
+       if (period_ns < pwm_chip->min_period_ns ||
+           period_ns > pwm_chip->max_period_ns) {
+               dev_err(chip->dev, "configured period not in range\n");
+               return -ERANGE;
+       }
 
        input_clk_hz = clk_get_rate(pwm_chip->pwm_clk);
        output_clk_hz = DIV_ROUND_UP(NSEC_PER_SEC, period_ns);
 
        mul = DIV_ROUND_UP(input_clk_hz, output_clk_hz);
-       if (mul <= MAX_TMBASE_STEPS) {
+       if (mul <= max_timebase) {
                div = PWM_CTRL_CFG_NO_SUB_DIV;
                timebase = DIV_ROUND_UP(mul, 1);
-       } else if (mul <= MAX_TMBASE_STEPS * 8) {
+       } else if (mul <= max_timebase * 8) {
                div = PWM_CTRL_CFG_SUB_DIV0;
                timebase = DIV_ROUND_UP(mul, 8);
-       } else if (mul <= MAX_TMBASE_STEPS * 64) {
+       } else if (mul <= max_timebase * 64) {
                div = PWM_CTRL_CFG_SUB_DIV1;
                timebase = DIV_ROUND_UP(mul, 64);
-       } else if (mul <= MAX_TMBASE_STEPS * 512) {
+       } else if (mul <= max_timebase * 512) {
                div = PWM_CTRL_CFG_SUB_DIV0_DIV1;
                timebase = DIV_ROUND_UP(mul, 512);
-       } else if (mul > MAX_TMBASE_STEPS * 512) {
+       } else if (mul > max_timebase * 512) {
                dev_err(chip->dev,
                        "failed to configure timebase steps/divider value\n");
                return -EINVAL;
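
The cascade above picks the smallest sub-divider (1, 8, 64 or 512) whose range still covers the requested clock ratio, then scales the timebase down by the same factor. A standalone sketch of the selection arithmetic (the 40 MHz clock and 1 ms period are made-up inputs, not hardware values):

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned long input_hz = 40000000, period_ns = 1000000;
            unsigned long max_timebase = 255;   /* SoC-specific cap */
            unsigned long output_hz = DIV_ROUND_UP(1000000000UL, period_ns);
            unsigned long mul = DIV_ROUND_UP(input_hz, output_hz);
            unsigned long divs[] = { 1, 8, 64, 512 };
            int i;

            for (i = 0; i < 4; i++) {
                    if (mul <= max_timebase * divs[i]) {
                            printf("sub-div %lu, timebase %lu\n", divs[i],
                                   DIV_ROUND_UP(mul, divs[i]));
                            return 0;
                    }
            }
            printf("ratio %lu not representable\n", mul);
            return 1;
    }
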
@@ -143,11 +169,27 @@ static const struct pwm_ops img_pwm_ops = {
        .owner = THIS_MODULE,
 };
 
+static const struct img_pwm_soc_data pistachio_pwm = {
+       .max_timebase = 255,
+};
+
+static const struct of_device_id img_pwm_of_match[] = {
+       {
+               .compatible = "img,pistachio-pwm",
+               .data = &pistachio_pwm,
+       },
+       { }
+};
+MODULE_DEVICE_TABLE(of, img_pwm_of_match);
+
 static int img_pwm_probe(struct platform_device *pdev)
 {
        int ret;
+       u64 val;
+       unsigned long clk_rate;
        struct resource *res;
        struct img_pwm_chip *pwm;
+       const struct of_device_id *of_dev_id;
 
        pwm = devm_kzalloc(&pdev->dev, sizeof(*pwm), GFP_KERNEL);
        if (!pwm)
@@ -160,6 +202,11 @@ static int img_pwm_probe(struct platform_device *pdev)
        if (IS_ERR(pwm->base))
                return PTR_ERR(pwm->base);
 
+       of_dev_id = of_match_device(img_pwm_of_match, &pdev->dev);
+       if (!of_dev_id)
+               return -ENODEV;
+       pwm->data = of_dev_id->data;
+
        pwm->periph_regs = syscon_regmap_lookup_by_phandle(pdev->dev.of_node,
                                                           "img,cr-periph");
        if (IS_ERR(pwm->periph_regs))
@@ -189,6 +236,17 @@ static int img_pwm_probe(struct platform_device *pdev)
                goto disable_sysclk;
        }
 
+       clk_rate = clk_get_rate(pwm->pwm_clk);
+
+       /* The maximum input clock divider is 512 */
+       val = (u64)NSEC_PER_SEC * 512 * pwm->data->max_timebase;
+       do_div(val, clk_rate);
+       pwm->max_period_ns = val;
+
+       val = (u64)NSEC_PER_SEC * MIN_TMBASE_STEPS;
+       do_div(val, clk_rate);
+       pwm->min_period_ns = val;
+
        pwm->chip.dev = &pdev->dev;
        pwm->chip.ops = &img_pwm_ops;
        pwm->chip.base = -1;
@@ -228,12 +286,6 @@ static int img_pwm_remove(struct platform_device *pdev)
        return pwmchip_remove(&pwm_chip->chip);
 }
 
-static const struct of_device_id img_pwm_of_match[] = {
-       { .compatible = "img,pistachio-pwm", },
-       { }
-};
-MODULE_DEVICE_TABLE(of, img_pwm_of_match);
-
 static struct platform_driver img_pwm_driver = {
        .driver = {
                .name = "img-pwm",
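
The probe-time limits added above follow directly from that divider math: the longest representable period is a full timebase at the largest sub-divider (512), the shortest is MIN_TMBASE_STEPS undivided steps. A plain-C sketch of the same bounds computation (the 40 MHz clock rate is a made-up example):

    #include <stdint.h>
    #include <stdio.h>

    #define NSEC_PER_SEC 1000000000ULL

    int main(void)
    {
            uint64_t clk_rate = 40000000;            /* PWM clock, Hz */
            uint64_t max_timebase = 255, min_steps = 16;
            uint64_t max_ns = NSEC_PER_SEC * 512 * max_timebase / clk_rate;
            uint64_t min_ns = NSEC_PER_SEC * min_steps / clk_rate;

            printf("period range: %llu..%llu ns\n",
                   (unsigned long long)min_ns, (unsigned long long)max_ns);
            return 0;
    }
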
index 8a4df7a1f2eecc879a679711d13d64885397af39..e628d4c2f2ae43de1955aac857f745ec2d3d0357 100644 (file)
@@ -394,6 +394,7 @@ static inline struct da9052_regulator_info *find_regulator_info(u8 chip_id,
 
 static int da9052_regulator_probe(struct platform_device *pdev)
 {
+       const struct mfd_cell *cell = mfd_get_cell(pdev);
        struct regulator_config config = { };
        struct da9052_regulator *regulator;
        struct da9052 *da9052;
@@ -409,7 +410,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
        regulator->da9052 = da9052;
 
        regulator->info = find_regulator_info(regulator->da9052->chip_id,
-                                             pdev->id);
+                                             cell->id);
        if (regulator->info == NULL) {
                dev_err(&pdev->dev, "invalid regulator ID specified\n");
                return -EINVAL;
@@ -419,7 +420,7 @@ static int da9052_regulator_probe(struct platform_device *pdev)
        config.driver_data = regulator;
        config.regmap = da9052->regmap;
        if (pdata && pdata->regulators) {
-               config.init_data = pdata->regulators[pdev->id];
+               config.init_data = pdata->regulators[cell->id];
        } else {
 #ifdef CONFIG_OF
                struct device_node *nproot = da9052->dev->of_node;
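
The switch from pdev->id to the MFD cell id matters once the MFD core registers its subdevices with automatically assigned platform ids: pdev->id then no longer encodes the regulator index the parent driver put into its cells. A minimal sketch of reading the cell id in a subdevice probe (the function name is illustrative):

    #include <linux/errno.h>
    #include <linux/mfd/core.h>
    #include <linux/platform_device.h>

    static int demo_subdev_probe(struct platform_device *pdev)
    {
            const struct mfd_cell *cell = mfd_get_cell(pdev);

            /* cell->id is the index the parent MFD driver assigned,
             * independent of the (possibly auto-allocated) pdev->id */
            if (cell->id < 0)
                    return -EINVAL;
            return 0;
    }
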
index cb70ced7e0db19771699a7b362d63b7c49e06d31..4b62d1a875e43eb09bf4631f7695f31dda9bfa6d 100644 (file)
@@ -64,7 +64,7 @@ static void rtc_delayed_write(u32 val, struct armada38x_rtc *rtc, int offset)
 static int armada38x_rtc_read_time(struct device *dev, struct rtc_time *tm)
 {
        struct armada38x_rtc *rtc = dev_get_drvdata(dev);
-       unsigned long time, time_check, flags;
+       unsigned long time, time_check;
 
        mutex_lock(&rtc->mutex_time);
        time = readl(rtc->regs + RTC_TIME);
index f0b9871a4bbd3ff209d77e56f28818fdbcce25e6..3ba61141975914aa25ef42658471686756fdefb5 100644 (file)
@@ -1158,11 +1158,12 @@ static ssize_t poll_timeout_store(struct bus_type *bus, const char *buf,
        poll_timeout = time;
        hr_time = ktime_set(0, poll_timeout);
 
-       if (!hrtimer_is_queued(&ap_poll_timer) ||
-           !hrtimer_forward(&ap_poll_timer, hrtimer_get_expires(&ap_poll_timer), hr_time)) {
-               hrtimer_set_expires(&ap_poll_timer, hr_time);
-               hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
-       }
+       spin_lock_bh(&ap_poll_timer_lock);
+       hrtimer_cancel(&ap_poll_timer);
+       hrtimer_set_expires(&ap_poll_timer, hr_time);
+       hrtimer_start_expires(&ap_poll_timer, HRTIMER_MODE_ABS);
+       spin_unlock_bh(&ap_poll_timer_lock);
+
        return count;
 }
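
Rather than second-guessing timer state with hrtimer_is_queued() and hrtimer_forward(), the store handler above now serializes against the poll path and reprograms from a clean slate. A sketch of the cancel/set/start pattern (lock and timer are assumed initialized elsewhere; names are illustrative):

    #include <linux/hrtimer.h>
    #include <linux/spinlock.h>

    static DEFINE_SPINLOCK(demo_timer_lock);
    static struct hrtimer demo_poll_timer;   /* hrtimer_init()ed elsewhere */

    static void demo_set_poll_timeout(unsigned long nsecs)
    {
            ktime_t t = ktime_set(0, nsecs);

            spin_lock_bh(&demo_timer_lock);
            /* hrtimer_cancel() also waits out a running callback; this is
             * only safe if that callback never takes demo_timer_lock */
            hrtimer_cancel(&demo_poll_timer);
            hrtimer_set_expires(&demo_poll_timer, t);
            hrtimer_start_expires(&demo_poll_timer, HRTIMER_MODE_ABS);
            spin_unlock_bh(&demo_timer_lock);
    }
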
 
@@ -1528,14 +1529,11 @@ static inline void __ap_schedule_poll_timer(void)
        ktime_t hr_time;
 
        spin_lock_bh(&ap_poll_timer_lock);
-       if (hrtimer_is_queued(&ap_poll_timer) || ap_suspend_flag)
-               goto out;
-       if (ktime_to_ns(hrtimer_expires_remaining(&ap_poll_timer)) <= 0) {
+       if (!hrtimer_is_queued(&ap_poll_timer) && !ap_suspend_flag) {
                hr_time = ktime_set(0, poll_timeout);
                hrtimer_forward_now(&ap_poll_timer, hr_time);
                hrtimer_restart(&ap_poll_timer);
        }
-out:
        spin_unlock_bh(&ap_poll_timer_lock);
 }
 
@@ -1952,7 +1950,7 @@ static void ap_reset_domain(void)
 {
        int i;
 
-       if (ap_domain_index != -1)
+       if ((ap_domain_index != -1) && (ap_test_config_domain(ap_domain_index)))
                for (i = 0; i < AP_DEVICES; i++)
                        ap_reset_queue(AP_MKQID(i, ap_domain_index));
 }
@@ -2097,7 +2095,6 @@ void ap_module_exit(void)
        hrtimer_cancel(&ap_poll_timer);
        destroy_workqueue(ap_work_queue);
        tasklet_kill(&ap_tasklet);
-       root_device_unregister(ap_root_device);
        while ((dev = bus_find_device(&ap_bus_type, NULL, NULL,
                    __ap_match_all)))
        {
@@ -2106,6 +2103,7 @@ void ap_module_exit(void)
        }
        for (i = 0; ap_bus_attrs[i]; i++)
                bus_remove_file(&ap_bus_type, ap_bus_attrs[i]);
+       root_device_unregister(ap_root_device);
        bus_unregister(&ap_bus_type);
        unregister_reset_call(&ap_reset_call);
        if (ap_using_interrupts())
index 81e83a65a1936cb897a2a0bbfc4bcfabb957b276..32070099c33356d6dca288330337df8eba5e5fa6 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 1028760b8a22145e792de569862641d627d5b89f..447cf7ce606ec6e3cf8b0540ba4f455c41aec2d5 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 98897434bcb4580c23e6939f35b6e8344002f7a3..f11d325fe6963f191424b52d6f0c16d69ddef671 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -8,9 +8,9 @@
  * Public License is included in this distribution in the file called COPYING.
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index b7391a3f9f0ba1d3e1f9ba96649d7ca8a9e2af16..2f0700796842004812a12c6307487747f2868e63 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index e0b3b2d1f27a64e9b5ee1eba4b558046847c0375..0c84e1c0763acc98e04003be5b966fd2f277f450 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 923a2b5a24395547212207312588b125f19de3a2..1f74760ce86cb27db2308f4dbe1d9ba25f10bcaa 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
@@ -50,7 +50,7 @@ static unsigned int enable_msix = 1;
 
 MODULE_DESCRIPTION(DRV_DESC " " BUILD_STR);
 MODULE_VERSION(BUILD_STR);
-MODULE_AUTHOR("Emulex Corporation");
+MODULE_AUTHOR("Avago Technologies");
 MODULE_LICENSE("GPL");
 module_param(be_iopoll_budget, int, 0);
 module_param(enable_msix, int, 0);
@@ -552,7 +552,7 @@ MODULE_DEVICE_TABLE(pci, beiscsi_pci_id_table);
 
 static struct scsi_host_template beiscsi_sht = {
        .module = THIS_MODULE,
-       .name = "Emulex 10Gbe open-iscsi Initiator Driver",
+       .name = "Avago Technologies 10Gbe open-iscsi Initiator Driver",
        .proc_name = DRV_NAME,
        .queuecommand = iscsi_queuecommand,
        .change_queue_depth = scsi_change_queue_depth,
index 7ee0ffc3851468ad19b5defe60b36b71266a6980..e70ea26bbc2b0fff8a82c540edd4aceab371da45 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
@@ -37,7 +37,7 @@
 
 #define DRV_NAME               "be2iscsi"
 #define BUILD_STR              "10.4.114.0"
-#define BE_NAME                        "Emulex OneConnect" \
+#define BE_NAME                        "Avago Technologies OneConnect" \
                                "Open-iSCSI Driver version" BUILD_STR
 #define DRV_DESC               BE_NAME " " "Driver"
 
index 681d4e8f003ab6d6873cf9d4bc35c2c87d81aa35..c2c4d6975fb7b22b2a4db35adf6ca69d2ea06547 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index bd81446936fc34c5ecbc77cdd013416a0f1224f2..9356b9a86b66fcc7d640361aab6670d64315244f 100644 (file)
@@ -1,5 +1,5 @@
 /**
- * Copyright (C) 2005 - 2014 Emulex
+ * Copyright (C) 2005 - 2015 Avago Technologies
  * All rights reserved.
  *
  * This program is free software; you can redistribute it and/or
@@ -7,12 +7,12 @@
  * as published by the Free Software Foundation.  The full GNU General
  * Public License is included in this distribution in the file called COPYING.
  *
- * Written by: Jayamohan Kallickal (jayamohan.kallickal@emulex.com)
+ * Written by: Jayamohan Kallickal (jayamohan.kallickal@avagotech.com)
  *
  * Contact Information:
- * linux-drivers@emulex.com
+ * linux-drivers@avagotech.com
  *
- * Emulex
+ * Avago Technologies
  * 3333 Susan Street
  * Costa Mesa, CA 92626
  */
index 5980c10c734d27f702b59a7dd443771fa740d75f..d6498fabe6282fe354bedb9214652c370900c1b0 100644 (file)
@@ -18,6 +18,7 @@
 #include <linux/module.h>
 #include <linux/errno.h>
 #include <linux/debugfs.h>
+#include <linux/vmalloc.h>
 #include "fnic.h"
 
 static struct dentry *fnic_trace_debugfs_root;
index 65a9bde26974bd9a5b98b33d7f456b16c9b5c10b..4e15c4bf079578afc8d944b0daeaca2264178fb2 100644 (file)
@@ -21,6 +21,7 @@
 #include <linux/spinlock.h>
 #include <linux/kallsyms.h>
 #include <linux/time.h>
+#include <linux/vmalloc.h>
 #include "fnic_io.h"
 #include "fnic.h"
 
index cb73cf9e9ba5c9a7d16c2b069ef649663cf067f8..c140f99772caa5963b15a67456b32d9194373927 100644 (file)
@@ -1129,25 +1129,6 @@ lpfc_release_scsi_buf(struct lpfc_hba *phba, struct lpfc_scsi_buf *psb)
        phba->lpfc_release_scsi_buf(phba, psb);
 }
 
-/**
- * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
- * @data: A pointer to the immediate command data portion of the IOCB.
- * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
- *
- * The routine copies the entire FCP command from @fcp_cmnd to @data while
- * byte swapping the data to big endian format for transmission on the wire.
- **/
-static void
-lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
-{
-       int i, j;
-
-       for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
-            i += sizeof(uint32_t), j++) {
-               ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
-       }
-}
-
 /**
  * lpfc_scsi_prep_dma_buf_s3 - DMA mapping for scsi buffer to SLI3 IF spec
  * @phba: The Hba for which this call is being executed.
@@ -1283,7 +1264,6 @@ lpfc_scsi_prep_dma_buf_s3(struct lpfc_hba *phba, struct lpfc_scsi_buf *lpfc_cmd)
         * we need to set word 4 of IOCB here
         */
        iocb_cmd->un.fcpi.fcpi_parm = scsi_bufflen(scsi_cmnd);
-       lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
        return 0;
 }
 
@@ -4146,6 +4126,24 @@ lpfc_scsi_cmd_iocb_cmpl(struct lpfc_hba *phba, struct lpfc_iocbq *pIocbIn,
        lpfc_release_scsi_buf(phba, lpfc_cmd);
 }
 
+/**
+ * lpfc_fcpcmd_to_iocb - copy the fcp_cmd data into the IOCB
+ * @data: A pointer to the immediate command data portion of the IOCB.
+ * @fcp_cmnd: The FCP Command that is provided by the SCSI layer.
+ *
+ * The routine copies the entire FCP command from @fcp_cmnd to @data while
+ * byte swapping the data to big endian format for transmission on the wire.
+ **/
+static void
+lpfc_fcpcmd_to_iocb(uint8_t *data, struct fcp_cmnd *fcp_cmnd)
+{
+       int i, j;
+       for (i = 0, j = 0; i < sizeof(struct fcp_cmnd);
+            i += sizeof(uint32_t), j++) {
+               ((uint32_t *)data)[j] = cpu_to_be32(((uint32_t *)fcp_cmnd)[j]);
+       }
+}
+
 /**
  * lpfc_scsi_prep_cmnd - Wrapper func for convert scsi cmnd to FCP info unit
  * @vport: The virtual port for which this call is being executed.
@@ -4225,6 +4223,9 @@ lpfc_scsi_prep_cmnd(struct lpfc_vport *vport, struct lpfc_scsi_buf *lpfc_cmd,
                fcp_cmnd->fcpCntl3 = 0;
                phba->fc4ControlRequests++;
        }
+       if (phba->sli_rev == 3 &&
+           !(phba->sli3_options & LPFC_SLI3_BG_ENABLED))
+               lpfc_fcpcmd_to_iocb(iocb_cmd->unsli3.fcp_ext.icd, fcp_cmnd);
        /*
         * Finish initializing those IOCB fields that are independent
         * of the scsi_cmnd request_buffer
index 68c2002e78bf80d3b383b92f76519901459059da..5c9e680aa375a57c07a8977790271615ffb1499d 100644 (file)
@@ -1020,8 +1020,7 @@ static void tcm_qla2xxx_depend_tpg(struct work_struct *work)
        struct se_portal_group *se_tpg = &base_tpg->se_tpg;
        struct scsi_qla_host *base_vha = base_tpg->lport->qla_vha;
 
-       if (!configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                 &se_tpg->tpg_group.cg_item)) {
+       if (!target_depend_item(&se_tpg->tpg_group.cg_item)) {
                atomic_set(&base_tpg->lport_tpg_enabled, 1);
                qlt_enable_vha(base_vha);
        }
@@ -1037,8 +1036,7 @@ static void tcm_qla2xxx_undepend_tpg(struct work_struct *work)
 
        if (!qlt_stop_phase1(base_vha->vha_tgt.qla_tgt)) {
                atomic_set(&base_tpg->lport_tpg_enabled, 0);
-               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                      &se_tpg->tpg_group.cg_item);
+               target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        complete(&base_tpg->tpg_base_comp);
 }
index 79beebf53302e591bc0661372335561c65d3e006..7f9d65fe4fd9a441c1aa00f602f3dac14c866563 100644 (file)
@@ -1600,6 +1600,7 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
 {
        u64 start_lba = blk_rq_pos(scmd->request);
        u64 end_lba = blk_rq_pos(scmd->request) + (scsi_bufflen(scmd) / 512);
+       u64 factor = scmd->device->sector_size / 512;
        u64 bad_lba;
        int info_valid;
        /*
@@ -1621,16 +1622,9 @@ static unsigned int sd_completed_bytes(struct scsi_cmnd *scmd)
        if (scsi_bufflen(scmd) <= scmd->device->sector_size)
                return 0;
 
-       if (scmd->device->sector_size < 512) {
-               /* only legitimate sector_size here is 256 */
-               start_lba <<= 1;
-               end_lba <<= 1;
-       } else {
-               /* be careful ... don't want any overflows */
-               unsigned int factor = scmd->device->sector_size / 512;
-               do_div(start_lba, factor);
-               do_div(end_lba, factor);
-       }
+       /* be careful ... don't want any overflows */
+       do_div(start_lba, factor);
+       do_div(end_lba, factor);
 
        /* The bad lba was reported incorrectly, we have no idea where
         * the error is.
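
With 256-byte sectors no longer accepted, sector_size/512 is always a whole number >= 1, so both LBAs can be scaled by a single division (the kernel's do_div() divides the 64-bit dividend in place and returns the remainder). A standalone sketch of the scaling (all values are made up):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint64_t start_lba = 123456;      /* request start, 512 B units */
            uint64_t buflen = 32768;          /* transfer length, bytes */
            uint64_t sector_size = 4096;      /* device sector, bytes */
            uint64_t factor = sector_size / 512;   /* always >= 1 now */
            uint64_t end_lba = start_lba + buflen / 512;

            printf("device LBAs: %llu..%llu\n",
                   (unsigned long long)(start_lba / factor),
                   (unsigned long long)(end_lba / factor));
            return 0;
    }
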
@@ -2188,8 +2182,7 @@ got_data:
        if (sector_size != 512 &&
            sector_size != 1024 &&
            sector_size != 2048 &&
-           sector_size != 4096 &&
-           sector_size != 256) {
+           sector_size != 4096) {
                sd_printk(KERN_NOTICE, sdkp, "Unsupported sector size %d.\n",
                          sector_size);
                /*
@@ -2244,8 +2237,6 @@ got_data:
                sdkp->capacity <<= 2;
        else if (sector_size == 1024)
                sdkp->capacity <<= 1;
-       else if (sector_size == 256)
-               sdkp->capacity >>= 1;
 
        blk_queue_physical_block_size(sdp->request_queue,
                                      sdkp->physical_block_size);
index d9dad90344d545a18185ecf0736fa79a4b6a212b..3c6584ff65c1979b6119c4edce15e6d35eb3957f 100644 (file)
@@ -1600,8 +1600,7 @@ static int storvsc_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *scmnd)
                break;
        default:
                vm_srb->data_in = UNKNOWN_TYPE;
-               vm_srb->win8_extension.srb_flags |= (SRB_FLAGS_DATA_IN |
-                                                    SRB_FLAGS_DATA_OUT);
+               vm_srb->win8_extension.srb_flags |= SRB_FLAGS_NO_DATA_TRANSFER;
                break;
        }
 
index bcdb22d5e215c9a393ccabe58f4f94ef132e3516..3c1850332a90212798ab5030554bc8fad39d9796 100644 (file)
@@ -4,6 +4,7 @@
 config MTK_PMIC_WRAP
        tristate "MediaTek PMIC Wrapper Support"
        depends on ARCH_MEDIATEK
+       depends on RESET_CONTROLLER
        select REGMAP
        help
          Say yes here to add support for MediaTek PMIC Wrapper found
index db5be1eec54c8db3977ea810e13c5470f416aaa7..f432291feee91e4b7c7b5ce3cc84f3b130933309 100644 (file)
@@ -443,11 +443,6 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp,
 static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
 {
        int ret;
-       u32 val;
-
-       val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
-       if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
-               pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
 
        ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
        if (ret)
@@ -462,11 +457,6 @@ static int pwrap_write(struct pmic_wrapper *wrp, u32 adr, u32 wdata)
 static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
 {
        int ret;
-       u32 val;
-
-       val = pwrap_readl(wrp, PWRAP_WACS2_RDATA);
-       if (PWRAP_GET_WACS_FSM(val) == PWRAP_WACS_FSM_WFVLDCLR)
-               pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
 
        ret = pwrap_wait_for_state(wrp, pwrap_is_fsm_idle);
        if (ret)
@@ -480,6 +470,8 @@ static int pwrap_read(struct pmic_wrapper *wrp, u32 adr, u32 *rdata)
 
        *rdata = PWRAP_GET_WACS_RDATA(pwrap_readl(wrp, PWRAP_WACS2_RDATA));
 
+       pwrap_writel(wrp, 1, PWRAP_WACS2_VLDCLR);
+
        return 0;
 }
 
@@ -563,45 +555,17 @@ static int pwrap_init_sidly(struct pmic_wrapper *wrp)
 
 static int pwrap_init_reg_clock(struct pmic_wrapper *wrp)
 {
-       unsigned long rate_spi;
-       int ck_mhz;
-
-       rate_spi = clk_get_rate(wrp->clk_spi);
-
-       if (rate_spi > 26000000)
-               ck_mhz = 26;
-       else if (rate_spi > 18000000)
-               ck_mhz = 18;
-       else
-               ck_mhz = 0;
-
-       switch (ck_mhz) {
-       case 18:
-               if (pwrap_is_mt8135(wrp))
-                       pwrap_writel(wrp, 0xc, PWRAP_CSHEXT);
-               pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_WRITE);
-               pwrap_writel(wrp, 0xc, PWRAP_CSHEXT_READ);
-               pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
-               pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
-               break;
-       case 26:
-               if (pwrap_is_mt8135(wrp))
-                       pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
+       if (pwrap_is_mt8135(wrp)) {
+               pwrap_writel(wrp, 0x4, PWRAP_CSHEXT);
                pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
                pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
                pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_START);
                pwrap_writel(wrp, 0x0, PWRAP_CSLEXT_END);
-               break;
-       case 0:
-               if (pwrap_is_mt8135(wrp))
-                       pwrap_writel(wrp, 0xf, PWRAP_CSHEXT);
-               pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_WRITE);
-               pwrap_writel(wrp, 0xf, PWRAP_CSHEXT_READ);
-               pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_START);
-               pwrap_writel(wrp, 0xf, PWRAP_CSLEXT_END);
-               break;
-       default:
-               return -EINVAL;
+       } else {
+               pwrap_writel(wrp, 0x0, PWRAP_CSHEXT_WRITE);
+               pwrap_writel(wrp, 0x4, PWRAP_CSHEXT_READ);
+               pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_START);
+               pwrap_writel(wrp, 0x2, PWRAP_CSLEXT_END);
        }
 
        return 0;
index 198f96b7fb45dab78845ba1fcde123ea680e2082..72b059081559356100aa68dea2c021b056b492e5 100644 (file)
@@ -78,6 +78,7 @@ config SPI_ATMEL
 config SPI_BCM2835
        tristate "BCM2835 SPI controller"
        depends on ARCH_BCM2835 || COMPILE_TEST
+       depends on GPIOLIB
        help
          This selects a driver for the Broadcom BCM2835 SPI master.
 
@@ -302,7 +303,7 @@ config SPI_FSL_SPI
 config SPI_FSL_DSPI
        tristate "Freescale DSPI controller"
        select REGMAP_MMIO
-       depends on SOC_VF610 || COMPILE_TEST
+       depends on SOC_VF610 || SOC_LS1021A || COMPILE_TEST
        help
          This enables support for the Freescale DSPI controller in master
          mode. VF610 platform uses the controller.
index f63864a893c520c40d9c79f1c8ca838b15dedc8c..37875cf942f7b928c5d31f44345b5c060f182b36 100644 (file)
@@ -164,13 +164,12 @@ static int bcm2835_spi_transfer_one_poll(struct spi_master *master,
                                         unsigned long xfer_time_us)
 {
        struct bcm2835_spi *bs = spi_master_get_devdata(master);
-       unsigned long timeout = jiffies +
-               max(4 * xfer_time_us * HZ / 1000000, 2uL);
+       /* set the polling timeout to 1 second */
+       unsigned long timeout = jiffies + HZ;
 
        /* enable HW block without interrupts */
        bcm2835_wr(bs, BCM2835_SPI_CS, cs | BCM2835_SPI_CS_TA);
 
-       /* set timeout to 4x the expected time, or 2 jiffies */
        /* loop until finished the transfer */
        while (bs->rx_len) {
                /* read from fifo as much as possible */
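
The replacement bound above is simply one second of ticks past "now". A minimal sketch of the jiffies deadline idiom the loop relies on (the done() callback is illustrative):

    #include <linux/errno.h>
    #include <linux/jiffies.h>
    #include <linux/sched.h>

    static int demo_poll(int (*done)(void))
    {
            unsigned long timeout = jiffies + HZ;   /* one second from now */

            while (!done()) {
                    /* time_after() is wraparound-safe jiffies comparison */
                    if (time_after(jiffies, timeout))
                            return -ETIMEDOUT;
                    cpu_relax();
            }
            return 0;
    }
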
index 5ef6638d5e8a2698a6c8e85fad41c46d1f03fd06..840a4984d3650e27dcf98713235a67e040b8ca1a 100644 (file)
@@ -180,7 +180,6 @@ int spi_bitbang_setup(struct spi_device *spi)
 {
        struct spi_bitbang_cs   *cs = spi->controller_state;
        struct spi_bitbang      *bitbang;
-       int                     retval;
        unsigned long           flags;
 
        bitbang = spi_master_get_devdata(spi->master);
@@ -197,9 +196,11 @@ int spi_bitbang_setup(struct spi_device *spi)
        if (!cs->txrx_word)
                return -EINVAL;
 
-       retval = bitbang->setup_transfer(spi, NULL);
-       if (retval < 0)
-               return retval;
+       if (bitbang->setup_transfer) {
+               int retval = bitbang->setup_transfer(spi, NULL);
+               if (retval < 0)
+                       return retval;
+       }
 
        dev_dbg(&spi->dev, "%s, %u nsec/bit\n", __func__, 2 * cs->nsecs);
 
@@ -295,9 +296,11 @@ static int spi_bitbang_transfer_one(struct spi_master *master,
 
                /* init (-1) or override (1) transfer params */
                if (do_setup != 0) {
-                       status = bitbang->setup_transfer(spi, t);
-                       if (status < 0)
-                               break;
+                       if (bitbang->setup_transfer) {
+                               status = bitbang->setup_transfer(spi, t);
+                               if (status < 0)
+                                       break;
+                       }
                        if (do_setup == -1)
                                do_setup = 0;
                }
index 9c46a3058743b75228256f55e64b8419b49c1f3d..896add8cfd3b6c6bba311a335072e63a4060a3a4 100644 (file)
@@ -24,6 +24,7 @@
 #include <linux/of_address.h>
 #include <linux/spi/spi.h>
 #include <linux/types.h>
+#include <linux/platform_device.h>
 
 #include "spi-fsl-cpm.h"
 #include "spi-fsl-lib.h"
@@ -269,17 +270,6 @@ static unsigned long fsl_spi_cpm_get_pram(struct mpc8xxx_spi *mspi)
        if (mspi->flags & SPI_CPM2) {
                pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
                out_be16(spi_base, pram_ofs);
-       } else {
-               struct spi_pram __iomem *pram = spi_base;
-               u16 rpbase = in_be16(&pram->rpbase);
-
-               /* Microcode relocation patch applied? */
-               if (rpbase) {
-                       pram_ofs = rpbase;
-               } else {
-                       pram_ofs = cpm_muram_alloc(SPI_PRAM_SIZE, 64);
-                       out_be16(spi_base, pram_ofs);
-               }
        }
 
        iounmap(spi_base);
@@ -292,7 +282,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
        struct device_node *np = dev->of_node;
        const u32 *iprop;
        int size;
-       unsigned long pram_ofs;
        unsigned long bds_ofs;
 
        if (!(mspi->flags & SPI_CPM_MODE))
@@ -319,8 +308,26 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
                }
        }
 
-       pram_ofs = fsl_spi_cpm_get_pram(mspi);
-       if (IS_ERR_VALUE(pram_ofs)) {
+       if (mspi->flags & SPI_CPM1) {
+               struct resource *res;
+               void *pram;
+
+               res = platform_get_resource(to_platform_device(dev),
+                                           IORESOURCE_MEM, 1);
+               pram = devm_ioremap_resource(dev, res);
+               if (IS_ERR(pram))
+                       mspi->pram = NULL;
+               else
+                       mspi->pram = pram;
+       } else {
+               unsigned long pram_ofs = fsl_spi_cpm_get_pram(mspi);
+
+               if (IS_ERR_VALUE(pram_ofs))
+                       mspi->pram = NULL;
+               else
+                       mspi->pram = cpm_muram_addr(pram_ofs);
+       }
+       if (mspi->pram == NULL) {
                dev_err(dev, "can't allocate spi parameter ram\n");
                goto err_pram;
        }
@@ -346,8 +353,6 @@ int fsl_spi_cpm_init(struct mpc8xxx_spi *mspi)
                goto err_dummy_rx;
        }
 
-       mspi->pram = cpm_muram_addr(pram_ofs);
-
        mspi->tx_bd = cpm_muram_addr(bds_ofs);
        mspi->rx_bd = cpm_muram_addr(bds_ofs + sizeof(*mspi->tx_bd));
 
@@ -375,7 +380,8 @@ err_dummy_rx:
 err_dummy_tx:
        cpm_muram_free(bds_ofs);
 err_bds:
-       cpm_muram_free(pram_ofs);
+       if (!(mspi->flags & SPI_CPM1))
+               cpm_muram_free(cpm_muram_offset(mspi->pram));
 err_pram:
        fsl_spi_free_dummy_rx();
        return -ENOMEM;
index d0a73a09a9bd3e02371a30bc4b5ceb34f6b52e4e..80d245ac846fa366abf8d2b7511a1fb4ac25c03e 100644 (file)
@@ -359,14 +359,16 @@ static void fsl_espi_rw_trans(struct spi_message *m,
                                struct fsl_espi_transfer *trans, u8 *rx_buff)
 {
        struct fsl_espi_transfer *espi_trans = trans;
-       unsigned int n_tx = espi_trans->n_tx;
-       unsigned int n_rx = espi_trans->n_rx;
+       unsigned int total_len = espi_trans->len;
        struct spi_transfer *t;
        u8 *local_buf;
        u8 *rx_buf = rx_buff;
        unsigned int trans_len;
        unsigned int addr;
-       int i, pos, loop;
+       unsigned int tx_only;
+       unsigned int rx_pos = 0;
+       unsigned int pos;
+       int i, loop;
 
        local_buf = kzalloc(SPCOM_TRANLEN_MAX, GFP_KERNEL);
        if (!local_buf) {
@@ -374,36 +376,48 @@ static void fsl_espi_rw_trans(struct spi_message *m,
                return;
        }
 
-       for (pos = 0, loop = 0; pos < n_rx; pos += trans_len, loop++) {
-               trans_len = n_rx - pos;
-               if (trans_len > SPCOM_TRANLEN_MAX - n_tx)
-                       trans_len = SPCOM_TRANLEN_MAX - n_tx;
+       for (pos = 0, loop = 0; pos < total_len; pos += trans_len, loop++) {
+               trans_len = total_len - pos;
 
                i = 0;
+               tx_only = 0;
                list_for_each_entry(t, &m->transfers, transfer_list) {
                        if (t->tx_buf) {
                                memcpy(local_buf + i, t->tx_buf, t->len);
                                i += t->len;
+                               if (!t->rx_buf)
+                                       tx_only += t->len;
                        }
                }
 
+               /* Add additional TX bytes to compensate for SPCOM_TRANLEN_MAX */
+               if (loop > 0)
+                       trans_len += tx_only;
+
+               if (trans_len > SPCOM_TRANLEN_MAX)
+                       trans_len = SPCOM_TRANLEN_MAX;
+
+               /* Update device offset */
                if (pos > 0) {
                        addr = fsl_espi_cmd2addr(local_buf);
-                       addr += pos;
+                       addr += rx_pos;
                        fsl_espi_addr2cmd(addr, local_buf);
                }
 
-               espi_trans->n_tx = n_tx;
-               espi_trans->n_rx = trans_len;
-               espi_trans->len = trans_len + n_tx;
+               espi_trans->len = trans_len;
                espi_trans->tx_buf = local_buf;
                espi_trans->rx_buf = local_buf;
                fsl_espi_do_trans(m, espi_trans);
 
-               memcpy(rx_buf + pos, espi_trans->rx_buf + n_tx, trans_len);
+               /* If there is at least one RX byte then copy it to rx_buf */
+               if (tx_only < SPCOM_TRANLEN_MAX)
+                       memcpy(rx_buf + rx_pos, espi_trans->rx_buf + tx_only,
+                                       trans_len - tx_only);
+
+               rx_pos += trans_len - tx_only;
 
                if (loop > 0)
-                       espi_trans->actual_length += espi_trans->len - n_tx;
+                       espi_trans->actual_length += espi_trans->len - tx_only;
                else
                        espi_trans->actual_length += espi_trans->len;
        }
@@ -418,6 +432,7 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
        u8 *rx_buf = NULL;
        unsigned int n_tx = 0;
        unsigned int n_rx = 0;
+       unsigned int xfer_len = 0;
        struct fsl_espi_transfer espi_trans;
 
        list_for_each_entry(t, &m->transfers, transfer_list) {
@@ -427,11 +442,13 @@ static int fsl_espi_do_one_msg(struct spi_master *master,
                        n_rx += t->len;
                        rx_buf = t->rx_buf;
                }
+               if ((t->tx_buf) || (t->rx_buf))
+                       xfer_len += t->len;
        }
 
        espi_trans.n_tx = n_tx;
        espi_trans.n_rx = n_rx;
-       espi_trans.len = n_tx + n_rx;
+       espi_trans.len = xfer_len;
        espi_trans.actual_length = 0;
        espi_trans.status = 0;
 
index 4df8942058deed3928e61a4b2bc4061c56eec7d7..d1a5b9fc3eba22edaafd6155a5292ee108d97ac7 100644 (file)
@@ -1210,6 +1210,7 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
        struct omap2_mcspi      *mcspi;
        struct omap2_mcspi_dma  *mcspi_dma;
        struct spi_transfer     *t;
+       int status;
 
        spi = m->spi;
        mcspi = spi_master_get_devdata(master);
@@ -1229,7 +1230,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
                                        tx_buf ? "tx" : "",
                                        rx_buf ? "rx" : "",
                                        t->bits_per_word);
-                       return -EINVAL;
+                       status = -EINVAL;
+                       goto out;
                }
 
                if (m->is_dma_mapped || len < DMA_MIN_BYTES)
@@ -1241,7 +1243,8 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
                        if (dma_mapping_error(mcspi->dev, t->tx_dma)) {
                                dev_dbg(mcspi->dev, "dma %cX %d bytes error\n",
                                                'T', len);
-                               return -EINVAL;
+                               status = -EINVAL;
+                               goto out;
                        }
                }
                if (mcspi_dma->dma_rx && rx_buf != NULL) {
@@ -1253,14 +1256,19 @@ static int omap2_mcspi_transfer_one_message(struct spi_master *master,
                                if (tx_buf != NULL)
                                        dma_unmap_single(mcspi->dev, t->tx_dma,
                                                        len, DMA_TO_DEVICE);
-                               return -EINVAL;
+                               status = -EINVAL;
+                               goto out;
                        }
                }
        }
 
        omap2_mcspi_work(mcspi, m);
+       /* spi_finalize_current_message() changes the status inside the
+        * spi_message, save the status here. */
+       status = m->status;
+out:
        spi_finalize_current_message(master);
-       return 0;
+       return status;
 }
 
 static int omap2_mcspi_master_setup(struct omap2_mcspi *mcspi)
index d5d7d2235163f9ef8c6430d99544ec7f2c791c6b..50910d85df5af28d6f281485cc79d4b3e15bc7e5 100644 (file)
@@ -583,6 +583,15 @@ static int spi_unmap_msg(struct spi_master *master, struct spi_message *msg)
        rx_dev = master->dma_rx->device->dev;
 
        list_for_each_entry(xfer, &msg->transfers, transfer_list) {
+               /*
+                * Restore the original value of tx_buf or rx_buf if they are
+                * NULL.
+                */
+               if (xfer->tx_buf == master->dummy_tx)
+                       xfer->tx_buf = NULL;
+               if (xfer->rx_buf == master->dummy_rx)
+                       xfer->rx_buf = NULL;
+
                if (!master->can_dma(master, msg->spi, xfer))
                        continue;
 
index 09428412139e399979537da2e6272eda827a8757..c5352ea4821ea0df593c7043ac911ee891f103b0 100644 (file)
@@ -621,8 +621,8 @@ static u32 ssb_pmu_get_alp_clock_clk0(struct ssb_chipcommon *cc)
        u32 crystalfreq;
        const struct pmu0_plltab_entry *e = NULL;
 
-       crystalfreq = chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
-                     SSB_CHIPCO_PMU_CTL_XTALFREQ >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
+       crystalfreq = (chipco_read32(cc, SSB_CHIPCO_PMU_CTL) &
+                      SSB_CHIPCO_PMU_CTL_XTALFREQ) >> SSB_CHIPCO_PMU_CTL_XTALFREQ_SHIFT;
        e = pmu0_plltab_find_entry(crystalfreq);
        BUG_ON(!e);
        return e->freq * 1000;
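
The bug fixed above is pure C precedence: ">>" binds tighter than "&", so the unparenthesized "reg & MASK >> SHIFT" masked with the already-shifted constant. A runnable demonstration with made-up mask and shift values (not the SSB register layout):

    #include <stdint.h>
    #include <stdio.h>

    #define MASK  0x0000f000u
    #define SHIFT 12

    int main(void)
    {
            uint32_t reg = 0x00005000;   /* field value 5 at bits 15:12 */

            printf("buggy: %u\n", reg & MASK >> SHIFT);   /* reg & 0xf -> 0 */
            printf("fixed: %u\n", (reg & MASK) >> SHIFT); /* -> 5 */
            return 0;
    }
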
@@ -634,7 +634,7 @@ u32 ssb_pmu_get_alp_clock(struct ssb_chipcommon *cc)
 
        switch (bus->chip_id) {
        case 0x5354:
-               ssb_pmu_get_alp_clock_clk0(cc);
+               return ssb_pmu_get_alp_clock_clk0(cc);
        default:
                ssb_err("ERROR: PMU alp clock unknown for device %04X\n",
                        bus->chip_id);
index 15a7ee3859dd7dd74aee31593876b422093eff04..5fe1c22e289b881cacebbf081d7c245fad7d2098 100644 (file)
@@ -359,12 +359,13 @@ static void ssb_pcicore_init_hostmode(struct ssb_pcicore *pc)
 
        /*
         * Accessing PCI config without a proper delay after devices reset (not
-        * GPIO reset) was causing reboots on WRT300N v1.0.
+        * GPIO reset) was causing reboots on WRT300N v1.0 (BCM4704).
         * Tested delay 850 us lowered reboot chance to 50-80%, 1000 us fixed it
         * completely. Flushing all writes was also tested but with no luck.
+        * The same problem was reported for WRT350N v1 (BCM4705), so we just
+        * sleep here unconditionally.
         */
-       if (pc->dev->bus->chip_id == 0x4704)
-               usleep_range(1000, 2000);
+       usleep_range(1000, 2000);
 
        /* Enable PCI bridge BAR0 prefetch and burst */
        val = PCI_COMMAND_MASTER | PCI_COMMAND_MEMORY;
index 8199b0a697bb5f8b672aabad8f9685c8c51a8f5e..1cf24e4edf251ca1896a72073c5fdf6df0eac336 100644 (file)
@@ -158,7 +158,7 @@ static int up_to_host(struct mux_rx *r)
        unsigned int start_flag;
        unsigned int payload_size;
        unsigned short packet_type;
-       int dummy_cnt;
+       int total_len;
        u32 packet_size_sum = r->offset;
        int index;
        int ret = TO_HOST_INVALID_PACKET;
@@ -176,10 +176,10 @@ static int up_to_host(struct mux_rx *r)
                        break;
                }
 
-               dummy_cnt = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
+               total_len = ALIGN(MUX_HEADER_SIZE + payload_size, 4);
 
                if (len - packet_size_sum <
-                       MUX_HEADER_SIZE + payload_size + dummy_cnt) {
+                       total_len) {
                        pr_err("invalid payload : %d %d %04x\n",
                               payload_size, len, packet_type);
                        break;
@@ -202,7 +202,7 @@ static int up_to_host(struct mux_rx *r)
                        break;
                }
 
-               packet_size_sum += MUX_HEADER_SIZE + payload_size + dummy_cnt;
+               packet_size_sum += total_len;
                if (len - packet_size_sum <= MUX_HEADER_SIZE + 2) {
                        ret = r->callback(NULL,
                                        0,
@@ -361,7 +361,6 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
        struct mux_pkt_header *mux_header;
        struct mux_tx *t = NULL;
        static u32 seq_num = 1;
-       int dummy_cnt;
        int total_len;
        int ret;
        unsigned long flags;
@@ -374,9 +373,7 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
 
        spin_lock_irqsave(&mux_dev->write_lock, flags);
 
-       dummy_cnt = ALIGN(MUX_HEADER_SIZE + len, 4);
-
-       total_len = len + MUX_HEADER_SIZE + dummy_cnt;
+       total_len = ALIGN(MUX_HEADER_SIZE + len, 4);
 
        t = alloc_mux_tx(total_len);
        if (!t) {
@@ -392,7 +389,8 @@ static int gdm_mux_send(void *priv_dev, void *data, int len, int tty_index,
        mux_header->packet_type = __cpu_to_le16(packet_type[tty_index]);
 
        memcpy(t->buf+MUX_HEADER_SIZE, data, len);
-       memset(t->buf+MUX_HEADER_SIZE+len, 0, dummy_cnt);
+       memset(t->buf+MUX_HEADER_SIZE+len, 0, total_len - MUX_HEADER_SIZE -
+              len);
 
        t->len = total_len;
        t->callback = cb;
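
The rework above fixes a misuse of ALIGN(): ALIGN(x, 4) is the rounded-up total, not the number of pad bytes, so the old code's dummy_cnt added a whole extra copy of the header-plus-payload length. A runnable sketch of the corrected accounting (sizes are made up):

    #include <stdio.h>

    #define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

    int main(void)
    {
            unsigned int header = 14, payload = 37;   /* example sizes */
            unsigned int total = ALIGN(header + payload, 4);
            unsigned int pad = total - header - payload;

            /* old code used ALIGN(...) itself as the pad count */
            printf("total %u bytes, of which %u padding\n", total, pad);
            return 0;
    }
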
index 3925db160650ca5d96df0880331717188a2ce23f..513c81f43d6e87926a2d368fc023525a6a37b338 100644 (file)
@@ -189,22 +189,7 @@ static inline int ll_quota_off(struct super_block *sb, int off, int remount)
 #endif
 
 
-
-/*
- * After 3.1, kernel's nameidata.intent.open.flags is different
- * with lustre's lookup_intent.it_flags, as lustre's it_flags'
- * lower bits equal to FMODE_xxx while kernel doesn't transliterate
- * lower bits of nameidata.intent.open.flags to FMODE_xxx.
- * */
 #include <linux/version.h>
-static inline int ll_namei_to_lookup_intent_flag(int flag)
-{
-#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 1, 0)
-       flag = (flag & ~O_ACCMODE) | OPEN_FMODE(flag);
-#endif
-       return flag;
-}
-
 #include <linux/fs.h>
 
 # define ll_umode_t    umode_t
index cc3ab351943e195a454d33d3f8d8529694f5ac0d..f9262243f9359aa2b9ae0ddd00a9f5c0937cbe6f 100644 (file)
@@ -87,7 +87,7 @@ static void cfs_cpu_core_siblings(int cpu, cpumask_t *mask)
 /* return cpumask of HTs in the same core */
 static void cfs_cpu_ht_siblings(int cpu, cpumask_t *mask)
 {
-       cpumask_copy(mask, topology_thread_cpumask(cpu));
+       cpumask_copy(mask, topology_sibling_cpumask(cpu));
 }
 
 static void cfs_node_to_cpumask(int node, cpumask_t *mask)
index 5f918e3c4683ddac82c864d1d45f7c5700d20282..528af9011653d199f77dddb9926168acbfa96fef 100644 (file)
 #define VM_FAULT_RETRY 0
 #endif
 
-/* Kernel 3.1 kills LOOKUP_CONTINUE, LOOKUP_PARENT is equivalent to it.
- * seem kernel commit 49084c3bb2055c401f3493c13edae14d49128ca0 */
-#ifndef LOOKUP_CONTINUE
-#define LOOKUP_CONTINUE LOOKUP_PARENT
-#endif
-
 /** Only used on client-side for indicating the tail of dir hash/offset. */
 #define LL_DIR_END_OFF   0x7fffffffffffffffULL
 #define LL_DIR_END_OFF_32BIT    0x7fffffffUL
index 3711e671a4dfaa21af87d19857416f8f0f631734..69b203651905e93f77149754a5b9d6a021b6bf32 100644 (file)
@@ -118,7 +118,7 @@ failed:
        return rc;
 }
 
-static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *ll_follow_link(struct dentry *dentry, void **cookie)
 {
        struct inode *inode = d_inode(dentry);
        struct ptlrpc_request *request = NULL;
@@ -126,32 +126,22 @@ static void *ll_follow_link(struct dentry *dentry, struct nameidata *nd)
        char *symname = NULL;
 
        CDEBUG(D_VFSTRACE, "VFS Op\n");
-       /* Limit the recursive symlink depth to 5 instead of default
-        * 8 links when kernel has 4k stack to prevent stack overflow.
-        * For 8k stacks we need to limit it to 7 for local servers. */
-       if (THREAD_SIZE < 8192 && current->link_count >= 6) {
-               rc = -ELOOP;
-       } else if (THREAD_SIZE == 8192 && current->link_count >= 8) {
-               rc = -ELOOP;
-       } else {
-               ll_inode_size_lock(inode);
-               rc = ll_readlink_internal(inode, &request, &symname);
-               ll_inode_size_unlock(inode);
-       }
+       ll_inode_size_lock(inode);
+       rc = ll_readlink_internal(inode, &request, &symname);
+       ll_inode_size_unlock(inode);
        if (rc) {
                ptlrpc_req_finished(request);
-               request = NULL;
-               symname = ERR_PTR(rc);
+               return ERR_PTR(rc);
        }
 
-       nd_set_link(nd, symname);
        /* symname may contain a pointer to the request message buffer,
         * we delay request releasing until ll_put_link then.
         */
-       return request;
+       *cookie = request;
+       return symname;
 }
 
-static void ll_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+static void ll_put_link(struct inode *unused, void *cookie)
 {
        ptlrpc_req_finished(cookie);
 }
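
This conversion tracks the VFS symlink API change in this cycle: ->follow_link() now returns the link body directly and stashes any state to release in a cookie, and ->put_link() receives just the inode and that cookie; nameidata (and the manual recursion-depth guessing above) is gone from the interface. A skeletal sketch of the new pair (the static target stands in for whatever the filesystem must keep alive):

    #include <linux/err.h>
    #include <linux/fs.h>

    static char demo_target[] = "target-path";

    static const char *demo_follow_link(struct dentry *dentry, void **cookie)
    {
            /* on failure, return ERR_PTR(rc); put_link is then skipped */
            *cookie = demo_target;   /* handed back to ->put_link() later */
            return demo_target;      /* the symlink body itself */
    }

    static void demo_put_link(struct inode *unused, void *cookie)
    {
            /* release whatever follow_link pinned, e.g. a request buffer */
    }

    static const struct inode_operations demo_symlink_iops = {
            .readlink    = generic_readlink,
            .follow_link = demo_follow_link,
            .put_link    = demo_put_link,
    };
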
index 8e61421515cb689e205681257fe634f9baeb72bc..344189ac5698a575accbaf7afc5f1ec940651240 100644 (file)
@@ -557,7 +557,7 @@ ptlrpc_server_nthreads_check(struct ptlrpc_service *svc,
                 * there are.
                 */
                /* weight is # of HTs */
-               if (cpumask_weight(topology_thread_cpumask(0)) > 1) {
+               if (cpumask_weight(topology_sibling_cpumask(0)) > 1) {
                        /* depress thread factor for hyper-thread */
                        factor = factor - (factor >> 1) + (factor >> 3);
                }
@@ -2768,7 +2768,7 @@ int ptlrpc_hr_init(void)
 
        init_waitqueue_head(&ptlrpc_hr.hr_waitq);
 
-       weight = cpumask_weight(topology_thread_cpumask(0));
+       weight = cpumask_weight(topology_sibling_cpumask(0));
 
        cfs_percpt_for_each(hrp, i, ptlrpc_hr.hr_partitions) {
                hrp->hrp_cpt = i;
index 5ff4716b72c311485084005b9e09a36021157530..784b5ecfa8493ba07d8ba90cde1b11b2b6a4b6b7 100644 (file)
@@ -746,8 +746,8 @@ void oz_hcd_pd_reset(void *hpd, void *hport)
 /*
  * Context: softirq
  */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
-                       int length, int offset, int total_size)
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status, const u8 *desc,
+                       u8 length, u16 offset, u16 total_size)
 {
        struct oz_port *port = hport;
        struct urb *urb;
@@ -759,8 +759,8 @@ void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status, const u8 *desc,
        if (!urb)
                return;
        if (status == 0) {
-               int copy_len;
-               int required_size = urb->transfer_buffer_length;
+               unsigned int copy_len;
+               unsigned int required_size = urb->transfer_buffer_length;
 
                if (required_size > total_size)
                        required_size = total_size;
index 4249fa37401289c4caf1f4cae4d46dba321f276b..d2a6085345bec8c2e927115389efc46bfbad3019 100644 (file)
@@ -29,8 +29,8 @@ void oz_usb_request_heartbeat(void *hpd);
 
 /* Confirmation functions.
  */
-void oz_hcd_get_desc_cnf(void *hport, u8 req_id, int status,
-       const u8 *desc, int length, int offset, int total_size);
+void oz_hcd_get_desc_cnf(void *hport, u8 req_id, u8 status,
+       const u8 *desc, u8 length, u16 offset, u16 total_size);
 void oz_hcd_control_cnf(void *hport, u8 req_id, u8 rcode,
        const u8 *data, int data_len);
 
index d434d8c6fff67c04b58d6cac5c76a6832bae5bc3..f660bb198c65534a6cbe8183d3f5d0a30a532eb1 100644 (file)
@@ -326,7 +326,11 @@ static void oz_usb_handle_ep_data(struct oz_usb_ctx *usb_ctx,
                        struct oz_multiple_fixed *body =
                                (struct oz_multiple_fixed *)data_hdr;
                        u8 *data = body->data;
-                       int n = (len - sizeof(struct oz_multiple_fixed)+1)
+                       unsigned int n;
+                       if (!body->unit_size ||
+                               len < sizeof(struct oz_multiple_fixed) - 1)
+                               break;
+                       n = (len - (sizeof(struct oz_multiple_fixed) - 1))
                                / body->unit_size;
                        while (n--) {
                                oz_hcd_data_ind(usb_ctx->hport, body->endpoint,
@@ -390,10 +394,15 @@ void oz_usb_rx(struct oz_pd *pd, struct oz_elt *elt)
        case OZ_GET_DESC_RSP: {
                        struct oz_get_desc_rsp *body =
                                (struct oz_get_desc_rsp *)usb_hdr;
-                       int data_len = elt->length -
-                                       sizeof(struct oz_get_desc_rsp) + 1;
-                       u16 offs = le16_to_cpu(get_unaligned(&body->offset));
-                       u16 total_size =
+                       u16 offs, total_size;
+                       u8 data_len;
+
+                       if (elt->length < sizeof(struct oz_get_desc_rsp) - 1)
+                               break;
+                       data_len = elt->length -
+                                       (sizeof(struct oz_get_desc_rsp) - 1);
+                       offs = le16_to_cpu(get_unaligned(&body->offset));
+                       total_size =
                                le16_to_cpu(get_unaligned(&body->total_size));
                        oz_dbg(ON, "USB_REQ_GET_DESCRIPTOR - cnf\n");
                        oz_hcd_get_desc_cnf(usb_ctx->hport, body->req_id,
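
The added length checks above guard a classic unsigned underflow: with a flexible trailing byte, the effective header size is sizeof(hdr) - 1, and subtracting that from a shorter, attacker-controlled length wraps around to an enormous element count before the division. A runnable illustration with a toy header (not the oz wire format):

    #include <stdio.h>

    struct hdr { unsigned char type, unit_size, data[1]; };

    int main(void)
    {
            unsigned int len = 1;                           /* too short */
            unsigned int hdr_len = sizeof(struct hdr) - 1;  /* = 2 */
            unsigned int n = len - hdr_len;   /* wraps to ~4 billion */

            printf("unchecked count: %u\n", n);
            if (len < hdr_len)                /* the added guard */
                    printf("rejected short packet\n");
            return 0;
    }
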
index f1d47a0676c3e3ba29ea974754c77e8a32a3f950..ada8d5dafd492e97a1b4d9457d25e4a485e67556 100644 (file)
@@ -898,11 +898,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
                          IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedNoLinkBlinkInProgress = true;
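
The del_timer_sync() -> del_timer() swaps in this file avoid waiting on the timer handler: del_timer_sync() spins until a running callback completes and must not be used from the callback's own context or under a lock the callback takes, which these LED control paths cannot rule out. A sketch of the distinction (timer setup is assumed elsewhere):

    #include <linux/timer.h>
    #include <linux/types.h>

    static struct timer_list demo_blink_timer;   /* set up elsewhere */

    static void demo_stop_blinking(bool may_wait)
    {
            if (may_wait)
                    /* waits until any running handler has finished */
                    del_timer_sync(&demo_blink_timer);
            else
                    /* only deactivates; a handler may still be running */
                    del_timer(&demo_blink_timer);
    }
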
@@ -921,11 +921,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedLinkBlinkInProgress = true;
@@ -946,15 +946,15 @@ static void SwLedControlMode1(struct _adapter *padapter,
                        if (IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                 pLed->bLedLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -975,11 +975,11 @@ static void SwLedControlMode1(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedLinkBlinkInProgress = false;
                        }
                        pLed->bLedBlinkInProgress = true;
@@ -998,19 +998,19 @@ static void SwLedControlMode1(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                 if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                 pLed->bLedLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1025,23 +1025,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS:
                if (pLed->bLedNoLinkBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedNoLinkBlinkInProgress = false;
                }
                if (pLed->bLedLinkBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                         pLed->bLedLinkBlinkInProgress = false;
                }
                if (pLed->bLedBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress == true) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress)
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                else
                        pLed->bLedWPSBlinkInProgress = true;
                pLed->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1057,7 +1057,7 @@ static void SwLedControlMode1(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1073,23 +1073,23 @@ static void SwLedControlMode1(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedNoLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedNoLinkBlinkInProgress = false;
                }
                if (pLed->bLedLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedLinkBlinkInProgress = false;
                }
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1116,7 +1116,7 @@ static void SwLedControlMode2(struct _adapter *padapter,
                                return;
 
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1154,11 +1154,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
                pLed->CurrLedState = LED_ON;
                pLed->BlinkingLedState = LED_ON;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
 
@@ -1170,11 +1170,11 @@ static void SwLedControlMode2(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1214,15 +1214,15 @@ static void SwLedControlMode2(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1248,7 +1248,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
                        if (IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1286,11 +1286,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
                pLed->CurrLedState = LED_ON;
                pLed->BlinkingLedState = LED_ON;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1300,11 +1300,11 @@ static void SwLedControlMode3(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1319,7 +1319,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&(pLed->BlinkTimer));
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                } else
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1336,7 +1336,7 @@ static void SwLedControlMode3(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->CurrLedState = LED_OFF;
@@ -1357,15 +1357,15 @@ static void SwLedControlMode3(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                mod_timer(&pLed->BlinkTimer,
@@ -1388,7 +1388,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
        case LED_CTL_START_TO_LINK:
                if (pLed1->bLedWPSBlinkInProgress) {
                        pLed1->bLedWPSBlinkInProgress = false;
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                        pLed1->BlinkingLedState = LED_OFF;
                        pLed1->CurrLedState = LED_OFF;
                        if (pLed1->bLedOn)
@@ -1400,11 +1400,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        pLed->bLedStartToLinkBlinkInProgress = true;
@@ -1426,7 +1426,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                if (LedAction == LED_CTL_LINK) {
                        if (pLed1->bLedWPSBlinkInProgress) {
                                pLed1->bLedWPSBlinkInProgress = false;
-                               del_timer_sync(&pLed1->BlinkTimer);
+                               del_timer(&pLed1->BlinkTimer);
                                pLed1->BlinkingLedState = LED_OFF;
                                pLed1->CurrLedState = LED_OFF;
                                if (pLed1->bLedOn)
@@ -1439,7 +1439,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedNoLinkBlinkInProgress = true;
@@ -1460,11 +1460,11 @@ static void SwLedControlMode4(struct _adapter *padapter,
                        if (IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1485,7 +1485,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                            IS_LED_WPS_BLINKING(pLed))
                                return;
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        pLed->bLedBlinkInProgress = true;
@@ -1503,7 +1503,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed1->bLedWPSBlinkInProgress) {
                        pLed1->bLedWPSBlinkInProgress = false;
-                       del_timer_sync(&(pLed1->BlinkTimer));
+                       del_timer(&pLed1->BlinkTimer);
                        pLed1->BlinkingLedState = LED_OFF;
                        pLed1->CurrLedState = LED_OFF;
                        if (pLed1->bLedOn)
@@ -1512,15 +1512,15 @@ static void SwLedControlMode4(struct _adapter *padapter,
                }
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedNoLinkBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedNoLinkBlinkInProgress = false;
                        }
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        if (pLed->bLedScanBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedScanBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1538,7 +1538,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS:  /*WPS connect success*/
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1552,7 +1552,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL:     /*WPS authentication fail*/
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1565,7 +1565,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                          msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
                /*LED1 settings*/
                if (pLed1->bLedWPSBlinkInProgress)
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                else
                        pLed1->bLedWPSBlinkInProgress = true;
                pLed1->CurrLedState = LED_BLINK_WPS_STOP;
@@ -1578,7 +1578,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                break;
        case LED_CTL_STOP_WPS_FAIL_OVERLAP:     /*WPS session overlap*/
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->bLedNoLinkBlinkInProgress = true;
@@ -1591,7 +1591,7 @@ static void SwLedControlMode4(struct _adapter *padapter,
                          msecs_to_jiffies(LED_BLINK_NO_LINK_INTERVAL_ALPHA));
                /*LED1 settings*/
                if (pLed1->bLedWPSBlinkInProgress)
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                else
                        pLed1->bLedWPSBlinkInProgress = true;
                pLed1->CurrLedState = LED_BLINK_WPS_STOP_OVERLAP;
@@ -1607,31 +1607,31 @@ static void SwLedControlMode4(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedNoLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedNoLinkBlinkInProgress = false;
                }
                if (pLed->bLedLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedLinkBlinkInProgress = false;
                }
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                if (pLed->bLedScanBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedScanBlinkInProgress = false;
                }
                if (pLed->bLedStartToLinkBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedStartToLinkBlinkInProgress = false;
                }
                if (pLed1->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed1->BlinkTimer);
+                       del_timer(&pLed1->BlinkTimer);
                        pLed1->bLedWPSBlinkInProgress = false;
                }
                pLed1->BlinkingLedState = LED_UNKNOWN;
@@ -1671,7 +1671,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
                        ; /* dummy branch */
                else if (pLed->bLedScanBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedScanBlinkInProgress = true;
@@ -1705,7 +1705,7 @@ static void SwLedControlMode5(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                SwLedOff(padapter, pLed);
@@ -1756,7 +1756,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
        case LED_CTL_START_WPS_BOTTON:
                if (pLed->bLedWPSBlinkInProgress == false) {
                        if (pLed->bLedBlinkInProgress == true) {
-                               del_timer_sync(&pLed->BlinkTimer);
+                               del_timer(&pLed->BlinkTimer);
                                pLed->bLedBlinkInProgress = false;
                        }
                        pLed->bLedWPSBlinkInProgress = true;
@@ -1772,7 +1772,7 @@ static void SwLedControlMode6(struct _adapter *padapter,
        case LED_CTL_STOP_WPS_FAIL:
        case LED_CTL_STOP_WPS:
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                pLed->CurrLedState = LED_ON;
@@ -1784,11 +1784,11 @@ static void SwLedControlMode6(struct _adapter *padapter,
                pLed->CurrLedState = LED_OFF;
                pLed->BlinkingLedState = LED_OFF;
                if (pLed->bLedBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedBlinkInProgress = false;
                }
                if (pLed->bLedWPSBlinkInProgress) {
-                       del_timer_sync(&pLed->BlinkTimer);
+                       del_timer(&pLed->BlinkTimer);
                        pLed->bLedWPSBlinkInProgress = false;
                }
                SwLedOff(padapter, pLed);
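
A note on the recurring change in this file: del_timer_sync() waits for a running timer handler to finish, which can deadlock when the caller holds a lock the handler also takes or runs in the timer's own context; del_timer() only deactivates a pending timer and returns. A minimal sketch of the hazard, with illustrative names (blink_led, blink_fn and stop_blink are not driver symbols):

	#include <linux/timer.h>
	#include <linux/spinlock.h>

	/* Hypothetical miniature of the LED state machines patched above. */
	struct blink_led {
		spinlock_t lock;
		struct timer_list timer;
		bool blinking;
	};

	static void blink_fn(unsigned long data)
	{
		struct blink_led *led = (struct blink_led *)data;

		spin_lock(&led->lock);	/* same lock the control path holds */
		led->blinking = !led->blinking;
		spin_unlock(&led->lock);
	}

	static void stop_blink(struct blink_led *led)
	{
		spin_lock(&led->lock);
		/*
		 * del_timer_sync() here would wait for a running blink_fn(),
		 * while blink_fn() spins on led->lock: a deadlock.  del_timer()
		 * merely clears a pending expiry and returns immediately.
		 */
		del_timer(&led->timer);
		led->blinking = false;
		spin_unlock(&led->lock);
	}
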
index 1a1c38f885d6b191d5a62b5fb1aae26713dd6cb3..e35854d28f90ed96aa3ff149f39175c9e46b1373 100644 (file)
@@ -910,7 +910,7 @@ void r8712_createbss_cmd_callback(struct _adapter *padapter,
        if (pcmd->res != H2C_SUCCESS)
                mod_timer(&pmlmepriv->assoc_timer,
                          jiffies + msecs_to_jiffies(1));
-       del_timer_sync(&pmlmepriv->assoc_timer);
+       del_timer(&pmlmepriv->assoc_timer);
 #ifdef __BIG_ENDIAN
        /* endian_convert */
        pnetwork->Length = le32_to_cpu(pnetwork->Length);
index 42fba3f5b593e08801a57269ede5e21da8c8841f..cb0b6387789f197dbe2d314b96728d05b6a361c9 100644 (file)
@@ -1900,23 +1900,20 @@ static int r871x_mp_ioctl_hdl(struct net_device *dev,
        struct mp_ioctl_handler *phandler;
        struct mp_ioctl_param *poidparam;
        unsigned long BytesRead, BytesWritten, BytesNeeded;
-       u8 *pparmbuf = NULL, bset;
+       u8 *pparmbuf, bset;
        u16 len;
        uint status;
        int ret = 0;
 
-       if ((!p->length) || (!p->pointer)) {
-               ret = -EINVAL;
-               goto _r871x_mp_ioctl_hdl_exit;
-       }
+       if ((!p->length) || (!p->pointer))
+               return -EINVAL;
+
        bset = (u8)(p->flags & 0xFFFF);
        len = p->length;
-       pparmbuf = NULL;
        pparmbuf = memdup_user(p->pointer, len);
-       if (IS_ERR(pparmbuf)) {
-               ret = PTR_ERR(pparmbuf);
-               goto _r871x_mp_ioctl_hdl_exit;
-       }
+       if (IS_ERR(pparmbuf))
+               return PTR_ERR(pparmbuf);
+
        poidparam = (struct mp_ioctl_param *)pparmbuf;
        if (poidparam->subcode >= MAX_MP_IOCTL_SUBCODE) {
                ret = -EINVAL;
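
The ioctl cleanup above works because memdup_user() already bundles the allocation and the copy_from_user() and reports failure as an ERR_PTR, so the NULL pre-initialisation and the goto-based unwinding were dead weight. A self-contained sketch of the resulting pattern (handle_user_blob is a made-up name):

	#include <linux/slab.h>
	#include <linux/uaccess.h>
	#include <linux/err.h>

	static int handle_user_blob(void __user *uptr, u16 len)
	{
		u8 *buf;

		if (!len || !uptr)
			return -EINVAL;

		buf = memdup_user(uptr, len);	/* kmalloc() + copy_from_user() */
		if (IS_ERR(buf))
			return PTR_ERR(buf);	/* nothing to free on failure */

		/* ... consume buf ... */

		kfree(buf);
		return 0;
	}
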
index fb2b195b90af0d1690552dfccb6ec93b13960fdf..c044b0e55ba93d0c989031d52ce99f4008ae0630 100644 (file)
@@ -582,7 +582,7 @@ void r8712_surveydone_event_callback(struct _adapter *adapter, u8 *pbuf)
        spin_lock_irqsave(&pmlmepriv->lock, irqL);
 
        if (check_fwstate(pmlmepriv, _FW_UNDER_SURVEY) == true) {
-               del_timer_sync(&pmlmepriv->scan_to_timer);
+               del_timer(&pmlmepriv->scan_to_timer);
 
                _clr_fwstate_(pmlmepriv, _FW_UNDER_SURVEY);
        }
@@ -696,7 +696,7 @@ void r8712_ind_disconnect(struct _adapter *padapter)
        }
        if (padapter->pwrctrlpriv.pwr_mode !=
            padapter->registrypriv.power_mgnt) {
-               del_timer_sync(&pmlmepriv->dhcp_timer);
+               del_timer(&pmlmepriv->dhcp_timer);
                r8712_set_ps_mode(padapter, padapter->registrypriv.power_mgnt,
                                  padapter->registrypriv.smart_ps);
        }
@@ -910,7 +910,7 @@ void r8712_joinbss_event_callback(struct _adapter *adapter, u8 *pbuf)
                        if (check_fwstate(pmlmepriv, WIFI_STATION_STATE)
                                == true)
                                r8712_indicate_connect(adapter);
-                       del_timer_sync(&pmlmepriv->assoc_timer);
+                       del_timer(&pmlmepriv->assoc_timer);
                } else
                        goto ignore_joinbss_callback;
        } else {
index aaa584435c87d25d3efb3bbbe794da6cf2096c24..9bc04f474d18d7c79311c8bd6fc80b48015a6550 100644 (file)
@@ -103,7 +103,7 @@ void r8712_cpwm_int_hdl(struct _adapter *padapter,
 
        if (pwrpriv->cpwm_tog == ((preportpwrstate->state) & 0x80))
                return;
-       del_timer_sync(&padapter->pwrctrlpriv.rpwm_check_timer);
+       del_timer(&padapter->pwrctrlpriv.rpwm_check_timer);
        _enter_pwrlock(&pwrpriv->lock);
        pwrpriv->cpwm = (preportpwrstate->state) & 0xf;
        if (pwrpriv->cpwm >= PS_STATE_S2) {
index 7bb96c47f1883dad0c62e8618b2e98ac773fca27..a9b93d0f6f566b83bb00271de37f68dc1716586c 100644 (file)
@@ -198,7 +198,7 @@ void r8712_free_stainfo(struct _adapter *padapter, struct sta_info *psta)
         * cancel reordering_ctrl_timer */
        for (i = 0; i < 16; i++) {
                preorder_ctrl = &psta->recvreorder_ctrl[i];
-               del_timer_sync(&preorder_ctrl->reordering_ctrl_timer);
+               del_timer(&preorder_ctrl->reordering_ctrl_timer);
        }
        spin_lock(&(pfree_sta_queue->lock));
        /* insert into free_sta_queue; 20061114 */
index 3c7ea95dd9f93aa0f1619c246d3fea78d0eea155..dbbb2f879a29fb00a59e20733ee118920ff19c59 100644 (file)
@@ -1250,7 +1250,7 @@ err_enable:
        return -ENODEV;
 }
 
-static void __exit lynxfb_pci_remove(struct pci_dev *pdev)
+static void lynxfb_pci_remove(struct pci_dev *pdev)
 {
        struct fb_info *info;
        struct lynx_share *share;
index 1cdcf49b2445094ba5270011251dfe86684766e4..e00c0605d1541556492c8ab46d5d4db8156a30f6 100644 (file)
@@ -362,12 +362,16 @@ bool CARDbSetPhyParameter(struct vnt_private *pDevice, u8 bb_type)
  * Return Value: none
  */
 bool CARDbUpdateTSF(struct vnt_private *pDevice, unsigned char byRxRate,
-                   u64 qwBSSTimestamp, u64 qwLocalTSF)
+                   u64 qwBSSTimestamp)
 {
+       u64 local_tsf;
        u64 qwTSFOffset = 0;
 
-       if (qwBSSTimestamp != qwLocalTSF) {
-               qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp, qwLocalTSF);
+       CARDbGetCurrentTSF(pDevice, &local_tsf);
+
+       if (qwBSSTimestamp != local_tsf) {
+               qwTSFOffset = CARDqGetTSFOffset(byRxRate, qwBSSTimestamp,
+                                               local_tsf);
                /* adjust TSF, HW's TSF add TSF Offset reg */
                VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST, (u32)qwTSFOffset);
                VNSvOutPortD(pDevice->PortOffset + MAC_REG_TSFOFST + 4, (u32)(qwTSFOffset >> 32));
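
Beyond dropping the caller-supplied qwLocalTSF in favour of reading the live TSF via CARDbGetCurrentTSF(), the hunk shows the usual trick for programming a 64-bit value through a pair of 32-bit registers. A minimal sketch, assuming hypothetical register offsets (base and base + 4 stand in for MAC_REG_TSFOFST and its high word):

	#include <linux/types.h>
	#include <linux/io.h>

	static void write_u64_split(void __iomem *base, u64 v)
	{
		writel((u32)v, base);			/* low 32 bits first */
		writel((u32)(v >> 32), base + 4);	/* then the high 32 bits */
	}
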
index 2dfc4195227188bf48f1473cc821051739f135da..16cca49e680a3fc74bde7795b7565cfbe61bd17a 100644 (file)
@@ -83,7 +83,7 @@ bool CARDbRadioPowerOff(struct vnt_private *);
 bool CARDbRadioPowerOn(struct vnt_private *);
 bool CARDbSetPhyParameter(struct vnt_private *, u8);
 bool CARDbUpdateTSF(struct vnt_private *, unsigned char byRxRate,
-                   u64 qwBSSTimestamp, u64 qwLocalTSF);
+                   u64 qwBSSTimestamp);
 bool CARDbSetBeaconPeriod(struct vnt_private *, unsigned short wBeaconInterval);
 
 #endif /* __CARD_H__ */
index 4bb4f8ee41321a23134bcaf750ff3d2896350e82..0343ae386f0351bdff320f2f956371540eb931f3 100644 (file)
@@ -912,7 +912,11 @@ static int vnt_int_report_rate(struct vnt_private *priv,
 
        if (!(tsr1 & TSR1_TERR)) {
                info->status.rates[0].idx = idx;
-               info->flags |= IEEE80211_TX_STAT_ACK;
+
+               if (info->flags & IEEE80211_TX_CTL_NO_ACK)
+                       info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
+               else
+                       info->flags |= IEEE80211_TX_STAT_ACK;
        }
 
        return 0;
@@ -937,9 +941,6 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
                /* Only the status of first TD in the chain is correct */
                if (pTD->m_td1TD1.byTCR & TCR_STP) {
                        if ((pTD->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB) != 0) {
-
-                               vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
-
                                if (!(byTsr1 & TSR1_TERR)) {
                                        if (byTsr0 != 0) {
                                                pr_debug(" Tx[%d] OK but has error. tsr1[%02X] tsr0[%02X]\n",
@@ -958,6 +959,9 @@ static int device_tx_srv(struct vnt_private *pDevice, unsigned int uIdx)
                                                 (int)uIdx, byTsr1, byTsr0);
                                }
                        }
+
+                       vnt_int_report_rate(pDevice, pTD->pTDInfo, byTsr0, byTsr1);
+
                        device_free_tx_buf(pDevice, pTD);
                        pDevice->iTDUsed[uIdx]--;
                }
@@ -989,10 +993,8 @@ static void device_free_tx_buf(struct vnt_private *pDevice, PSTxDesc pDesc)
                                 skb->len, DMA_TO_DEVICE);
        }
 
-       if (pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
+       if (skb)
                ieee80211_tx_status_irqsafe(pDevice->hw, skb);
-       else
-               dev_kfree_skb_irq(skb);
 
        pTDInfo->skb_dma = 0;
        pTDInfo->skb = NULL;
@@ -1204,14 +1206,6 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
        if (dma_idx == TYPE_AC0DMA)
                head_td->pTDInfo->byFlags = TD_FLAGS_NETIF_SKB;
 
-       priv->iTDUsed[dma_idx]++;
-
-       /* Take ownership */
-       wmb();
-       head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
-
-       /* get Next */
-       wmb();
        priv->apCurrTD[dma_idx] = head_td->next;
 
        spin_unlock_irqrestore(&priv->lock, flags);
@@ -1232,11 +1226,18 @@ static int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
 
        head_td->buff_addr = cpu_to_le32(head_td->pTDInfo->skb_dma);
 
+       /* Poll Transmit the adapter */
+       wmb();
+       head_td->m_td0TD0.f1Owner = OWNED_BY_NIC;
+       wmb(); /* owner flip must be visible before the transmit poll */
+
        if (head_td->pTDInfo->byFlags & TD_FLAGS_NETIF_SKB)
                MACvTransmitAC0(priv->PortOffset);
        else
                MACvTransmit0(priv->PortOffset);
 
+       priv->iTDUsed[dma_idx]++;
+
        spin_unlock_irqrestore(&priv->lock, flags);
 
        return 0;
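
Together with the removal above, the vnt_tx_packet() hunks reorder the descriptor hand-off: previously the ownership bit flipped to OWNED_BY_NIC before buff_addr was filled in, so the NIC could race with a half-built descriptor. The fixed ordering is fields, wmb(), owner flip, wmb(), doorbell. A generic sketch of that pattern, with illustrative names:

	#include <linux/types.h>
	#include <linux/io.h>

	struct tx_desc_sketch {			/* hypothetical descriptor */
		__le32 buff_addr;
		u32 flags;
	};

	#define OWNED_BY_NIC_BIT	0x80000000u	/* illustrative owner bit */

	static void hand_off_desc(struct tx_desc_sketch *d, dma_addr_t dma,
				  void __iomem *doorbell)
	{
		d->buff_addr = cpu_to_le32((u32)dma);	/* 1. fill every field */

		wmb();			/* 2. fields visible before the flip */
		d->flags |= OWNED_BY_NIC_BIT;	/* 3. hand over to the NIC */
		wmb();			/* 4. flip visible before the kick */

		writel(1, doorbell);	/* 5. start the DMA engine */
	}
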
@@ -1416,9 +1417,16 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
 
        priv->current_aid = conf->aid;
 
-       if (changed & BSS_CHANGED_BSSID)
+       if (changed & BSS_CHANGED_BSSID) {
+               unsigned long flags;
+
+               spin_lock_irqsave(&priv->lock, flags);
+
                MACvWriteBSSIDAddress(priv->PortOffset, (u8 *)conf->bssid);
 
+               spin_unlock_irqrestore(&priv->lock, flags);
+       }
+
        if (changed & BSS_CHANGED_BASIC_RATES) {
                priv->basic_rates = conf->basic_rates;
 
@@ -1477,7 +1485,7 @@ static void vnt_bss_info_changed(struct ieee80211_hw *hw,
        if (changed & BSS_CHANGED_ASSOC && priv->op_mode != NL80211_IFTYPE_AP) {
                if (conf->assoc) {
                        CARDbUpdateTSF(priv, conf->beacon_rate->hw_value,
-                                      conf->sync_device_ts, conf->sync_tsf);
+                                      conf->sync_tsf);
 
                        CARDbSetBeaconPeriod(priv, conf->beacon_int);
 
index f6c2cf8590c4811471a88c9e82be1d4151a11618..5c589962a1e841ad66c41fe2057df51f18cdee31 100644 (file)
@@ -805,10 +805,18 @@ int vnt_tx_packet(struct vnt_private *priv, struct sk_buff *skb)
                vnt_schedule_command(priv, WLAN_CMD_SETPOWER);
        }
 
-       if (current_rate > RATE_11M)
-               pkt_type = priv->packet_type;
-       else
+       if (current_rate > RATE_11M) {
+               if (info->band == IEEE80211_BAND_5GHZ) {
+                       pkt_type = PK_TYPE_11A;
+               } else {
+                       if (tx_rate->flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
+                               pkt_type = PK_TYPE_11GB;
+                       else
+                               pkt_type = PK_TYPE_11GA;
+               }
+       } else {
                pkt_type = PK_TYPE_11B;
+       }
 
        spin_lock_irqsave(&priv->lock, flags);
 
index 34871a628b11124e093231694b0b0fb14b62de80..74e6114ff18f9343e3012cf21c7faadbdf5c6f61 100644 (file)
@@ -230,7 +230,7 @@ int iscsit_access_np(struct iscsi_np *np, struct iscsi_portal_group *tpg)
         * Here we serialize access across the TIQN+TPG Tuple.
         */
        ret = down_interruptible(&tpg->np_login_sem);
-       if ((ret != 0) || signal_pending(current))
+       if (ret != 0)
                return -1;
 
        spin_lock_bh(&tpg->tpg_state_lock);
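
The dropped signal_pending() test matters: down_interruptible() already returns non-zero when a signal interrupts the sleep, and checking for a pending signal after a successful acquisition made the caller bail out without ever releasing the semaphore. The same fix recurs in iscsit_get_tpg() and sbc_compare_and_write() further down. A minimal sketch (take_login_sem is a made-up name):

	#include <linux/semaphore.h>

	static int take_login_sem(struct semaphore *sem)
	{
		int ret = down_interruptible(sem);

		/*
		 * The old code also tested signal_pending(current) here; with
		 * ret == 0 the semaphore is held, so returning failure on a
		 * merely-pending signal leaked the semaphore forever.
		 */
		if (ret != 0)
			return -1;	/* interrupted: semaphore NOT held */

		return 0;		/* held: caller must up(sem) later */
	}
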
index 8ce94ff744e6ba1dfd131e5e59a3b18a75639bcb..70d799dfab03c2e3b616b06a635c2a63e8941fda 100644 (file)
@@ -346,6 +346,7 @@ static int iscsi_login_zero_tsih_s1(
        if (IS_ERR(sess->se_sess)) {
                iscsit_tx_login_rsp(conn, ISCSI_STATUS_CLS_TARGET_ERR,
                                ISCSI_LOGIN_STATUS_NO_RESOURCES);
+               kfree(sess->sess_ops);
                kfree(sess);
                return -ENOMEM;
        }
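
The added kfree(sess->sess_ops) closes a leak on the login error path: each allocation made before the failing step must be unwound in reverse order. A self-contained sketch of the rule, with hypothetical types (core_session_alloc is a stand-in that always fails so the unwind path runs):

	#include <linux/slab.h>
	#include <linux/err.h>

	struct ops_sketch  { int placeholder; };
	struct sess_sketch { struct ops_sketch *ops; void *core; };

	static void *core_session_alloc(void)
	{
		return ERR_PTR(-ENOMEM);	/* simulate the failing step */
	}

	static struct sess_sketch *sess_create(void)
	{
		struct sess_sketch *s = kzalloc(sizeof(*s), GFP_KERNEL);

		if (!s)
			return NULL;
		s->ops = kzalloc(sizeof(*s->ops), GFP_KERNEL);
		if (!s->ops)
			goto free_sess;

		s->core = core_session_alloc();
		if (IS_ERR(s->core))
			goto free_ops;	/* this unwind step was missing */

		return s;

	free_ops:
		kfree(s->ops);	/* matches the added kfree(sess->sess_ops) */
	free_sess:
		kfree(s);
		return NULL;
	}
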
index e8a240818353bb54e2fdf9bb14cb194081c519c2..5e3295fe404d7cc93aae6354f2578bcbca55ee23 100644 (file)
@@ -161,10 +161,7 @@ struct iscsi_portal_group *iscsit_get_tpg_from_np(
 int iscsit_get_tpg(
        struct iscsi_portal_group *tpg)
 {
-       int ret;
-
-       ret = mutex_lock_interruptible(&tpg->tpg_access_lock);
-       return ((ret != 0) || signal_pending(current)) ? -1 : 0;
+       return mutex_lock_interruptible(&tpg->tpg_access_lock);
 }
 
 void iscsit_put_tpg(struct iscsi_portal_group *tpg)
index 75cbde1f7c5b6e34ea7060011c2aca817e4e55f2..4f8d4d459aa4f936a09438cc076fc998a75aa783 100644 (file)
@@ -704,7 +704,7 @@ target_alua_state_check(struct se_cmd *cmd)
 
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        if (!port)
@@ -2377,7 +2377,7 @@ ssize_t core_alua_store_secondary_write_metadata(
 
 int core_setup_alua(struct se_device *dev)
 {
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                struct t10_alua_lu_gp_member *lu_gp_mem;
 
index ddaf76a4ac2aab3c00e70607a76c90a6cb308389..e7b0430a0575d0403dbb38b0fd4d41df1ccce79d 100644 (file)
@@ -212,10 +212,6 @@ static struct config_group *target_core_register_fabric(
 
        pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
                        " %s\n", tf->tf_group.cg_item.ci_name);
-       /*
-        * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
-        */
-       tf->tf_ops.tf_subsys = tf->tf_subsys;
        tf->tf_fabric = &tf->tf_group.cg_item;
        pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
                        " for %s\n", name);
@@ -291,10 +287,17 @@ static struct configfs_subsystem target_core_fabrics = {
        },
 };
 
-struct configfs_subsystem *target_core_subsystem[] = {
-       &target_core_fabrics,
-       NULL,
-};
+int target_depend_item(struct config_item *item)
+{
+       return configfs_depend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_depend_item);
+
+void target_undepend_item(struct config_item *item)
+{
+       return configfs_undepend_item(&target_core_fabrics, item);
+}
+EXPORT_SYMBOL(target_undepend_item);
 
 /*##############################################################################
 // Start functions called by external Target Fabrics Modules
@@ -467,7 +470,6 @@ int target_register_template(const struct target_core_fabric_ops *fo)
         * struct target_fabric_configfs->tf_cit_tmpl
         */
        tf->tf_module = fo->module;
-       tf->tf_subsys = target_core_subsystem[0];
        snprintf(tf->tf_name, TARGET_FABRIC_NAME_SIZE, "%s", fo->name);
 
        tf->tf_ops = *fo;
@@ -809,7 +811,7 @@ static ssize_t target_core_dev_pr_show_attr_res_holder(struct se_device *dev,
 {
        int ret;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return sprintf(page, "Passthrough\n");
 
        spin_lock(&dev->dev_reservation_lock);
@@ -960,7 +962,7 @@ SE_DEV_PR_ATTR_RO(res_pr_type);
 static ssize_t target_core_dev_pr_show_attr_res_type(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return sprintf(page, "SPC_PASSTHROUGH\n");
        else if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return sprintf(page, "SPC2_RESERVATIONS\n");
@@ -973,7 +975,7 @@ SE_DEV_PR_ATTR_RO(res_type);
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_active(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        return sprintf(page, "APTPL Bit Status: %s\n",
@@ -988,7 +990,7 @@ SE_DEV_PR_ATTR_RO(res_aptpl_active);
 static ssize_t target_core_dev_pr_show_attr_res_aptpl_metadata(
                struct se_device *dev, char *page)
 {
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        return sprintf(page, "Ready to process PR APTPL metadata..\n");
@@ -1035,7 +1037,7 @@ static ssize_t target_core_dev_pr_store_attr_res_aptpl_metadata(
        u16 port_rpti = 0, tpgt = 0;
        u8 type = 0, scope;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
        if (dev->dev_reservation_flags & DRF_SPC2_RESERVATIONS)
                return 0;
@@ -2870,7 +2872,7 @@ static int __init target_core_init_configfs(void)
 {
        struct config_group *target_cg, *hba_cg = NULL, *alua_cg = NULL;
        struct config_group *lu_gp_cg = NULL;
-       struct configfs_subsystem *subsys;
+       struct configfs_subsystem *subsys = &target_core_fabrics;
        struct t10_alua_lu_gp *lu_gp;
        int ret;
 
@@ -2878,7 +2880,6 @@ static int __init target_core_init_configfs(void)
                " Engine: %s on %s/%s on "UTS_RELEASE"\n",
                TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
 
-       subsys = target_core_subsystem[0];
        config_group_init(&subsys->su_group);
        mutex_init(&subsys->su_mutex);
 
@@ -3008,13 +3009,10 @@ out_global:
 
 static void __exit target_core_exit_configfs(void)
 {
-       struct configfs_subsystem *subsys;
        struct config_group *hba_cg, *alua_cg, *lu_gp_cg;
        struct config_item *item;
        int i;
 
-       subsys = target_core_subsystem[0];
-
        lu_gp_cg = &alua_lu_gps_group;
        for (i = 0; lu_gp_cg->default_groups[i]; i++) {
                item = &lu_gp_cg->default_groups[i]->cg_item;
@@ -3045,8 +3043,8 @@ static void __exit target_core_exit_configfs(void)
         * We expect subsys->su_group.default_groups to be released
         * by configfs subsystem provider logic..
         */
-       configfs_unregister_subsystem(subsys);
-       kfree(subsys->su_group.default_groups);
+       configfs_unregister_subsystem(&target_core_fabrics);
+       kfree(target_core_fabrics.su_group.default_groups);
 
        core_alua_free_lu_gp(default_lu_gp);
        default_lu_gp = NULL;
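
With the configfs subsystem array gone, outside code can no longer reach through tf_subsys; pinning a config item against rmdir now goes through the two exported wrappers introduced above. A hedged usage sketch (pin_tpg is a made-up helper; header paths follow the usual target_core convention):

	#include <target/target_core_base.h>
	#include <target/target_core_fabric.h>

	static int pin_tpg(struct se_portal_group *tpg)
	{
		int ret = target_depend_item(&tpg->tpg_group.cg_item);

		if (ret)
			return ret;	/* item is being removed; don't touch it */

		/* ... tpg cannot be configfs-removed while the dep is held ... */

		target_undepend_item(&tpg->tpg_group.cg_item);
		return 0;
	}
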
index 7faa6aef9a4d5429cbf1d3810ebb181f7a911beb..ce5f768181ff6593a7afac365214c77b0f0aceab 100644 (file)
@@ -33,6 +33,7 @@
 #include <linux/kthread.h>
 #include <linux/in.h>
 #include <linux/export.h>
+#include <asm/unaligned.h>
 #include <net/sock.h>
 #include <net/tcp.h>
 #include <scsi/scsi.h>
@@ -527,7 +528,7 @@ static void core_export_port(
        list_add_tail(&port->sep_list, &dev->dev_sep_list);
        spin_unlock(&dev->se_port_lock);
 
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV &&
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH) &&
            !(dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)) {
                tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
                if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
@@ -1603,7 +1604,7 @@ int target_configure_device(struct se_device *dev)
         * anything virtual (IBLOCK, FILEIO, RAMDISK), but not for TCM/pSCSI
         * passthrough because this is being provided by the backend LLD.
         */
-       if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
+       if (!(dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)) {
                strncpy(&dev->t10_wwn.vendor[0], "LIO-ORG", 8);
                strncpy(&dev->t10_wwn.model[0],
                        dev->transport->inquiry_prod, 16);
@@ -1707,3 +1708,76 @@ void core_dev_release_virtual_lun0(void)
                target_free_device(g_lun0_dev);
        core_delete_hba(hba);
 }
+
+/*
+ * Common CDB parsing for kernel and user passthrough.
+ */
+sense_reason_t
+passthrough_parse_cdb(struct se_cmd *cmd,
+       sense_reason_t (*exec_cmd)(struct se_cmd *cmd))
+{
+       unsigned char *cdb = cmd->t_task_cdb;
+
+       /*
+        * Clear a lun set in the cdb if the initiator talking to us spoke
+        * an old standards version, as we can't assume the underlying device
+        * won't choke up on it.
+        */
+       switch (cdb[0]) {
+       case READ_10: /* SBC - RDProtect */
+       case READ_12: /* SBC - RDProtect */
+       case READ_16: /* SBC - RDProtect */
+       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
+       case VERIFY: /* SBC - VRProtect */
+       case VERIFY_16: /* SBC - VRProtect */
+       case WRITE_VERIFY: /* SBC - VRProtect */
+       case WRITE_VERIFY_12: /* SBC - VRProtect */
+       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
+               break;
+       default:
+               cdb[1] &= 0x1f; /* clear logical unit number */
+               break;
+       }
+
+       /*
+        * For REPORT LUNS we always need to emulate the response, for everything
+        * else, pass it up.
+        */
+       if (cdb[0] == REPORT_LUNS) {
+               cmd->execute_cmd = spc_emulate_report_luns;
+               return TCM_NO_SENSE;
+       }
+
+       /* Set DATA_CDB flag for ops that should have it */
+       switch (cdb[0]) {
+       case READ_6:
+       case READ_10:
+       case READ_12:
+       case READ_16:
+       case WRITE_6:
+       case WRITE_10:
+       case WRITE_12:
+       case WRITE_16:
+       case WRITE_VERIFY:
+       case WRITE_VERIFY_12:
+       case 0x8e: /* WRITE_VERIFY_16 */
+       case COMPARE_AND_WRITE:
+       case XDWRITEREAD_10:
+               cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+               break;
+       case VARIABLE_LENGTH_CMD:
+               switch (get_unaligned_be16(&cdb[8])) {
+               case READ_32:
+               case WRITE_32:
+               case 0x0c: /* WRITE_VERIFY_32 */
+               case XDWRITEREAD_32:
+                       cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
+                       break;
+               }
+       }
+
+       cmd->execute_cmd = exec_cmd;
+
+       return TCM_NO_SENSE;
+}
+EXPORT_SYMBOL(passthrough_parse_cdb);
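
passthrough_parse_cdb() is exported so any passthrough backend can reuse the LUN-clearing, REPORT LUNS emulation and SCF_SCSI_DATA_CDB flagging; a backend supplies only its execute callback, as the pSCSI conversion below shows. A sketch of the hook shape (the my_* names are illustrative; the usual target_core headers for se_cmd and sense_reason_t are assumed):

	static sense_reason_t my_execute_cmd(struct se_cmd *cmd)
	{
		/* hand the raw CDB to the underlying device here */
		return TCM_NO_SENSE;
	}

	static sense_reason_t my_parse_cdb(struct se_cmd *cmd)
	{
		if (cmd->se_cmd_flags & SCF_BIDI)
			return TCM_UNSUPPORTED_SCSI_OPCODE;

		return passthrough_parse_cdb(cmd, my_execute_cmd);
	}
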
index f7e6e51aed3614aa35e8a58c0bb1d2cfcc462141..3f27bfd816d87201c5f3cec3ad7857ead488191b 100644 (file)
@@ -958,7 +958,6 @@ static struct se_subsystem_api fileio_template = {
        .inquiry_prod           = "FILEIO",
        .inquiry_rev            = FD_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = fd_attach_hba,
        .detach_hba             = fd_detach_hba,
        .alloc_device           = fd_alloc_device,
index 1b7947c2510fc8c65872127738c83ccbb34cf6a3..8c965683789f9e141233edac76593e156a58bd2f 100644 (file)
@@ -904,7 +904,6 @@ static struct se_subsystem_api iblock_template = {
        .inquiry_prod           = "IBLOCK",
        .inquiry_rev            = IBLOCK_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
        .attach_hba             = iblock_attach_hba,
        .detach_hba             = iblock_detach_hba,
        .alloc_device           = iblock_alloc_device,
index 874a9bc988d807a615a9ed7516041bfe54176c4b..68bd7f5d9f73cf6feacd2dfefb951db99dd21c4f 100644 (file)
@@ -4,9 +4,6 @@
 /* target_core_alua.c */
 extern struct t10_alua_lu_gp *default_lu_gp;
 
-/* target_core_configfs.c */
-extern struct configfs_subsystem *target_core_subsystem[];
-
 /* target_core_device.c */
 extern struct mutex g_device_mutex;
 extern struct list_head g_device_list;
index c1aa9655e96ec13881bdee2040254887fde0e903..a15411c79ae99649041c216439e938f52a7c071a 100644 (file)
@@ -1367,41 +1367,26 @@ void core_scsi3_free_all_registrations(
 
 static int core_scsi3_tpg_depend_item(struct se_portal_group *tpg)
 {
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &tpg->tpg_group.cg_item);
+       return target_depend_item(&tpg->tpg_group.cg_item);
 }
 
 static void core_scsi3_tpg_undepend_item(struct se_portal_group *tpg)
 {
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &tpg->tpg_group.cg_item);
-
+       target_undepend_item(&tpg->tpg_group.cg_item);
        atomic_dec_mb(&tpg->tpg_pr_ref_count);
 }
 
 static int core_scsi3_nodeacl_depend_item(struct se_node_acl *nacl)
 {
-       struct se_portal_group *tpg = nacl->se_tpg;
-
        if (nacl->dynamic_node_acl)
                return 0;
-
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &nacl->acl_group.cg_item);
+       return target_depend_item(&nacl->acl_group.cg_item);
 }
 
 static void core_scsi3_nodeacl_undepend_item(struct se_node_acl *nacl)
 {
-       struct se_portal_group *tpg = nacl->se_tpg;
-
-       if (nacl->dynamic_node_acl) {
-               atomic_dec_mb(&nacl->acl_pr_ref_count);
-               return;
-       }
-
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &nacl->acl_group.cg_item);
-
+       if (!nacl->dynamic_node_acl)
+               target_undepend_item(&nacl->acl_group.cg_item);
        atomic_dec_mb(&nacl->acl_pr_ref_count);
 }
 
@@ -1419,8 +1404,7 @@ static int core_scsi3_lunacl_depend_item(struct se_dev_entry *se_deve)
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
-       return configfs_depend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &lun_acl->se_lun_group.cg_item);
+       return target_depend_item(&lun_acl->se_lun_group.cg_item);
 }
 
 static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
@@ -1438,9 +1422,7 @@ static void core_scsi3_lunacl_undepend_item(struct se_dev_entry *se_deve)
        nacl = lun_acl->se_lun_nacl;
        tpg = nacl->se_tpg;
 
-       configfs_undepend_item(tpg->se_tpg_tfo->tf_subsys,
-                       &lun_acl->se_lun_group.cg_item);
-
+       target_undepend_item(&lun_acl->se_lun_group.cg_item);
        atomic_dec_mb(&se_deve->pr_ref_count);
 }
 
@@ -4111,7 +4093,7 @@ target_check_reservation(struct se_cmd *cmd)
                return 0;
        if (dev->se_hba->hba_flags & HBA_FLAGS_INTERNAL_USE)
                return 0;
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        spin_lock(&dev->dev_reservation_lock);
index f6c954c4635f5dab27ac24ea117dd284561c836a..ecc5eaef13d6c38956a213a784ae66e1b4e0411b 100644 (file)
@@ -521,6 +521,7 @@ static int pscsi_configure_device(struct se_device *dev)
                                        " pdv_host_id: %d\n", pdv->pdv_host_id);
                                return -EINVAL;
                        }
+                       pdv->pdv_lld_host = sh;
                }
        } else {
                if (phv->phv_mode == PHV_VIRTUAL_HOST_ID) {
@@ -603,6 +604,8 @@ static void pscsi_free_device(struct se_device *dev)
                if ((phv->phv_mode == PHV_LLD_SCSI_HOST_NO) &&
                    (phv->phv_lld_host != NULL))
                        scsi_host_put(phv->phv_lld_host);
+               else if (pdv->pdv_lld_host)
+                       scsi_host_put(pdv->pdv_lld_host);
 
                if ((sd->type == TYPE_DISK) || (sd->type == TYPE_ROM))
                        scsi_device_put(sd);
@@ -970,64 +973,13 @@ fail:
        return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
 }
 
-/*
- * Clear a lun set in the cdb if the initiator talking to use spoke
- * and old standards version, as we can't assume the underlying device
- * won't choke up on it.
- */
-static inline void pscsi_clear_cdb_lun(unsigned char *cdb)
-{
-       switch (cdb[0]) {
-       case READ_10: /* SBC - RDProtect */
-       case READ_12: /* SBC - RDProtect */
-       case READ_16: /* SBC - RDProtect */
-       case SEND_DIAGNOSTIC: /* SPC - SELF-TEST Code */
-       case VERIFY: /* SBC - VRProtect */
-       case VERIFY_16: /* SBC - VRProtect */
-       case WRITE_VERIFY: /* SBC - VRProtect */
-       case WRITE_VERIFY_12: /* SBC - VRProtect */
-       case MAINTENANCE_IN: /* SPC - Parameter Data Format for SA RTPG */
-               break;
-       default:
-               cdb[1] &= 0x1f; /* clear logical unit number */
-               break;
-       }
-}
-
 static sense_reason_t
 pscsi_parse_cdb(struct se_cmd *cmd)
 {
-       unsigned char *cdb = cmd->t_task_cdb;
-
        if (cmd->se_cmd_flags & SCF_BIDI)
                return TCM_UNSUPPORTED_SCSI_OPCODE;
 
-       pscsi_clear_cdb_lun(cdb);
-
-       /*
-        * For REPORT LUNS we always need to emulate the response, for everything
-        * else the default for pSCSI is to pass the command to the underlying
-        * LLD / physical hardware.
-        */
-       switch (cdb[0]) {
-       case REPORT_LUNS:
-               cmd->execute_cmd = spc_emulate_report_luns;
-               return 0;
-       case READ_6:
-       case READ_10:
-       case READ_12:
-       case READ_16:
-       case WRITE_6:
-       case WRITE_10:
-       case WRITE_12:
-       case WRITE_16:
-       case WRITE_VERIFY:
-               cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-               /* FALLTHROUGH*/
-       default:
-               cmd->execute_cmd = pscsi_execute_cmd;
-               return 0;
-       }
+       return passthrough_parse_cdb(cmd, pscsi_execute_cmd);
 }
 
 static sense_reason_t
@@ -1189,7 +1141,7 @@ static struct configfs_attribute *pscsi_backend_dev_attrs[] = {
 static struct se_subsystem_api pscsi_template = {
        .name                   = "pscsi",
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_PHBA_PDEV,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = pscsi_attach_hba,
        .detach_hba             = pscsi_detach_hba,
        .pmode_enable_hba       = pscsi_pmode_enable_hba,
index 1bd757dff8eee3806cae1d3da6ce33804e5deb17..820d3052b775caf438912d703402ebb4172d7f31 100644 (file)
@@ -45,6 +45,7 @@ struct pscsi_dev_virt {
        int     pdv_lun_id;
        struct block_device *pdv_bd;
        struct scsi_device *pdv_sd;
+       struct Scsi_Host *pdv_lld_host;
 } ____cacheline_aligned;
 
 typedef enum phv_modes {
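
The new pdv_lld_host field exists so the reference taken on the Scsi_Host during configure has a matching scsi_host_put() at free time in the virtual-host-ID case as well. A minimal sketch of the get/put pairing (the dev_sketch names are illustrative):

	#include <scsi/scsi_host.h>

	struct dev_sketch { struct Scsi_Host *lld_host; };

	static int dev_sketch_configure(struct dev_sketch *d, unsigned short host_no)
	{
		struct Scsi_Host *sh = scsi_host_lookup(host_no);	/* takes a ref */

		if (!sh)
			return -ENODEV;
		d->lld_host = sh;	/* remember it so teardown can drop it */
		return 0;
	}

	static void dev_sketch_free(struct dev_sketch *d)
	{
		if (d->lld_host)
			scsi_host_put(d->lld_host);	/* drop exactly once */
	}
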
index a263bf5fab8d4538384f557aef1a3df7df3d9792..d16489b6a1a4767ef4a8ba9445998a7bff2845d8 100644 (file)
@@ -733,7 +733,6 @@ static struct se_subsystem_api rd_mcp_template = {
        .name                   = "rd_mcp",
        .inquiry_prod           = "RAMDISK-MCP",
        .inquiry_rev            = RD_MCP_VERSION,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_VDEV,
        .attach_hba             = rd_attach_hba,
        .detach_hba             = rd_detach_hba,
        .alloc_device           = rd_alloc_device,
index 8855781ac653026aa0b513340b150e5f33f05f28..733824e3825f4845e9035b9f00a7d553b9d59d6e 100644 (file)
@@ -568,7 +568,7 @@ sbc_compare_and_write(struct se_cmd *cmd)
         * comparison using SGLs at cmd->t_bidi_data_sg..
         */
        rc = down_interruptible(&dev->caw_sem);
-       if ((rc != 0) || signal_pending(current)) {
+       if (rc != 0) {
                cmd->transport_complete_callback = NULL;
                return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
        }
index 3fe5cb240b6f6a5b4c8a3fb42396b77dd5701f74..675f2d9d1f14c69142d63179afa38e5b74255243 100644 (file)
@@ -1196,7 +1196,7 @@ transport_check_alloc_task_attr(struct se_cmd *cmd)
         * Check if SAM Task Attribute emulation is enabled for this
         * struct se_device storage object
         */
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return 0;
 
        if (cmd->sam_task_attr == TCM_ACA_TAG) {
@@ -1770,7 +1770,7 @@ static int target_write_prot_action(struct se_cmd *cmd)
                                                   sectors, 0, NULL, 0);
                if (unlikely(cmd->pi_err)) {
                        spin_lock_irq(&cmd->t_state_lock);
-                       cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+                       cmd->transport_state &= ~(CMD_T_BUSY|CMD_T_SENT);
                        spin_unlock_irq(&cmd->t_state_lock);
                        transport_generic_request_failure(cmd, cmd->pi_err);
                        return -1;
@@ -1787,7 +1787,7 @@ static bool target_handle_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return false;
 
        /*
@@ -1868,7 +1868,7 @@ void target_execute_cmd(struct se_cmd *cmd)
 
        if (target_handle_task_attr(cmd)) {
                spin_lock_irq(&cmd->t_state_lock);
-               cmd->transport_state &= ~CMD_T_BUSY|CMD_T_SENT;
+               cmd->transport_state &= ~(CMD_T_BUSY | CMD_T_SENT);
                spin_unlock_irq(&cmd->t_state_lock);
                return;
        }
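The precedence fix in the two hunks above matters because `~` binds tighter than `|`: `~CMD_T_BUSY|CMD_T_SENT` complements only CMD_T_BUSY and then ORs CMD_T_SENT into the mask, so the AND never cleared CMD_T_SENT. A self-contained demonstration, using illustrative flag values rather than the kernel's:

#include <assert.h>

#define CMD_T_BUSY (1u << 0)	/* illustrative bit positions */
#define CMD_T_SENT (1u << 1)

int main(void)
{
	unsigned int state = CMD_T_BUSY | CMD_T_SENT;

	/* Buggy: ~ applies to CMD_T_BUSY alone, then | sets the SENT bit
	 * in the mask, so the AND leaves CMD_T_SENT set. */
	unsigned int buggy = state & (~CMD_T_BUSY | CMD_T_SENT);
	assert(buggy & CMD_T_SENT);

	/* Fixed: parenthesize so both bits are cleared. */
	unsigned int fixed = state & ~(CMD_T_BUSY | CMD_T_SENT);
	assert(!(fixed & (CMD_T_BUSY | CMD_T_SENT)));
	return 0;
}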
@@ -1912,7 +1912,7 @@ static void transport_complete_task_attr(struct se_cmd *cmd)
 {
        struct se_device *dev = cmd->se_dev;
 
-       if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV)
+       if (dev->transport->transport_flags & TRANSPORT_FLAG_PASSTHROUGH)
                return;
 
        if (cmd->sam_task_attr == TCM_SIMPLE_TAG) {
@@ -1957,8 +1957,7 @@ static void transport_complete_qf(struct se_cmd *cmd)
        case DMA_TO_DEVICE:
                if (cmd->se_cmd_flags & SCF_BIDI) {
                        ret = cmd->se_tfo->queue_data_in(cmd);
-                       if (ret < 0)
-                               break;
+                       break;
                }
                /* Fall through for DMA_TO_DEVICE */
        case DMA_NONE:
index dbc872a6c9816e95211f5b93bb9f623233d249ba..07d2996d8c1fe922334ee57dfe4d27fd9d7685f8 100644 (file)
@@ -71,13 +71,6 @@ struct tcmu_hba {
        u32 host_id;
 };
 
-/* User wants all cmds or just some */
-enum passthru_level {
-       TCMU_PASS_ALL = 0,
-       TCMU_PASS_IO,
-       TCMU_PASS_INVALID,
-};
-
 #define TCMU_CONFIG_LEN 256
 
 struct tcmu_dev {
@@ -89,7 +82,6 @@ struct tcmu_dev {
 #define TCMU_DEV_BIT_OPEN 0
 #define TCMU_DEV_BIT_BROKEN 1
        unsigned long flags;
-       enum passthru_level pass_level;
 
        struct uio_info uio_info;
 
@@ -683,8 +675,6 @@ static struct se_device *tcmu_alloc_device(struct se_hba *hba, const char *name)
        setup_timer(&udev->timeout, tcmu_device_timedout,
                (unsigned long)udev);
 
-       udev->pass_level = TCMU_PASS_ALL;
-
        return &udev->se_dev;
 }
 
@@ -948,13 +938,13 @@ static void tcmu_free_device(struct se_device *dev)
 }
 
 enum {
-       Opt_dev_config, Opt_dev_size, Opt_err, Opt_pass_level,
+       Opt_dev_config, Opt_dev_size, Opt_hw_block_size, Opt_err,
 };
 
 static match_table_t tokens = {
        {Opt_dev_config, "dev_config=%s"},
        {Opt_dev_size, "dev_size=%u"},
-       {Opt_pass_level, "pass_level=%u"},
+       {Opt_hw_block_size, "hw_block_size=%u"},
        {Opt_err, NULL}
 };
 
@@ -965,7 +955,7 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
        char *orig, *ptr, *opts, *arg_p;
        substring_t args[MAX_OPT_ARGS];
        int ret = 0, token;
-       int arg;
+       unsigned long tmp_ul;
 
        opts = kstrdup(page, GFP_KERNEL);
        if (!opts)
@@ -998,15 +988,23 @@ static ssize_t tcmu_set_configfs_dev_params(struct se_device *dev,
                        if (ret < 0)
                                pr_err("kstrtoul() failed for dev_size=\n");
                        break;
-               case Opt_pass_level:
-                       match_int(args, &arg);
-                       if (arg >= TCMU_PASS_INVALID) {
-                               pr_warn("TCMU: Invalid pass_level: %d\n", arg);
+               case Opt_hw_block_size:
+                       arg_p = match_strdup(&args[0]);
+                       if (!arg_p) {
+                               ret = -ENOMEM;
                                break;
                        }
-
-                       pr_debug("TCMU: Setting pass_level to %d\n", arg);
-                       udev->pass_level = arg;
+                       ret = kstrtoul(arg_p, 0, &tmp_ul);
+                       kfree(arg_p);
+                       if (ret < 0) {
+                               pr_err("kstrtoul() failed for hw_block_size=\n");
+                               break;
+                       }
+                       if (!tmp_ul) {
+                               pr_err("hw_block_size must be nonzero\n");
+                               break;
+                       }
+                       dev->dev_attrib.hw_block_size = tmp_ul;
                        break;
                default:
                        break;
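The hunk above drops the pass_level option and adds hw_block_size, parsed with kstrtoul() and rejected when zero. A hedged userspace sketch of the same validate-then-store shape, with strtoul() standing in for kstrtoul():

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

/* Parse the value of a "hw_block_size=<n>" token, rejecting zero. */
static int parse_hw_block_size(const char *arg, unsigned long *out)
{
	unsigned long val;
	char *end;

	errno = 0;
	val = strtoul(arg, &end, 0);
	if (errno || end == arg || *end != '\0')
		return -EINVAL;		/* not a clean number */
	if (!val)
		return -EINVAL;		/* hw_block_size must be nonzero */
	*out = val;
	return 0;
}

int main(void)
{
	unsigned long bs;

	if (parse_hw_block_size("512", &bs) == 0)
		printf("hw_block_size=%lu\n", bs);
	return 0;
}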
@@ -1024,8 +1022,7 @@ static ssize_t tcmu_show_configfs_dev_params(struct se_device *dev, char *b)
 
        bl = sprintf(b + bl, "Config: %s ",
                     udev->dev_config[0] ? udev->dev_config : "NULL");
-       bl += sprintf(b + bl, "Size: %zu PassLevel: %u\n",
-                     udev->dev_size, udev->pass_level);
+       bl += sprintf(b + bl, "Size: %zu\n", udev->dev_size);
 
        return bl;
 }
@@ -1038,20 +1035,6 @@ static sector_t tcmu_get_blocks(struct se_device *dev)
                       dev->dev_attrib.block_size);
 }
 
-static sense_reason_t
-tcmu_execute_rw(struct se_cmd *se_cmd, struct scatterlist *sgl, u32 sgl_nents,
-               enum dma_data_direction data_direction)
-{
-       int ret;
-
-       ret = tcmu_queue_cmd(se_cmd);
-
-       if (ret != 0)
-               return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
-       else
-               return TCM_NO_SENSE;
-}
-
 static sense_reason_t
 tcmu_pass_op(struct se_cmd *se_cmd)
 {
@@ -1063,91 +1046,29 @@ tcmu_pass_op(struct se_cmd *se_cmd)
                return TCM_NO_SENSE;
 }
 
-static struct sbc_ops tcmu_sbc_ops = {
-       .execute_rw = tcmu_execute_rw,
-       .execute_sync_cache     = tcmu_pass_op,
-       .execute_write_same     = tcmu_pass_op,
-       .execute_write_same_unmap = tcmu_pass_op,
-       .execute_unmap          = tcmu_pass_op,
-};
-
 static sense_reason_t
 tcmu_parse_cdb(struct se_cmd *cmd)
 {
-       unsigned char *cdb = cmd->t_task_cdb;
-       struct tcmu_dev *udev = TCMU_DEV(cmd->se_dev);
-       sense_reason_t ret;
-
-       switch (udev->pass_level) {
-       case TCMU_PASS_ALL:
-               /* We're just like pscsi, then */
-               /*
-                * For REPORT LUNS we always need to emulate the response, for everything
-                * else, pass it up.
-                */
-               switch (cdb[0]) {
-               case REPORT_LUNS:
-                       cmd->execute_cmd = spc_emulate_report_luns;
-                       break;
-               case READ_6:
-               case READ_10:
-               case READ_12:
-               case READ_16:
-               case WRITE_6:
-               case WRITE_10:
-               case WRITE_12:
-               case WRITE_16:
-               case WRITE_VERIFY:
-                       cmd->se_cmd_flags |= SCF_SCSI_DATA_CDB;
-                       /* FALLTHROUGH */
-               default:
-                       cmd->execute_cmd = tcmu_pass_op;
-               }
-               ret = TCM_NO_SENSE;
-               break;
-       case TCMU_PASS_IO:
-               ret = sbc_parse_cdb(cmd, &tcmu_sbc_ops);
-               break;
-       default:
-               pr_err("Unknown tcm-user pass level %d\n", udev->pass_level);
-               ret = TCM_CHECK_CONDITION_ABORT_CMD;
-       }
-
-       return ret;
+       return passthrough_parse_cdb(cmd, tcmu_pass_op);
 }
 
-DEF_TB_DEFAULT_ATTRIBS(tcmu);
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_pi_prot_type);
+TB_DEV_ATTR_RO(tcmu, hw_pi_prot_type);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_block_size);
+TB_DEV_ATTR_RO(tcmu, hw_block_size);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_max_sectors);
+TB_DEV_ATTR_RO(tcmu, hw_max_sectors);
+
+DEF_TB_DEV_ATTRIB_RO(tcmu, hw_queue_depth);
+TB_DEV_ATTR_RO(tcmu, hw_queue_depth);
 
 static struct configfs_attribute *tcmu_backend_dev_attrs[] = {
-       &tcmu_dev_attrib_emulate_model_alias.attr,
-       &tcmu_dev_attrib_emulate_dpo.attr,
-       &tcmu_dev_attrib_emulate_fua_write.attr,
-       &tcmu_dev_attrib_emulate_fua_read.attr,
-       &tcmu_dev_attrib_emulate_write_cache.attr,
-       &tcmu_dev_attrib_emulate_ua_intlck_ctrl.attr,
-       &tcmu_dev_attrib_emulate_tas.attr,
-       &tcmu_dev_attrib_emulate_tpu.attr,
-       &tcmu_dev_attrib_emulate_tpws.attr,
-       &tcmu_dev_attrib_emulate_caw.attr,
-       &tcmu_dev_attrib_emulate_3pc.attr,
-       &tcmu_dev_attrib_pi_prot_type.attr,
        &tcmu_dev_attrib_hw_pi_prot_type.attr,
-       &tcmu_dev_attrib_pi_prot_format.attr,
-       &tcmu_dev_attrib_enforce_pr_isids.attr,
-       &tcmu_dev_attrib_is_nonrot.attr,
-       &tcmu_dev_attrib_emulate_rest_reord.attr,
-       &tcmu_dev_attrib_force_pr_aptpl.attr,
        &tcmu_dev_attrib_hw_block_size.attr,
-       &tcmu_dev_attrib_block_size.attr,
        &tcmu_dev_attrib_hw_max_sectors.attr,
-       &tcmu_dev_attrib_optimal_sectors.attr,
        &tcmu_dev_attrib_hw_queue_depth.attr,
-       &tcmu_dev_attrib_queue_depth.attr,
-       &tcmu_dev_attrib_max_unmap_lba_count.attr,
-       &tcmu_dev_attrib_max_unmap_block_desc_count.attr,
-       &tcmu_dev_attrib_unmap_granularity.attr,
-       &tcmu_dev_attrib_unmap_granularity_alignment.attr,
-       &tcmu_dev_attrib_max_write_same_len.attr,
        NULL,
 };
 
@@ -1156,7 +1077,7 @@ static struct se_subsystem_api tcmu_template = {
        .inquiry_prod           = "USER",
        .inquiry_rev            = TCMU_VERSION,
        .owner                  = THIS_MODULE,
-       .transport_type         = TRANSPORT_PLUGIN_VHBA_PDEV,
+       .transport_flags        = TRANSPORT_FLAG_PASSTHROUGH,
        .attach_hba             = tcmu_attach_hba,
        .detach_hba             = tcmu_detach_hba,
        .alloc_device           = tcmu_alloc_device,
index a600ff15dcfd1674140170b0808d494db64333ea..8fd680ac941bde49cd7803134da5beb77c7092b0 100644 (file)
@@ -58,7 +58,6 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                                        bool src)
 {
        struct se_device *se_dev;
-       struct configfs_subsystem *subsys = target_core_subsystem[0];
        unsigned char tmp_dev_wwn[XCOPY_NAA_IEEE_REGEX_LEN], *dev_wwn;
        int rc;
 
@@ -90,8 +89,7 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                                " se_dev\n", xop->src_dev);
                }
 
-               rc = configfs_depend_item(subsys,
-                               &se_dev->dev_group.cg_item);
+               rc = target_depend_item(&se_dev->dev_group.cg_item);
                if (rc != 0) {
                        pr_err("configfs_depend_item attempt failed:"
                                " %d for se_dev: %p\n", rc, se_dev);
@@ -99,8 +97,8 @@ static int target_xcopy_locate_se_dev_e4(struct se_cmd *se_cmd, struct xcopy_op
                        return rc;
                }
 
-               pr_debug("Called configfs_depend_item for subsys: %p se_dev: %p"
-                       " se_dev->se_dev_group: %p\n", subsys, se_dev,
+               pr_debug("Called configfs_depend_item for se_dev: %p"
+                       " se_dev->se_dev_group: %p\n", se_dev,
                        &se_dev->dev_group);
 
                mutex_unlock(&g_device_mutex);
@@ -373,7 +371,6 @@ static int xcopy_pt_get_cmd_state(struct se_cmd *se_cmd)
 
 static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
 {
-       struct configfs_subsystem *subsys = target_core_subsystem[0];
        struct se_device *remote_dev;
 
        if (xop->op_origin == XCOL_SOURCE_RECV_OP)
@@ -381,11 +378,11 @@ static void xcopy_pt_undepend_remotedev(struct xcopy_op *xop)
        else
                remote_dev = xop->src_dev;
 
-       pr_debug("Calling configfs_undepend_item for subsys: %p"
+       pr_debug("Calling configfs_undepend_item for"
                  " remote_dev: %p remote_dev->dev_group: %p\n",
-                 subsys, remote_dev, &remote_dev->dev_group.cg_item);
+                 remote_dev, &remote_dev->dev_group.cg_item);
 
-       configfs_undepend_item(subsys, &remote_dev->dev_group.cg_item);
+       target_undepend_item(&remote_dev->dev_group.cg_item);
 }
 
 static void xcopy_pt_release_cmd(struct se_cmd *se_cmd)
index c2556cf5186bc75b71870ed6a304fda430b8521b..01255fd65135949ce78b7f94fde7bccc196ac75c 100644 (file)
@@ -224,9 +224,9 @@ static const struct armada_thermal_data armada380_data = {
        .is_valid_shift = 10,
        .temp_shift = 0,
        .temp_mask = 0x3ff,
-       .coef_b = 1169498786UL,
-       .coef_m = 2000000UL,
-       .coef_div = 4289,
+       .coef_b = 2931108200UL,
+       .coef_m = 5000000UL,
+       .coef_div = 10502,
        .inverted = true,
 };
 
index 12623bc02f46679674d9bd1c3f1574fc21b57c37..725718e97a0bc86f8d69d3fc112fba7670543402 100644 (file)
@@ -206,51 +206,57 @@ static void find_target_mwait(void)
 
 }
 
+struct pkg_cstate_info {
+       bool skip;
+       int msr_index;
+       int cstate_id;
+};
+
+#define PKG_CSTATE_INIT(id) {                          \
+               .msr_index = MSR_PKG_C##id##_RESIDENCY, \
+               .cstate_id = id                         \
+                       }
+
+static struct pkg_cstate_info pkg_cstates[] = {
+       PKG_CSTATE_INIT(2),
+       PKG_CSTATE_INIT(3),
+       PKG_CSTATE_INIT(6),
+       PKG_CSTATE_INIT(7),
+       PKG_CSTATE_INIT(8),
+       PKG_CSTATE_INIT(9),
+       PKG_CSTATE_INIT(10),
+       {NULL},
+};
+
 static bool has_pkg_state_counter(void)
 {
-       u64 tmp;
-       return !rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &tmp) ||
-              !rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &tmp) ||
-              !rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &tmp) ||
-              !rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &tmp);
+       u64 val;
+       struct pkg_cstate_info *info = pkg_cstates;
+
+       /* check if any one of the counter MSRs exists */
+       while (info->msr_index) {
+               if (!rdmsrl_safe(info->msr_index, &val))
+                       return true;
+               info++;
+       }
+
+       return false;
 }
 
 static u64 pkg_state_counter(void)
 {
        u64 val;
        u64 count = 0;
-
-       static bool skip_c2;
-       static bool skip_c3;
-       static bool skip_c6;
-       static bool skip_c7;
-
-       if (!skip_c2) {
-               if (!rdmsrl_safe(MSR_PKG_C2_RESIDENCY, &val))
-                       count += val;
-               else
-                       skip_c2 = true;
-       }
-
-       if (!skip_c3) {
-               if (!rdmsrl_safe(MSR_PKG_C3_RESIDENCY, &val))
-                       count += val;
-               else
-                       skip_c3 = true;
-       }
-
-       if (!skip_c6) {
-               if (!rdmsrl_safe(MSR_PKG_C6_RESIDENCY, &val))
-                       count += val;
-               else
-                       skip_c6 = true;
-       }
-
-       if (!skip_c7) {
-               if (!rdmsrl_safe(MSR_PKG_C7_RESIDENCY, &val))
-                       count += val;
-               else
-                       skip_c7 = true;
+       struct pkg_cstate_info *info = pkg_cstates;
+
+       while (info->msr_index) {
+               if (!info->skip) {
+                       if (!rdmsrl_safe(info->msr_index, &val))
+                               count += val;
+                       else
+                               info->skip = true;
+               }
+               info++;
        }
 
        return count;
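The powerclamp refactor above replaces four copies of the same read-or-remember-failure logic with a walk over the pkg_cstates table, so supporting another package C-state becomes one table entry instead of another code block. A hedged userspace analogue of the table-driven pattern, with a stub standing in for rdmsrl_safe() and illustrative MSR numbers:

#include <stdbool.h>
#include <stdio.h>

struct counter_info {
	bool skip;		/* set once a read fails; never retried */
	int msr_index;		/* 0 terminates the table */
};

/* Stub standing in for rdmsrl_safe(): returns 0 on success. */
static int msr_read(int index, unsigned long long *val)
{
	if (index == 0x3f9)	/* pretend this MSR is absent */
		return -1;
	*val = 100;		/* fake residency count */
	return 0;
}

static struct counter_info counters[] = {
	{ .msr_index = 0x60d },	/* e.g. a C2 residency MSR */
	{ .msr_index = 0x3f9 },	/* e.g. a C6 residency MSR */
	{},			/* terminator */
};

static unsigned long long sum_counters(void)
{
	unsigned long long val, count = 0;
	struct counter_info *info;

	for (info = counters; info->msr_index; info++) {
		if (info->skip)
			continue;
		if (!msr_read(info->msr_index, &val))
			count += val;
		else
			info->skip = true;	/* remember the failure */
	}
	return count;
}

int main(void)
{
	printf("%llu\n", sum_counters());	/* prints 100 */
	return 0;
}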
@@ -667,7 +673,7 @@ static struct thermal_cooling_device_ops powerclamp_cooling_ops = {
 };
 
 /* runs on Nehalem and later */
-static const struct x86_cpu_id intel_powerclamp_ids[] = {
+static const struct x86_cpu_id intel_powerclamp_ids[] __initconst = {
        { X86_VENDOR_INTEL, 6, 0x1a},
        { X86_VENDOR_INTEL, 6, 0x1c},
        { X86_VENDOR_INTEL, 6, 0x1e},
@@ -689,12 +695,13 @@ static const struct x86_cpu_id intel_powerclamp_ids[] = {
        { X86_VENDOR_INTEL, 6, 0x46},
        { X86_VENDOR_INTEL, 6, 0x4c},
        { X86_VENDOR_INTEL, 6, 0x4d},
+       { X86_VENDOR_INTEL, 6, 0x4f},
        { X86_VENDOR_INTEL, 6, 0x56},
        {}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_powerclamp_ids);
 
-static int powerclamp_probe(void)
+static int __init powerclamp_probe(void)
 {
        if (!x86_match_cpu(intel_powerclamp_ids)) {
                pr_err("Intel powerclamp does not run on family %d model %d\n",
@@ -760,7 +767,7 @@ file_error:
        debugfs_remove_recursive(debug_dir);
 }
 
-static int powerclamp_init(void)
+static int __init powerclamp_init(void)
 {
        int retval;
        int bitmap_size;
@@ -809,7 +816,7 @@ exit_free:
 }
 module_init(powerclamp_init);
 
-static void powerclamp_exit(void)
+static void __exit powerclamp_exit(void)
 {
        unregister_hotcpu_notifier(&powerclamp_cpu_notifier);
        end_power_clamp();
index 3aa46ac7cdbc33765a90279da09fd84507a09d6c..cd8f5f93b42c45aa4cde0f8c4aa346836006f6da 100644 (file)
@@ -529,7 +529,7 @@ static int rockchip_thermal_probe(struct platform_device *pdev)
 
        thermal->pclk = devm_clk_get(&pdev->dev, "apb_pclk");
        if (IS_ERR(thermal->pclk)) {
-               error = PTR_ERR(thermal->clk);
+               error = PTR_ERR(thermal->pclk);
                dev_err(&pdev->dev, "failed to get apb_pclk clock: %d\n",
                        error);
                return error;
index 0531c752fbbb6680c40e939ad2a14fdc1830f357..8e391812e50377664f079cb83f61c380b16e6d93 100644 (file)
@@ -103,7 +103,7 @@ static inline int of_thermal_get_ntrips(struct thermal_zone_device *tz)
 static inline bool of_thermal_is_trip_valid(struct thermal_zone_device *tz,
                                            int trip)
 {
-       return 0;
+       return false;
 }
 static inline const struct thermal_trip *
 of_thermal_get_trip_points(struct thermal_zone_device *tz)
index a4929272074f3f8a161ff7978289af475560b9eb..58b5c6694cd4361472b34fa941890e5121d3dba9 100644 (file)
@@ -420,7 +420,8 @@ const struct ti_bandgap_data dra752_data = {
                        TI_BANDGAP_FEATURE_FREEZE_BIT |
                        TI_BANDGAP_FEATURE_TALERT |
                        TI_BANDGAP_FEATURE_COUNTER_DELAY |
-                       TI_BANDGAP_FEATURE_HISTORY_BUFFER,
+                       TI_BANDGAP_FEATURE_HISTORY_BUFFER |
+                       TI_BANDGAP_FEATURE_ERRATA_814,
        .fclock_name = "l3instr_ts_gclk_div",
        .div_ck_name = "l3instr_ts_gclk_div",
        .conv_table = dra752_adc_to_temp,
index eff0c80fd4af50110cde6b4dc5cba3500787e91e..79ff70c446ba195ef893125b377567ff4ac2e58b 100644 (file)
@@ -319,7 +319,8 @@ const struct ti_bandgap_data omap5430_data = {
                        TI_BANDGAP_FEATURE_FREEZE_BIT |
                        TI_BANDGAP_FEATURE_TALERT |
                        TI_BANDGAP_FEATURE_COUNTER_DELAY |
-                       TI_BANDGAP_FEATURE_HISTORY_BUFFER,
+                       TI_BANDGAP_FEATURE_HISTORY_BUFFER |
+                       TI_BANDGAP_FEATURE_ERRATA_813,
        .fclock_name = "l3instr_ts_gclk_div",
        .div_ck_name = "l3instr_ts_gclk_div",
        .conv_table = omap5430_adc_to_temp,
index 62a5d449c38805019db7d554e6c0f7f43d215341..bc14dc874594e4d9fd37a68874bb6adb6eb4b562 100644 (file)
@@ -118,6 +118,37 @@ exit:
        return ret;
 }
 
+/**
+ * ti_errata814_bandgap_read_temp() - helper function to read dra7 sensor temperature
+ * @bgp: pointer to ti_bandgap structure
+ * @reg: desired register (offset) to be read
+ *
+ * Function to read the dra7 bandgap sensor temperature. This is done
+ * separately so as to work around the errata "Bandgap Temperature read
+ * Dtemp can be corrupted" (Errata ID: i814). Read accesses to the
+ * registers listed below can be corrupted due to incorrect
+ * resynchronization between clock domains:
+ * CTRL_CORE_DTEMP_MPU/GPU/CORE/DSPEVE/IVA_n (n = 0 to 4)
+ * CTRL_CORE_TEMP_SENSOR_MPU/GPU/CORE/DSPEVE/IVA_n
+ *
+ * Return: the register value.
+ */
+static u32 ti_errata814_bandgap_read_temp(struct ti_bandgap *bgp, u32 reg)
+{
+       u32 val1, val2;
+
+       val1 = ti_bandgap_readl(bgp, reg);
+       val2 = ti_bandgap_readl(bgp, reg);
+
+       /* If both reads returned the same value, that value is correct */
+       if (val1 == val2)
+               return val1;
+
+       /* if val1 and val2 differ, read a third time */
+       return ti_bandgap_readl(bgp, reg);
+}
+
 /**
  * ti_bandgap_read_temp() - helper function to read sensor temperature
  * @bgp: pointer to ti_bandgap structure
@@ -148,7 +179,11 @@ static u32 ti_bandgap_read_temp(struct ti_bandgap *bgp, int id)
        }
 
        /* read temperature */
-       temp = ti_bandgap_readl(bgp, reg);
+       if (TI_BANDGAP_HAS(bgp, ERRATA_814))
+               temp = ti_errata814_bandgap_read_temp(bgp, reg);
+       else
+               temp = ti_bandgap_readl(bgp, reg);
+
        temp &= tsr->bgap_dtemp_mask;
 
        if (TI_BANDGAP_HAS(bgp, FREEZE_BIT))
@@ -410,7 +445,7 @@ static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
 {
        struct temp_sensor_data *ts_data = bgp->conf->sensors[id].ts_data;
        struct temp_sensor_registers *tsr;
-       u32 thresh_val, reg_val, t_hot, t_cold;
+       u32 thresh_val, reg_val, t_hot, t_cold, ctrl;
        int err = 0;
 
        tsr = bgp->conf->sensors[id].registers;
@@ -442,8 +477,47 @@ static int ti_bandgap_update_alert_threshold(struct ti_bandgap *bgp, int id,
                  ~(tsr->threshold_thot_mask | tsr->threshold_tcold_mask);
        reg_val |= (t_hot << __ffs(tsr->threshold_thot_mask)) |
                   (t_cold << __ffs(tsr->threshold_tcold_mask));
+
+       /*
+        * Errata i813:
+        * Spurious Thermal Alert: Talert can happen randomly while the device
+        * remains under the temperature limit defined for this event to
+        * trigger. This spurious event is caused by an incorrect
+        * re-synchronization between clock domains. The comparison between
+        * the configured threshold and the current temperature value can
+        * happen while the value is transitioning (metastable), thus causing
+        * inappropriate event generation. No spurious event occurs as long
+        * as the threshold value stays unchanged. A spurious event can be
+        * generated while a thermal alert threshold is modified in
+        * CONTROL_BANDGAP_THRESHOLD_MPU/GPU/CORE/DSPEVE/IVA_n.
+        */
+
+       if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
+               /* Mask t_hot and t_cold events at the IP Level */
+               ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
+
+               if (hot)
+                       ctrl &= ~tsr->mask_hot_mask;
+               else
+                       ctrl &= ~tsr->mask_cold_mask;
+
+               ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
+       }
+
+       /* Write the threshold value */
        ti_bandgap_writel(bgp, reg_val, tsr->bgap_threshold);
 
+       if (TI_BANDGAP_HAS(bgp, ERRATA_813)) {
+               /* Unmask t_hot and t_cold events at the IP Level */
+               ctrl = ti_bandgap_readl(bgp, tsr->bgap_mask_ctrl);
+               if (hot)
+                       ctrl |= tsr->mask_hot_mask;
+               else
+                       ctrl |= tsr->mask_cold_mask;
+
+               ti_bandgap_writel(bgp, ctrl, tsr->bgap_mask_ctrl);
+       }
+
        if (err) {
                dev_err(bgp->dev, "failed to reprogram thot threshold\n");
                err = -EIO;
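The i813 workaround brackets the threshold update: mask the t_hot or t_cold event at the IP level, write the new threshold, then unmask, so the metastable comparison cannot raise a spurious alert while the value changes. A minimal sketch of that bracket pattern over stubbed register accessors (a plain array here, not MMIO):

#include <assert.h>

enum { REG_MASK_CTRL, REG_THRESHOLD, REG_COUNT };
#define EVENT_MASK_BIT (1u << 0)	/* illustrative bit position */

static unsigned int regs[REG_COUNT] = { EVENT_MASK_BIT, 0 };

static unsigned int reg_read(int r)		{ return regs[r]; }
static void reg_write(int r, unsigned int v)	{ regs[r] = v; }

static void update_threshold(unsigned int thresh)
{
	unsigned int ctrl;

	/* 1. Mask the event so a metastable compare cannot fire. */
	ctrl = reg_read(REG_MASK_CTRL);
	reg_write(REG_MASK_CTRL, ctrl & ~EVENT_MASK_BIT);

	/* 2. Change the threshold while alerts are suppressed. */
	reg_write(REG_THRESHOLD, thresh);

	/* 3. Unmask; spurious alerts only occur during the update. */
	ctrl = reg_read(REG_MASK_CTRL);
	reg_write(REG_MASK_CTRL, ctrl | EVENT_MASK_BIT);
}

int main(void)
{
	update_threshold(0x55);
	assert(reg_read(REG_THRESHOLD) == 0x55);
	assert(reg_read(REG_MASK_CTRL) & EVENT_MASK_BIT);
	return 0;
}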
index b3adf72f252d310779e5014b9ee272bf2a499946..0c52f7afba00b5335adbeba60cd31f547d2256a3 100644 (file)
@@ -318,6 +318,10 @@ struct ti_temp_sensor {
  * TI_BANDGAP_FEATURE_HISTORY_BUFFER - used when the bandgap device features
  *     a history buffer of temperatures.
  *
+ * TI_BANDGAP_FEATURE_ERRATA_814 - used to work around Errata 814 when the
+ *     bandgap device is affected by it
+ * TI_BANDGAP_FEATURE_ERRATA_813 - used to work around Errata 813 when the
+ *     bandgap device is affected by it
  * TI_BANDGAP_HAS(b, f) - macro to check if a bandgap device is capable of a
  *      specific feature (above) or not. Return non-zero, if yes.
  */
@@ -331,6 +335,8 @@ struct ti_temp_sensor {
 #define TI_BANDGAP_FEATURE_FREEZE_BIT          BIT(7)
 #define TI_BANDGAP_FEATURE_COUNTER_DELAY       BIT(8)
 #define TI_BANDGAP_FEATURE_HISTORY_BUFFER      BIT(9)
+#define TI_BANDGAP_FEATURE_ERRATA_814          BIT(10)
+#define TI_BANDGAP_FEATURE_ERRATA_813          BIT(11)
 #define TI_BANDGAP_HAS(b, f)                   \
                        ((b)->conf->features & TI_BANDGAP_FEATURE_ ## f)
 
index 5bab1c684bb11024c2e5a7ee143c18aefc54349b..7a3d146a5f0efc0dbc3b94e854adb4d81c85da4d 100644 (file)
@@ -289,7 +289,7 @@ static int xen_initial_domain_console_init(void)
                        return -ENOMEM;
        }
 
-       info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0);
+       info->irq = bind_virq_to_irq(VIRQ_CONSOLE, 0, false);
        info->vtermno = HVC_COOKIE;
 
        spin_lock(&xencons_lock);
index 04d9e23d1ee16a508e0e0b1331407bcf6b20a94b..358323c83b4f340dec1a915ef923145fb972d933 100644 (file)
@@ -174,13 +174,13 @@ struct mips_ejtag_fdc_tty {
 static inline void mips_ejtag_fdc_write(struct mips_ejtag_fdc_tty *priv,
                                        unsigned int offs, unsigned int data)
 {
-       iowrite32(data, priv->reg + offs);
+       __raw_writel(data, priv->reg + offs);
 }
 
 static inline unsigned int mips_ejtag_fdc_read(struct mips_ejtag_fdc_tty *priv,
                                               unsigned int offs)
 {
-       return ioread32(priv->reg + offs);
+       return __raw_readl(priv->reg + offs);
 }
 
 /* Encoding of byte stream in FDC words */
@@ -347,9 +347,9 @@ static void mips_ejtag_fdc_console_write(struct console *c, const char *s,
                s += inc[word.bytes - 1];
 
                /* Busy wait until there's space in fifo */
-               while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+               while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
                        ;
-               iowrite32(word.word, regs + REG_FDTX(c->index));
+               __raw_writel(word.word, regs + REG_FDTX(c->index));
        }
 out:
        local_irq_restore(flags);
@@ -1227,7 +1227,7 @@ static int kgdbfdc_read_char(void)
 
                /* Read next word from KGDB channel */
                do {
-                       stat = ioread32(regs + REG_FDSTAT);
+                       stat = __raw_readl(regs + REG_FDSTAT);
 
                        /* No data waiting? */
                        if (stat & REG_FDSTAT_RXE)
@@ -1236,7 +1236,7 @@ static int kgdbfdc_read_char(void)
                        /* Read next word */
                        channel = (stat & REG_FDSTAT_RXCHAN) >>
                                        REG_FDSTAT_RXCHAN_SHIFT;
-                       data = ioread32(regs + REG_FDRX);
+                       data = __raw_readl(regs + REG_FDRX);
                } while (channel != CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN);
 
                /* Decode into rbuf */
@@ -1266,9 +1266,10 @@ static void kgdbfdc_push_one(void)
                return;
 
        /* Busy wait until there's space in fifo */
-       while (ioread32(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
+       while (__raw_readl(regs + REG_FDSTAT) & REG_FDSTAT_TXF)
                ;
-       iowrite32(word.word, regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
+       __raw_writel(word.word,
+                    regs + REG_FDTX(CONFIG_MIPS_EJTAG_FDC_KGDB_CHAN));
 }
 
 /* flush the whole write buffer to the TX FIFO */
index 91abc00aa833b8493fe721e05024cd8368e23aa6..2c34c3249972de85f650d34fa0b25448efba3686 100644 (file)
@@ -3170,7 +3170,7 @@ static int gsmtty_break_ctl(struct tty_struct *tty, int state)
        return gsmtty_modem_update(dlci, encode);
 }
 
-static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
+static void gsmtty_cleanup(struct tty_struct *tty)
 {
        struct gsm_dlci *dlci = tty->driver_data;
        struct gsm_mux *gsm = dlci->gsm;
@@ -3178,7 +3178,6 @@ static void gsmtty_remove(struct tty_driver *driver, struct tty_struct *tty)
        dlci_put(dlci);
        dlci_put(gsm->dlci[0]);
        mux_put(gsm);
-       driver->ttys[tty->index] = NULL;
 }
 
 /* Virtual ttys for the demux */
@@ -3199,7 +3198,7 @@ static const struct tty_operations gsmtty_ops = {
        .tiocmget               = gsmtty_tiocmget,
        .tiocmset               = gsmtty_tiocmset,
        .break_ctl              = gsmtty_break_ctl,
-       .remove                 = gsmtty_remove,
+       .cleanup                = gsmtty_cleanup,
 };
 
 
index 644ddb841d9f54085bb82a903034af5aaf42de45..bbc4ce66c2c18dd30fb10b80f955a4f4565225c5 100644 (file)
@@ -600,7 +600,7 @@ static ssize_t n_hdlc_tty_read(struct tty_struct *tty, struct file *file,
        add_wait_queue(&tty->read_wait, &wait);
 
        for (;;) {
-               if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+               if (test_bit(TTY_OTHER_DONE, &tty->flags)) {
                        ret = -EIO;
                        break;
                }
@@ -828,7 +828,7 @@ static unsigned int n_hdlc_tty_poll(struct tty_struct *tty, struct file *filp,
                /* set bits for operations that won't block */
                if (n_hdlc->rx_buf_list.head)
                        mask |= POLLIN | POLLRDNORM;    /* readable */
-               if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
+               if (test_bit(TTY_OTHER_DONE, &tty->flags))
                        mask |= POLLHUP;
                if (tty_hung_up_p(filp))
                        mask |= POLLHUP;
index cf6e0f2e1331fd46310a6834d99896e2b943ea19..396344cb011fd1fafab05c3ddeeff1841e13e055 100644 (file)
@@ -162,6 +162,17 @@ static inline int tty_put_user(struct tty_struct *tty, unsigned char x,
        return put_user(x, ptr);
 }
 
+static inline int tty_copy_to_user(struct tty_struct *tty,
+                                       void __user *to,
+                                       const void *from,
+                                       unsigned long n)
+{
+       struct n_tty_data *ldata = tty->disc_data;
+
+       tty_audit_add_data(tty, to, n, ldata->icanon);
+       return copy_to_user(to, from, n);
+}
+
 /**
  *     n_tty_kick_worker - start input worker (if required)
  *     @tty: terminal
@@ -1949,6 +1960,18 @@ static inline int input_available_p(struct tty_struct *tty, int poll)
                return ldata->commit_head - ldata->read_tail >= amt;
 }
 
+static inline int check_other_done(struct tty_struct *tty)
+{
+       int done = test_bit(TTY_OTHER_DONE, &tty->flags);
+       if (done) {
+               /* paired with cmpxchg() in check_other_closed(); ensures
+                * read buffer head index is not stale
+                */
+               smp_mb__after_atomic();
+       }
+       return done;
+}
+
 /**
  *     copy_from_read_buf      -       copy read data directly
  *     @tty: terminal device
@@ -2058,8 +2081,8 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
 
        size = N_TTY_BUF_SIZE - tail;
        n = eol - tail;
-       if (n > 4096)
-               n += 4096;
+       if (n > N_TTY_BUF_SIZE)
+               n += N_TTY_BUF_SIZE;
        n += found;
        c = n;
 
@@ -2072,12 +2095,12 @@ static int canon_copy_from_read_buf(struct tty_struct *tty,
                    __func__, eol, found, n, c, size, more);
 
        if (n > size) {
-               ret = copy_to_user(*b, read_buf_addr(ldata, tail), size);
+               ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), size);
                if (ret)
                        return -EFAULT;
-               ret = copy_to_user(*b + size, ldata->read_buf, n - size);
+               ret = tty_copy_to_user(tty, *b + size, ldata->read_buf, n - size);
        } else
-               ret = copy_to_user(*b, read_buf_addr(ldata, tail), n);
+               ret = tty_copy_to_user(tty, *b, read_buf_addr(ldata, tail), n);
 
        if (ret)
                return -EFAULT;
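canon_copy_from_read_buf() reads out of a circular buffer, so a request that crosses the end of the buffer must be split into two copies; the hunk above also routes both halves through tty_copy_to_user() so the audit hook sees the data. A hedged sketch of the split-copy pattern, with memcpy() standing in for copy_to_user():

#include <assert.h>
#include <string.h>

#define BUF_SIZE 8	/* power of two, in the spirit of N_TTY_BUF_SIZE */

/* Copy n bytes starting at ring index tail, splitting at the wrap point. */
static void ring_copy(char *dst, const char *ring, size_t tail, size_t n)
{
	size_t size = BUF_SIZE - tail;	/* bytes until the end of the ring */

	if (n > size) {
		memcpy(dst, ring + tail, size);		/* up to the end */
		memcpy(dst + size, ring, n - size);	/* wrapped remainder */
	} else {
		memcpy(dst, ring + tail, n);
	}
}

int main(void)
{
	char ring[BUF_SIZE] = { 'g', 'h', 'a', 'b', 'c', 'd', 'e', 'f' };
	char out[BUF_SIZE];

	ring_copy(out, ring, 2, 8);	/* read wraps past index 7 */
	assert(memcmp(out, "abcdefgh", BUF_SIZE) == 0);
	return 0;
}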
@@ -2167,7 +2190,7 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
        struct n_tty_data *ldata = tty->disc_data;
        unsigned char __user *b = buf;
        DEFINE_WAIT_FUNC(wait, woken_wake_function);
-       int c;
+       int c, done;
        int minimum, time;
        ssize_t retval = 0;
        long timeout;
@@ -2235,8 +2258,10 @@ static ssize_t n_tty_read(struct tty_struct *tty, struct file *file,
                    ((minimum - (b - buf)) >= 1))
                        ldata->minimum_to_wake = (minimum - (b - buf));
 
+               done = check_other_done(tty);
+
                if (!input_available_p(tty, 0)) {
-                       if (test_bit(TTY_OTHER_CLOSED, &tty->flags)) {
+                       if (done) {
                                retval = -EIO;
                                break;
                        }
@@ -2443,12 +2468,12 @@ static unsigned int n_tty_poll(struct tty_struct *tty, struct file *file,
 
        poll_wait(file, &tty->read_wait, wait);
        poll_wait(file, &tty->write_wait, wait);
+       if (check_other_done(tty))
+               mask |= POLLHUP;
        if (input_available_p(tty, 1))
                mask |= POLLIN | POLLRDNORM;
        if (tty->packet && tty->link->ctrl_status)
                mask |= POLLPRI | POLLIN | POLLRDNORM;
-       if (test_bit(TTY_OTHER_CLOSED, &tty->flags))
-               mask |= POLLHUP;
        if (tty_hung_up_p(file))
                mask |= POLLHUP;
        if (!(mask & (POLLHUP | POLLIN | POLLRDNORM))) {
index e72ee629cead1b0af93c54b96395cbfc98b69975..4d5e8409769c3cc412ba5d428e0f179b4b44fdad 100644 (file)
@@ -53,9 +53,8 @@ static void pty_close(struct tty_struct *tty, struct file *filp)
        /* Review - krefs on tty_link ?? */
        if (!tty->link)
                return;
-       tty_flush_to_ldisc(tty->link);
        set_bit(TTY_OTHER_CLOSED, &tty->link->flags);
-       wake_up_interruptible(&tty->link->read_wait);
+       tty_flip_buffer_push(tty->link->port);
        wake_up_interruptible(&tty->link->write_wait);
        if (tty->driver->subtype == PTY_TYPE_MASTER) {
                set_bit(TTY_OTHER_CLOSED, &tty->flags);
@@ -243,7 +242,9 @@ static int pty_open(struct tty_struct *tty, struct file *filp)
                goto out;
 
        clear_bit(TTY_IO_ERROR, &tty->flags);
+       /* TTY_OTHER_CLOSED must be cleared before TTY_OTHER_DONE */
        clear_bit(TTY_OTHER_CLOSED, &tty->link->flags);
+       clear_bit(TTY_OTHER_DONE, &tty->link->flags);
        set_bit(TTY_THROTTLED, &tty->flags);
        return 0;
 
index 9289999cb7c62bb05b2a4b758fa76d5ce9413316..dce1a23706e86531d3caa86ba4b03c36b03bf3cf 100644 (file)
@@ -562,12 +562,36 @@ static irqreturn_t omap_wake_irq(int irq, void *dev_id)
        return IRQ_NONE;
 }
 
+#ifdef CONFIG_SERIAL_8250_DMA
+static int omap_8250_dma_handle_irq(struct uart_port *port);
+#endif
+
+static irqreturn_t omap8250_irq(int irq, void *dev_id)
+{
+       struct uart_port *port = dev_id;
+       struct uart_8250_port *up = up_to_u8250p(port);
+       unsigned int iir;
+       int ret;
+
+#ifdef CONFIG_SERIAL_8250_DMA
+       if (up->dma) {
+               ret = omap_8250_dma_handle_irq(port);
+               return IRQ_RETVAL(ret);
+       }
+#endif
+
+       serial8250_rpm_get(up);
+       iir = serial_port_in(port, UART_IIR);
+       ret = serial8250_handle_irq(port, iir);
+       serial8250_rpm_put(up);
+
+       return IRQ_RETVAL(ret);
+}
+
 static int omap_8250_startup(struct uart_port *port)
 {
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
+       struct uart_8250_port *up = up_to_u8250p(port);
        struct omap8250_priv *priv = port->private_data;
-
        int ret;
 
        if (priv->wakeirq) {
@@ -580,10 +604,31 @@ static int omap_8250_startup(struct uart_port *port)
 
        pm_runtime_get_sync(port->dev);
 
-       ret = serial8250_do_startup(port);
-       if (ret)
+       up->mcr = 0;
+       serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
+
+       serial_out(up, UART_LCR, UART_LCR_WLEN8);
+
+       up->lsr_saved_flags = 0;
+       up->msr_saved_flags = 0;
+
+       if (up->dma) {
+               ret = serial8250_request_dma(up);
+               if (ret) {
+                       dev_warn_ratelimited(port->dev,
+                                            "failed to request DMA\n");
+                       up->dma = NULL;
+               }
+       }
+
+       ret = request_irq(port->irq, omap8250_irq, IRQF_SHARED,
+                         dev_name(port->dev), port);
+       if (ret < 0)
                goto err;
 
+       up->ier = UART_IER_RLSI | UART_IER_RDI;
+       serial_out(up, UART_IER, up->ier);
+
 #ifdef CONFIG_PM
        up->capabilities |= UART_CAP_RPM;
 #endif
@@ -610,8 +655,7 @@ err:
 
 static void omap_8250_shutdown(struct uart_port *port)
 {
-       struct uart_8250_port *up =
-               container_of(port, struct uart_8250_port, port);
+       struct uart_8250_port *up = up_to_u8250p(port);
        struct omap8250_priv *priv = port->private_data;
 
        flush_work(&priv->qos_work);
@@ -621,11 +665,24 @@ static void omap_8250_shutdown(struct uart_port *port)
        pm_runtime_get_sync(port->dev);
 
        serial_out(up, UART_OMAP_WER, 0);
-       serial8250_do_shutdown(port);
+
+       up->ier = 0;
+       serial_out(up, UART_IER, 0);
+
+       if (up->dma)
+               serial8250_release_dma(up);
+
+       /*
+        * Disable break condition and FIFOs
+        */
+       if (up->lcr & UART_LCR_SBC)
+               serial_out(up, UART_LCR, up->lcr & ~UART_LCR_SBC);
+       serial_out(up, UART_FCR, UART_FCR_CLEAR_RCVR | UART_FCR_CLEAR_XMIT);
 
        pm_runtime_mark_last_busy(port->dev);
        pm_runtime_put_autosuspend(port->dev);
 
+       free_irq(port->irq, port);
        if (priv->wakeirq)
                free_irq(priv->wakeirq, port);
 }
@@ -974,6 +1031,13 @@ static inline int omap_8250_rx_dma(struct uart_8250_port *p, unsigned int iir)
 }
 #endif
 
+static int omap8250_no_handle_irq(struct uart_port *port)
+{
+       /* IRQ has not been requested but handling irq? */
+       WARN_ONCE(1, "Unexpected irq handling before port startup\n");
+       return 0;
+}
+
 static int omap8250_probe(struct platform_device *pdev)
 {
        struct resource *regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1075,6 +1139,7 @@ static int omap8250_probe(struct platform_device *pdev)
        pm_runtime_get_sync(&pdev->dev);
 
        omap_serial_fill_features_erratas(&up, priv);
+       up.port.handle_irq = omap8250_no_handle_irq;
 #ifdef CONFIG_SERIAL_8250_DMA
        if (pdev->dev.of_node) {
                /*
@@ -1088,7 +1153,6 @@ static int omap8250_probe(struct platform_device *pdev)
                ret = of_property_count_strings(pdev->dev.of_node, "dma-names");
                if (ret == 2) {
                        up.dma = &priv->omap8250_dma;
-                       up.port.handle_irq = omap_8250_dma_handle_irq;
                        priv->omap8250_dma.fn = the_no_dma_filter_fn;
                        priv->omap8250_dma.tx_dma = omap_8250_tx_dma;
                        priv->omap8250_dma.rx_dma = omap_8250_rx_dma;
index 5a4e9d579585f9c5165839db0fec8e368dfb8133..763eb20fe3213b6cfda04dc2624bcd1b8638f324 100644 (file)
@@ -1249,20 +1249,19 @@ __acquires(&uap->port.lock)
 
 /*
  * Transmit a character
- * There must be at least one free entry in the TX FIFO to accept the char.
  *
- * Returns true if the FIFO might have space in it afterwards;
- * returns false if the FIFO definitely became full.
+ * Returns true if the character was successfully queued to the FIFO.
+ * Returns false otherwise.
  */
 static bool pl011_tx_char(struct uart_amba_port *uap, unsigned char c)
 {
+       if (readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF)
+               return false; /* unable to transmit character */
+
        writew(c, uap->port.membase + UART01x_DR);
        uap->port.icount.tx++;
 
-       if (likely(uap->tx_irq_seen > 1))
-               return true;
-
-       return !(readw(uap->port.membase + UART01x_FR) & UART01x_FR_TXFF);
+       return true;
 }
 
 static bool pl011_tx_chars(struct uart_amba_port *uap)
@@ -1296,7 +1295,8 @@ static bool pl011_tx_chars(struct uart_amba_port *uap)
                return false;
 
        if (uap->port.x_char) {
-               pl011_tx_char(uap, uap->port.x_char);
+               if (!pl011_tx_char(uap, uap->port.x_char))
+                       goto done;
                uap->port.x_char = 0;
                --count;
        }
@@ -1639,6 +1639,9 @@ static int pl011_startup(struct uart_port *port)
 
        writew(uap->vendor->ifls, uap->port.membase + UART011_IFLS);
 
+       /* Assume that TX IRQ doesn't work until we see one: */
+       uap->tx_irq_seen = 0;
+
        spin_lock_irq(&uap->port.lock);
 
        /* restore RTS and DTR */
@@ -1702,7 +1705,7 @@ static void pl011_shutdown(struct uart_port *port)
        spin_lock_irq(&uap->port.lock);
        uap->im = 0;
        writew(uap->im, uap->port.membase + UART011_IMSC);
-       writew(0xffff & ~UART011_TXIS, uap->port.membase + UART011_ICR);
+       writew(0xffff, uap->port.membase + UART011_ICR);
        spin_unlock_irq(&uap->port.lock);
 
        pl011_dma_shutdown(uap);
index 5fdc9f3ecd644d9b58a0f9a10f150296b68282d7..6dc471e30e793aaf99bfc1bac6d376387d9e21a5 100644 (file)
@@ -187,13 +187,8 @@ static int __init param_setup_earlycon(char *buf)
                return 0;
 
        err = setup_earlycon(buf);
-       if (err == -ENOENT) {
-               pr_warn("no match for %s\n", buf);
-               err = 0;
-       } else if (err == -EALREADY) {
-               pr_warn("already registered\n");
-               err = 0;
-       }
+       if (err == -ENOENT || err == -EALREADY)
+               return 0;
        return err;
 }
 early_param("earlycon", param_setup_earlycon);
index c8cfa06371280af6abfd63bd379ee5c121523ad7..88250395b0ce96486a2dac5e2e9162fb7f4eae43 100644 (file)
@@ -911,6 +911,14 @@ static void dma_rx_callback(void *data)
 
        status = dmaengine_tx_status(chan, (dma_cookie_t)0, &state);
        count = RX_BUF_SIZE - state.residue;
+
+       if (readl(sport->port.membase + USR2) & USR2_IDLE) {
+               /* In condition [3] the SDMA counted up too early */
+               count--;
+
+               writel(USR2_IDLE, sport->port.membase + USR2);
+       }
+
        dev_dbg(sport->port.dev, "We get %d bytes.\n", count);
 
        if (count) {
index 211479aa34bb20d5375078afb103829faa5bfd84..7f49172ccd8673b316b3b82ab21282885ef983eb 100644 (file)
@@ -1735,6 +1735,8 @@ static int serial_omap_probe(struct platform_device *pdev)
 err_add_port:
        pm_runtime_put(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
+       pm_qos_remove_request(&up->pm_qos_request);
+       device_init_wakeup(up->dev, false);
 err_rs485:
 err_port_line:
        return ret;
index 75661641f5fe068237e7830acd9472383964da3a..2f78b77f0f8180fa07df27ab2ab931cf73f53587 100644 (file)
 
 #define TTY_BUFFER_PAGE        (((PAGE_SIZE - sizeof(struct tty_buffer)) / 2) & ~0xFF)
 
+/*
+ * If all tty flip buffers have been processed by flush_to_ldisc() or
+ * dropped by tty_buffer_flush(), check if the linked pty has been closed.
+ * If so, wake the reader/poller so it can process the close.
+ */
+static inline void check_other_closed(struct tty_struct *tty)
+{
+       unsigned long flags, old;
+
+       /* transition from TTY_OTHER_CLOSED => TTY_OTHER_DONE must be atomic */
+       for (flags = ACCESS_ONCE(tty->flags);
+            test_bit(TTY_OTHER_CLOSED, &flags);
+            ) {
+               old = flags;
+               __set_bit(TTY_OTHER_DONE, &flags);
+               flags = cmpxchg(&tty->flags, old, flags);
+               if (old == flags) {
+                       wake_up_interruptible(&tty->read_wait);
+                       break;
+               }
+       }
+}
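check_other_closed() promotes TTY_OTHER_CLOSED to TTY_OTHER_DONE with a cmpxchg() retry loop, so the transition is atomic against concurrent updates to the flags word. A hedged C11-atomics analogue of the same retry pattern (illustrative bit positions, not the kernel's):

#include <assert.h>
#include <stdatomic.h>

#define OTHER_CLOSED (1ul << 0)
#define OTHER_DONE   (1ul << 1)

static _Atomic unsigned long flags = OTHER_CLOSED;

/* Set OTHER_DONE only while OTHER_CLOSED is still set, atomically. */
static int promote_closed_to_done(void)
{
	unsigned long old = atomic_load(&flags);

	while (old & OTHER_CLOSED) {
		/* On failure, old is reloaded and the loop re-checks. */
		if (atomic_compare_exchange_weak(&flags, &old,
						 old | OTHER_DONE))
			return 1;	/* we made the transition */
	}
	return 0;	/* OTHER_CLOSED was cleared under us */
}

int main(void)
{
	assert(promote_closed_to_done());
	assert(atomic_load(&flags) & OTHER_DONE);
	return 0;
}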
 
 /**
  *     tty_buffer_lock_exclusive       -       gain exclusive access to buffer
@@ -229,6 +251,8 @@ void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld)
        if (ld && ld->ops->flush_buffer)
                ld->ops->flush_buffer(tty);
 
+       check_other_closed(tty);
+
        atomic_dec(&buf->priority);
        mutex_unlock(&buf->lock);
 }
@@ -471,8 +495,10 @@ static void flush_to_ldisc(struct work_struct *work)
                smp_rmb();
                count = head->commit - head->read;
                if (!count) {
-                       if (next == NULL)
+                       if (next == NULL) {
+                               check_other_closed(tty);
                                break;
+                       }
                        buf->head = next;
                        tty_buffer_free(port, head);
                        continue;
@@ -488,19 +514,6 @@ static void flush_to_ldisc(struct work_struct *work)
        tty_ldisc_deref(disc);
 }
 
-/**
- *     tty_flush_to_ldisc
- *     @tty: tty to push
- *
- *     Push the terminal flip buffers to the line discipline.
- *
- *     Must not be called from IRQ context.
- */
-void tty_flush_to_ldisc(struct tty_struct *tty)
-{
-       flush_work(&tty->port->buf.work);
-}
-
 /**
  *     tty_flip_buffer_push    -       terminal
  *     @port: tty port to push
index dfb05edcdb96dbbb3b794d6faf5c799b3c031f06..5b7061a331038d36ad20b7ec3c05b2ab1f8b3efc 100644 (file)
@@ -88,9 +88,13 @@ static ssize_t ci_port_test_write(struct file *file, const char __user *ubuf,
        char buf[32];
        int ret;
 
-       if (copy_from_user(buf, ubuf, min_t(size_t, sizeof(buf) - 1, count)))
+       count = min_t(size_t, sizeof(buf) - 1, count);
+       if (copy_from_user(buf, ubuf, count))
                return -EFAULT;
 
+       /* sscanf requires a NUL-terminated string */
+       buf[count] = '\0';
+
        if (sscanf(buf, "%u", &mode) != 1)
                return -EINVAL;
 
index 41e510ae8c837ea337135c4ec8ddbe26fccac275..d85abfed84ccaa2327820f1b35cabac11422d647 100644 (file)
@@ -106,6 +106,9 @@ static const struct usb_device_id usb_quirk_list[] = {
        { USB_DEVICE(0x04f3, 0x010c), .driver_info =
                        USB_QUIRK_DEVICE_QUALIFIER },
 
+       { USB_DEVICE(0x04f3, 0x0125), .driver_info =
+                       USB_QUIRK_DEVICE_QUALIFIER },
+
        { USB_DEVICE(0x04f3, 0x016f), .driver_info =
                        USB_QUIRK_DEVICE_QUALIFIER },
 
index fdab715a063119d6e696a8f66ea26d4a1613e983..c0eafa6fd40314086474f5b7cab8f63361c73d64 100644 (file)
 #define DWC3_DGCMD_SET_ENDPOINT_NRDY   0x0c
 #define DWC3_DGCMD_RUN_SOC_BUS_LOOPBACK        0x10
 
-#define DWC3_DGCMD_STATUS(n)           (((n) >> 15) & 1)
+#define DWC3_DGCMD_STATUS(n)           (((n) >> 12) & 0x0F)
 #define DWC3_DGCMD_CMDACT              (1 << 10)
 #define DWC3_DGCMD_CMDIOC              (1 << 8)
 
 #define DWC3_DEPCMD_PARAM_SHIFT                16
 #define DWC3_DEPCMD_PARAM(x)           ((x) << DWC3_DEPCMD_PARAM_SHIFT)
 #define DWC3_DEPCMD_GET_RSC_IDX(x)     (((x) >> DWC3_DEPCMD_PARAM_SHIFT) & 0x7f)
-#define DWC3_DEPCMD_STATUS(x)          (((x) >> 15) & 1)
+#define DWC3_DEPCMD_STATUS(x)          (((x) >> 12) & 0x0F)
 #define DWC3_DEPCMD_HIPRI_FORCERM      (1 << 11)
 #define DWC3_DEPCMD_CMDACT             (1 << 10)
 #define DWC3_DEPCMD_CMDIOC             (1 << 8)
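The DWC3 hunk above widens the command-status extraction from a single bit at position 15 to the full 4-bit field at bits 15:12; with the old macro, any nonzero status confined to bits 14:12 read back as zero, i.e. success. A worked example on a hypothetical completion value:

#include <assert.h>

#define OLD_STATUS(n)	(((n) >> 15) & 1)	/* bit 15 only */
#define NEW_STATUS(n)	(((n) >> 12) & 0x0F)	/* bits 15:12 */

int main(void)
{
	unsigned int reg = 0x2000;	/* hypothetical failed-command value */

	assert(OLD_STATUS(reg) == 0);	/* error misread as success */
	assert(NEW_STATUS(reg) == 2);	/* nonzero status now reported */
	return 0;
}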
index edba5348be186bf33857bd7c1d47233df6d12422..6b486a36863c08dd908df48375d819602672762b 100644 (file)
@@ -65,8 +65,8 @@
 #define USBOTGSS_IRQENABLE_SET_MISC            0x003c
 #define USBOTGSS_IRQENABLE_CLR_MISC            0x0040
 #define USBOTGSS_IRQMISC_OFFSET                        0x03fc
-#define USBOTGSS_UTMI_OTG_CTRL                 0x0080
-#define USBOTGSS_UTMI_OTG_STATUS               0x0084
+#define USBOTGSS_UTMI_OTG_STATUS               0x0080
+#define USBOTGSS_UTMI_OTG_CTRL                 0x0084
 #define USBOTGSS_UTMI_OTG_OFFSET               0x0480
 #define USBOTGSS_TXFIFO_DEPTH                  0x0508
 #define USBOTGSS_RXFIFO_DEPTH                  0x050c
 #define USBOTGSS_IRQMISC_DISCHRGVBUS_FALL              (1 << 3)
 #define USBOTGSS_IRQMISC_IDPULLUP_FALL         (1 << 0)
 
-/* UTMI_OTG_CTRL REGISTER */
-#define USBOTGSS_UTMI_OTG_CTRL_DRVVBUS         (1 << 5)
-#define USBOTGSS_UTMI_OTG_CTRL_CHRGVBUS                (1 << 4)
-#define USBOTGSS_UTMI_OTG_CTRL_DISCHRGVBUS     (1 << 3)
-#define USBOTGSS_UTMI_OTG_CTRL_IDPULLUP                (1 << 0)
-
 /* UTMI_OTG_STATUS REGISTER */
-#define USBOTGSS_UTMI_OTG_STATUS_SW_MODE       (1 << 31)
-#define USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT  (1 << 9)
-#define USBOTGSS_UTMI_OTG_STATUS_TXBITSTUFFENABLE (1 << 8)
-#define USBOTGSS_UTMI_OTG_STATUS_IDDIG         (1 << 4)
-#define USBOTGSS_UTMI_OTG_STATUS_SESSEND       (1 << 3)
-#define USBOTGSS_UTMI_OTG_STATUS_SESSVALID     (1 << 2)
-#define USBOTGSS_UTMI_OTG_STATUS_VBUSVALID     (1 << 1)
+#define USBOTGSS_UTMI_OTG_STATUS_DRVVBUS       (1 << 5)
+#define USBOTGSS_UTMI_OTG_STATUS_CHRGVBUS      (1 << 4)
+#define USBOTGSS_UTMI_OTG_STATUS_DISCHRGVBUS   (1 << 3)
+#define USBOTGSS_UTMI_OTG_STATUS_IDPULLUP      (1 << 0)
+
+/* UTMI_OTG_CTRL REGISTER */
+#define USBOTGSS_UTMI_OTG_CTRL_SW_MODE         (1 << 31)
+#define USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT    (1 << 9)
+#define USBOTGSS_UTMI_OTG_CTRL_TXBITSTUFFENABLE (1 << 8)
+#define USBOTGSS_UTMI_OTG_CTRL_IDDIG           (1 << 4)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSEND         (1 << 3)
+#define USBOTGSS_UTMI_OTG_CTRL_SESSVALID       (1 << 2)
+#define USBOTGSS_UTMI_OTG_CTRL_VBUSVALID       (1 << 1)
 
 struct dwc3_omap {
        struct device           *dev;
@@ -119,7 +119,7 @@ struct dwc3_omap {
        int                     irq;
        void __iomem            *base;
 
-       u32                     utmi_otg_status;
+       u32                     utmi_otg_ctrl;
        u32                     utmi_otg_offset;
        u32                     irqmisc_offset;
        u32                     irq_eoi_offset;
@@ -153,15 +153,15 @@ static inline void dwc3_omap_writel(void __iomem *base, u32 offset, u32 value)
        writel(value, base + offset);
 }
 
-static u32 dwc3_omap_read_utmi_status(struct dwc3_omap *omap)
+static u32 dwc3_omap_read_utmi_ctrl(struct dwc3_omap *omap)
 {
-       return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_STATUS +
+       return dwc3_omap_readl(omap->base, USBOTGSS_UTMI_OTG_CTRL +
                                                        omap->utmi_otg_offset);
 }
 
-static void dwc3_omap_write_utmi_status(struct dwc3_omap *omap, u32 value)
+static void dwc3_omap_write_utmi_ctrl(struct dwc3_omap *omap, u32 value)
 {
-       dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_STATUS +
+       dwc3_omap_writel(omap->base, USBOTGSS_UTMI_OTG_CTRL +
                                        omap->utmi_otg_offset, value);
 
 }
@@ -235,25 +235,25 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
                        }
                }
 
-               val = dwc3_omap_read_utmi_status(omap);
-               val &= ~(USBOTGSS_UTMI_OTG_STATUS_IDDIG
-                               | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
-                               | USBOTGSS_UTMI_OTG_STATUS_SESSEND);
-               val |= USBOTGSS_UTMI_OTG_STATUS_SESSVALID
-                               | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
-               dwc3_omap_write_utmi_status(omap, val);
+               val = dwc3_omap_read_utmi_ctrl(omap);
+               val &= ~(USBOTGSS_UTMI_OTG_CTRL_IDDIG
+                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_SESSEND);
+               val |= USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+               dwc3_omap_write_utmi_ctrl(omap, val);
                break;
 
        case OMAP_DWC3_VBUS_VALID:
                dev_dbg(omap->dev, "VBUS Connect\n");
 
-               val = dwc3_omap_read_utmi_status(omap);
-               val &= ~USBOTGSS_UTMI_OTG_STATUS_SESSEND;
-               val |= USBOTGSS_UTMI_OTG_STATUS_IDDIG
-                               | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
-                               | USBOTGSS_UTMI_OTG_STATUS_SESSVALID
-                               | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT;
-               dwc3_omap_write_utmi_status(omap, val);
+               val = dwc3_omap_read_utmi_ctrl(omap);
+               val &= ~USBOTGSS_UTMI_OTG_CTRL_SESSEND;
+               val |= USBOTGSS_UTMI_OTG_CTRL_IDDIG
+                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT;
+               dwc3_omap_write_utmi_ctrl(omap, val);
                break;
 
        case OMAP_DWC3_ID_FLOAT:
@@ -263,13 +263,13 @@ static void dwc3_omap_set_mailbox(struct dwc3_omap *omap,
        case OMAP_DWC3_VBUS_OFF:
                dev_dbg(omap->dev, "VBUS Disconnect\n");
 
-               val = dwc3_omap_read_utmi_status(omap);
-               val &= ~(USBOTGSS_UTMI_OTG_STATUS_SESSVALID
-                               | USBOTGSS_UTMI_OTG_STATUS_VBUSVALID
-                               | USBOTGSS_UTMI_OTG_STATUS_POWERPRESENT);
-               val |= USBOTGSS_UTMI_OTG_STATUS_SESSEND
-                               | USBOTGSS_UTMI_OTG_STATUS_IDDIG;
-               dwc3_omap_write_utmi_status(omap, val);
+               val = dwc3_omap_read_utmi_ctrl(omap);
+               val &= ~(USBOTGSS_UTMI_OTG_CTRL_SESSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_VBUSVALID
+                               | USBOTGSS_UTMI_OTG_CTRL_POWERPRESENT);
+               val |= USBOTGSS_UTMI_OTG_CTRL_SESSEND
+                               | USBOTGSS_UTMI_OTG_CTRL_IDDIG;
+               dwc3_omap_write_utmi_ctrl(omap, val);
                break;
 
        default:
@@ -422,22 +422,22 @@ static void dwc3_omap_set_utmi_mode(struct dwc3_omap *omap)
        struct device_node      *node = omap->dev->of_node;
        int                     utmi_mode = 0;
 
-       reg = dwc3_omap_read_utmi_status(omap);
+       reg = dwc3_omap_read_utmi_ctrl(omap);
 
        of_property_read_u32(node, "utmi-mode", &utmi_mode);
 
        switch (utmi_mode) {
        case DWC3_OMAP_UTMI_MODE_SW:
-               reg |= USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+               reg |= USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
                break;
        case DWC3_OMAP_UTMI_MODE_HW:
-               reg &= ~USBOTGSS_UTMI_OTG_STATUS_SW_MODE;
+               reg &= ~USBOTGSS_UTMI_OTG_CTRL_SW_MODE;
                break;
        default:
                dev_dbg(omap->dev, "UNKNOWN utmi mode %d\n", utmi_mode);
        }
 
-       dwc3_omap_write_utmi_status(omap, reg);
+       dwc3_omap_write_utmi_ctrl(omap, reg);
 }
 
 static int dwc3_omap_extcon_register(struct dwc3_omap *omap)
@@ -614,7 +614,7 @@ static int dwc3_omap_suspend(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
 
-       omap->utmi_otg_status = dwc3_omap_read_utmi_status(omap);
+       omap->utmi_otg_ctrl = dwc3_omap_read_utmi_ctrl(omap);
        dwc3_omap_disable_irqs(omap);
 
        return 0;
@@ -624,7 +624,7 @@ static int dwc3_omap_resume(struct device *dev)
 {
        struct dwc3_omap        *omap = dev_get_drvdata(dev);
 
-       dwc3_omap_write_utmi_status(omap, omap->utmi_otg_status);
+       dwc3_omap_write_utmi_ctrl(omap, omap->utmi_otg_ctrl);
        dwc3_omap_enable_irqs(omap);
 
        pm_runtime_disable(dev);
index c42765b3a060bc6b28b0f7a55f0231489afe651c..0495c94a23d7e96a9c3554b99893b6b0f63e7302 100644 (file)
@@ -1295,6 +1295,7 @@ static void purge_configs_funcs(struct gadget_info *gi)
                        }
                }
                c->next_interface_id = 0;
+               memset(c->interface, 0, sizeof(c->interface));
                c->superspeed = 0;
                c->highspeed = 0;
                c->fullspeed = 0;
index 6bdb5706904497ca9eccb7fd5d979c67824d8600..3507f880eb74294c76ddbc43c3aa153528478f53 100644 (file)
@@ -315,7 +315,6 @@ static ssize_t ffs_ep0_write(struct file *file, const char __user *buf,
                                return ret;
                        }
 
-                       set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
                        return len;
                }
                break;
@@ -847,7 +846,7 @@ static ssize_t ffs_epfile_io(struct file *file, struct ffs_io_data *io_data)
                                ret = ep->status;
                                if (io_data->read && ret > 0) {
                                        ret = copy_to_iter(data, ret, &io_data->data);
-                                       if (unlikely(iov_iter_count(&io_data->data)))
+                                       if (!ret)
                                                ret = -EFAULT;
                                }
                        }
@@ -1463,8 +1462,7 @@ static void ffs_data_clear(struct ffs_data *ffs)
 {
        ENTER();
 
-       if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags))
-               ffs_closed(ffs);
+       ffs_closed(ffs);
 
        BUG_ON(ffs->gadget);
 
@@ -3422,9 +3420,13 @@ static int ffs_ready(struct ffs_data *ffs)
        ffs_obj->desc_ready = true;
        ffs_obj->ffs_data = ffs;
 
-       if (ffs_obj->ffs_ready_callback)
+       if (ffs_obj->ffs_ready_callback) {
                ret = ffs_obj->ffs_ready_callback(ffs);
+               if (ret)
+                       goto done;
+       }
 
+       set_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags);
 done:
        ffs_dev_unlock();
        return ret;
@@ -3443,7 +3445,8 @@ static void ffs_closed(struct ffs_data *ffs)
 
        ffs_obj->desc_ready = false;
 
-       if (ffs_obj->ffs_closed_callback)
+       if (test_and_clear_bit(FFS_FL_CALL_CLOSED_CALLBACK, &ffs->flags) &&
+           ffs_obj->ffs_closed_callback)
                ffs_obj->ffs_closed_callback(ffs);
 
        if (!ffs_obj->opts || ffs_obj->opts->no_configfs
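
The two f_fs hunks above pair set_bit() after a successful ready callback with test_and_clear_bit() in the close path, so the closed callback runs at most once and only when ready succeeded. A self-contained C11 sketch of that single-shot gate (illustrative names, not the kernel API):

#include <stdatomic.h>
#include <stdbool.h>

struct cb_gate {
	atomic_bool armed;	/* plays the role of FFS_FL_CALL_CLOSED_CALLBACK */
};

static void gate_arm(struct cb_gate *g)
{
	atomic_store(&g->armed, true);		/* set_bit() once ready() succeeds */
}

static bool gate_fire(struct cb_gate *g)
{
	/* test_and_clear_bit(): returns true exactly once per arm */
	return atomic_exchange(&g->armed, false);
}
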
index 13dfc9915b1dee679b89f83a19ab3ef56641ae34..f7f35a36c09a06eab17ef2e5af013ee3de2b5b8e 100644 (file)
@@ -437,12 +437,20 @@ static int hidg_setup(struct usb_function *f,
                  | USB_REQ_GET_DESCRIPTOR):
                switch (value >> 8) {
                case HID_DT_HID:
+               {
+                       struct hid_descriptor hidg_desc_copy = hidg_desc;
+
                        VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: HID\n");
+                       hidg_desc_copy.desc[0].bDescriptorType = HID_DT_REPORT;
+                       hidg_desc_copy.desc[0].wDescriptorLength =
+                               cpu_to_le16(hidg->report_desc_length);
+
                        length = min_t(unsigned short, length,
-                                                  hidg_desc.bLength);
-                       memcpy(req->buf, &hidg_desc, length);
+                                                  hidg_desc_copy.bLength);
+                       memcpy(req->buf, &hidg_desc_copy, length);
                        goto respond;
                        break;
+               }
                case HID_DT_REPORT:
                        VDBG(cdev, "USB_REQ_GET_DESCRIPTOR: REPORT\n");
                        length = min_t(unsigned short, length,
@@ -632,6 +640,10 @@ static int hidg_bind(struct usb_configuration *c, struct usb_function *f)
        hidg_fs_in_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
        hidg_hs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
        hidg_fs_out_ep_desc.wMaxPacketSize = cpu_to_le16(hidg->report_length);
+       /*
+        * We can use the hidg_desc struct here, but we must not rely on
+        * its contents staying unchanged after this function returns.
+        */
        hidg_desc.desc[0].bDescriptorType = HID_DT_REPORT;
        hidg_desc.desc[0].wDescriptorLength =
                cpu_to_le16(hidg->report_desc_length);
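
Both f_hid hunks above stop patching the shared static hidg_desc from the request path: the setup handler now fills an on-stack copy instead. The idiom in isolation, with a hypothetical descriptor type:

#include <stddef.h>
#include <stdint.h>
#include <string.h>

struct desc {			/* hypothetical stand-in for hid_descriptor */
	uint8_t  bLength;
	uint16_t wDescriptorLength;
};

static void fill_reply(void *buf, size_t buflen,
		       const struct desc *shared, uint16_t len)
{
	struct desc copy = *shared;	/* private copy: no shared-state race */

	copy.wDescriptorLength = len;	/* per-request patching is now safe */
	memcpy(buf, &copy, copy.bLength < buflen ? copy.bLength : buflen);
}
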
index 259b656c0b3ec7bde9e119488f46ded351bb7300..6316aa5b1c4947a6df2e08b4c45856dc77b94374 100644 (file)
@@ -973,7 +973,13 @@ static ssize_t f_midi_opts_id_show(struct f_midi_opts *opts, char *page)
        int result;
 
        mutex_lock(&opts->lock);
-       result = strlcpy(page, opts->id, PAGE_SIZE);
+       if (opts->id) {
+               result = strlcpy(page, opts->id, PAGE_SIZE);
+       } else {
+               page[0] = 0;
+               result = 0;
+       }
+
        mutex_unlock(&opts->lock);
 
        return result;
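
The hunk above guards against a NULL opts->id before copying it out. The same shape in standalone C, with snprintf standing in for the kernel's strlcpy:

#include <stdio.h>

static int show_id(char *page, size_t size, const char *id)
{
	if (!id) {			/* nothing configured: empty result */
		page[0] = '\0';
		return 0;
	}
	return snprintf(page, size, "%s", id);	/* strlcpy() stand-in */
}
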
index 9719abfb61455ca91ec5d1721e53622d4b76f1ef..7856b3394494b7d4250637277dd1f42f45d7a1ea 100644 (file)
@@ -588,7 +588,10 @@ static int f_audio_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
 
        if (intf == 1) {
                if (alt == 1) {
-                       config_ep_by_speed(cdev->gadget, f, out_ep);
+                       err = config_ep_by_speed(cdev->gadget, f, out_ep);
+                       if (err)
+                               return err;
+
                        usb_ep_enable(out_ep);
                        out_ep->driver_data = audio;
                        audio->copy_buf = f_audio_buffer_alloc(audio_buf_size);
index 89179ab20c109277a3d49c48b7d91d85eb821f4c..7ee057930ae71793bcbd2777cb275572af21c295 100644 (file)
@@ -113,6 +113,7 @@ struct gs_port {
        int write_allocated;
        struct gs_buf           port_write_buf;
        wait_queue_head_t       drain_wait;     /* wait while writes drain */
+       bool                    write_busy;
 
        /* REVISIT this state ... */
        struct usb_cdc_line_coding port_line_coding;    /* 8-N-1 etc */
@@ -363,7 +364,7 @@ __acquires(&port->port_lock)
        int                     status = 0;
        bool                    do_tty_wake = false;
 
-       while (!list_empty(pool)) {
+       while (!port->write_busy && !list_empty(pool)) {
                struct usb_request      *req;
                int                     len;
 
@@ -393,9 +394,11 @@ __acquires(&port->port_lock)
                 * NOTE that we may keep sending data for a while after
                 * the TTY closed (dev->ioport->port_tty is NULL).
                 */
+               port->write_busy = true;
                spin_unlock(&port->port_lock);
                status = usb_ep_queue(in, req, GFP_ATOMIC);
                spin_lock(&port->port_lock);
+               port->write_busy = false;
 
                if (status) {
                        pr_debug("%s: %s %s err %d\n",
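
The write_busy flag introduced above is only touched under port_lock, so a completion that re-enters the transmit loop while the lock is dropped around usb_ep_queue() becomes a no-op instead of corrupting the request list. A pthread sketch of the guard:

#include <pthread.h>
#include <stdbool.h>

struct port {
	pthread_mutex_t lock;
	bool write_busy;
};

static void queue_request(struct port *p) { (void)p; }	/* stub for usb_ep_queue() */

static void start_tx(struct port *p)
{
	pthread_mutex_lock(&p->lock);
	if (!p->write_busy) {
		p->write_busy = true;
		pthread_mutex_unlock(&p->lock);	/* lock dropped around the queue call */
		queue_request(p);		/* may re-enter start_tx(); flag blocks it */
		pthread_mutex_lock(&p->lock);
		p->write_busy = false;
	}
	pthread_mutex_unlock(&p->lock);
}
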
index c30b7b572465d290b8cc6097915d63fd596aa79d..1194b09ae7462638d689eb99af6a15bd209c9e9e 100644 (file)
@@ -121,7 +121,7 @@ static struct usb_function *f_msg;
 /*
  * We _always_ have both ACM and mass storage functions.
  */
-static int __init acm_ms_do_config(struct usb_configuration *c)
+static int acm_ms_do_config(struct usb_configuration *c)
 {
        struct fsg_opts *opts;
        int     status;
@@ -174,7 +174,7 @@ static struct usb_configuration acm_ms_config_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static int __init acm_ms_bind(struct usb_composite_dev *cdev)
+static int acm_ms_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
        struct fsg_opts         *opts;
@@ -249,7 +249,7 @@ fail_get_msg:
        return status;
 }
 
-static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
+static int acm_ms_unbind(struct usb_composite_dev *cdev)
 {
        usb_put_function(f_msg);
        usb_put_function_instance(fi_msg);
@@ -258,13 +258,13 @@ static int __exit acm_ms_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver acm_ms_driver = {
+static struct usb_composite_driver acm_ms_driver = {
        .name           = "g_acm_ms",
        .dev            = &device_desc,
        .max_speed      = USB_SPEED_SUPER,
        .strings        = dev_strings,
        .bind           = acm_ms_bind,
-       .unbind         = __exit_p(acm_ms_unbind),
+       .unbind         = acm_ms_unbind,
 };
 
 module_usb_composite_driver(acm_ms_driver);
index f46a3956e43d1f334d9dafd2d3055aa99eee5b58..f289caf18a45341dc613a676345bfe8eeb51ee8e 100644 (file)
@@ -167,7 +167,7 @@ static const struct usb_descriptor_header *otg_desc[] = {
 
 /*-------------------------------------------------------------------------*/
 
-static int __init audio_do_config(struct usb_configuration *c)
+static int audio_do_config(struct usb_configuration *c)
 {
        int status;
 
@@ -216,7 +216,7 @@ static struct usb_configuration audio_config_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static int __init audio_bind(struct usb_composite_dev *cdev)
+static int audio_bind(struct usb_composite_dev *cdev)
 {
 #ifndef CONFIG_GADGET_UAC1
        struct f_uac2_opts      *uac2_opts;
@@ -276,7 +276,7 @@ fail:
        return status;
 }
 
-static int __exit audio_unbind(struct usb_composite_dev *cdev)
+static int audio_unbind(struct usb_composite_dev *cdev)
 {
 #ifdef CONFIG_GADGET_UAC1
        if (!IS_ERR_OR_NULL(f_uac1))
@@ -292,13 +292,13 @@ static int __exit audio_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver audio_driver = {
+static struct usb_composite_driver audio_driver = {
        .name           = "g_audio",
        .dev            = &device_desc,
        .strings        = audio_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = audio_bind,
-       .unbind         = __exit_p(audio_unbind),
+       .unbind         = audio_unbind,
 };
 
 module_usb_composite_driver(audio_driver);
index 2e85d947347830b5b09ddf70b43631c85b2489d8..afd3e37921a7d9f1cc879cda5fe64ac5f940b6fa 100644 (file)
@@ -104,7 +104,7 @@ static struct usb_function_instance *fi_ecm;
 /*
  * We _always_ have both CDC ECM and CDC ACM functions.
  */
-static int __init cdc_do_config(struct usb_configuration *c)
+static int cdc_do_config(struct usb_configuration *c)
 {
        int     status;
 
@@ -153,7 +153,7 @@ static struct usb_configuration cdc_config_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static int __init cdc_bind(struct usb_composite_dev *cdev)
+static int cdc_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
        struct f_ecm_opts       *ecm_opts;
@@ -211,7 +211,7 @@ fail:
        return status;
 }
 
-static int __exit cdc_unbind(struct usb_composite_dev *cdev)
+static int cdc_unbind(struct usb_composite_dev *cdev)
 {
        usb_put_function(f_acm);
        usb_put_function_instance(fi_serial);
@@ -222,13 +222,13 @@ static int __exit cdc_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver cdc_driver = {
+static struct usb_composite_driver cdc_driver = {
        .name           = "g_cdc",
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = cdc_bind,
-       .unbind         = __exit_p(cdc_unbind),
+       .unbind         = cdc_unbind,
 };
 
 module_usb_composite_driver(cdc_driver);
index 633683a72a1169d95ee9f553efde929e9bf48808..204b10b1a7e7dd08c36bc00d44205e0c6753b1c2 100644 (file)
@@ -284,7 +284,7 @@ fail_1:
        return -ENODEV;
 }
 
-static int __init dbgp_bind(struct usb_gadget *gadget,
+static int dbgp_bind(struct usb_gadget *gadget,
                struct usb_gadget_driver *driver)
 {
        int err, stp;
@@ -406,7 +406,7 @@ fail:
        return err;
 }
 
-static __refdata struct usb_gadget_driver dbgp_driver = {
+static struct usb_gadget_driver dbgp_driver = {
        .function = "dbgp",
        .max_speed = USB_SPEED_HIGH,
        .bind = dbgp_bind,
index c5fdc61cdc4a6bcf1684d005fddb7f34e2971bb6..a3323dca218f9514b4c2a5e4319d9866c950d8e5 100644 (file)
@@ -222,7 +222,7 @@ static struct usb_function *f_rndis;
  * the first one present.  That's to make Microsoft's drivers happy,
  * and to follow DOCSIS 1.0 (cable modem standard).
  */
-static int __init rndis_do_config(struct usb_configuration *c)
+static int rndis_do_config(struct usb_configuration *c)
 {
        int status;
 
@@ -264,7 +264,7 @@ MODULE_PARM_DESC(use_eem, "use CDC EEM mode");
 /*
  * We _always_ have an ECM, CDC Subset, or EEM configuration.
  */
-static int __init eth_do_config(struct usb_configuration *c)
+static int eth_do_config(struct usb_configuration *c)
 {
        int status = 0;
 
@@ -318,7 +318,7 @@ static struct usb_configuration eth_config_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static int __init eth_bind(struct usb_composite_dev *cdev)
+static int eth_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
        struct f_eem_opts       *eem_opts = NULL;
@@ -447,7 +447,7 @@ fail:
        return status;
 }
 
-static int __exit eth_unbind(struct usb_composite_dev *cdev)
+static int eth_unbind(struct usb_composite_dev *cdev)
 {
        if (has_rndis()) {
                usb_put_function(f_rndis);
@@ -466,13 +466,13 @@ static int __exit eth_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver eth_driver = {
+static struct usb_composite_driver eth_driver = {
        .name           = "g_ether",
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_SUPER,
        .bind           = eth_bind,
-       .unbind         = __exit_p(eth_unbind),
+       .unbind         = eth_unbind,
 };
 
 module_usb_composite_driver(eth_driver);
index b01b88e1b716a5902d5276196ef459663545a216..e821931c965cd9203a8011358ffeb16844dc7eed 100644 (file)
@@ -163,7 +163,7 @@ static int gfs_unbind(struct usb_composite_dev *cdev);
 static int gfs_do_config(struct usb_configuration *c);
 
 
-static __refdata struct usb_composite_driver gfs_driver = {
+static struct usb_composite_driver gfs_driver = {
        .name           = DRIVER_NAME,
        .dev            = &gfs_dev_desc,
        .strings        = gfs_dev_strings,
@@ -304,8 +304,10 @@ static int functionfs_ready_callback(struct ffs_data *ffs)
        gfs_registered = true;
 
        ret = usb_composite_probe(&gfs_driver);
-       if (unlikely(ret < 0))
+       if (unlikely(ret < 0)) {
+               ++missing_funcs;
                gfs_registered = false;
+       }
        
        return ret;
 }
index e02a095294ac24fe0640555839d37e57833a13bd..da19c486b61e33a5b3214830f1c5db42ed979a75 100644 (file)
@@ -118,7 +118,7 @@ static struct usb_gadget_strings *dev_strings[] = {
 static struct usb_function_instance *fi_midi;
 static struct usb_function *f_midi;
 
-static int __exit midi_unbind(struct usb_composite_dev *dev)
+static int midi_unbind(struct usb_composite_dev *dev)
 {
        usb_put_function(f_midi);
        usb_put_function_instance(fi_midi);
@@ -133,7 +133,7 @@ static struct usb_configuration midi_config = {
        .MaxPower       = CONFIG_USB_GADGET_VBUS_DRAW,
 };
 
-static int __init midi_bind_config(struct usb_configuration *c)
+static int midi_bind_config(struct usb_configuration *c)
 {
        int status;
 
@@ -150,7 +150,7 @@ static int __init midi_bind_config(struct usb_configuration *c)
        return 0;
 }
 
-static int __init midi_bind(struct usb_composite_dev *cdev)
+static int midi_bind(struct usb_composite_dev *cdev)
 {
        struct f_midi_opts *midi_opts;
        int status;
@@ -185,13 +185,13 @@ put:
        return status;
 }
 
-static __refdata struct usb_composite_driver midi_driver = {
+static struct usb_composite_driver midi_driver = {
        .name           = (char *) longname,
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = midi_bind,
-       .unbind         = __exit_p(midi_unbind),
+       .unbind         = midi_unbind,
 };
 
 module_usb_composite_driver(midi_driver);
index 614b06d80b4122c0b469dc898b5a251551667e69..2baa572686c6acbd5e1350ab167b65a367482302 100644 (file)
@@ -106,7 +106,7 @@ static struct usb_gadget_strings *dev_strings[] = {
 
 /****************************** Configurations ******************************/
 
-static int __init do_config(struct usb_configuration *c)
+static int do_config(struct usb_configuration *c)
 {
        struct hidg_func_node *e, *n;
        int status = 0;
@@ -147,7 +147,7 @@ static struct usb_configuration config_driver = {
 
 /****************************** Gadget Bind ******************************/
 
-static int __init hid_bind(struct usb_composite_dev *cdev)
+static int hid_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget *gadget = cdev->gadget;
        struct list_head *tmp;
@@ -205,7 +205,7 @@ put:
        return status;
 }
 
-static int __exit hid_unbind(struct usb_composite_dev *cdev)
+static int hid_unbind(struct usb_composite_dev *cdev)
 {
        struct hidg_func_node *n;
 
@@ -216,7 +216,7 @@ static int __exit hid_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static int __init hidg_plat_driver_probe(struct platform_device *pdev)
+static int hidg_plat_driver_probe(struct platform_device *pdev)
 {
        struct hidg_func_descriptor *func = dev_get_platdata(&pdev->dev);
        struct hidg_func_node *entry;
@@ -252,13 +252,13 @@ static int hidg_plat_driver_remove(struct platform_device *pdev)
 /****************************** Some noise ******************************/
 
 
-static __refdata struct usb_composite_driver hidg_driver = {
+static struct usb_composite_driver hidg_driver = {
        .name           = "g_hid",
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = hid_bind,
-       .unbind         = __exit_p(hid_unbind),
+       .unbind         = hid_unbind,
 };
 
 static struct platform_driver hidg_plat_driver = {
index 8e27a8c9644470bfa8d5b44fc95a198aeda7214f..e7bfb081f111e4b60e55bce9ae5cd5f0743dad20 100644 (file)
@@ -130,7 +130,7 @@ static int msg_thread_exits(struct fsg_common *common)
        return 0;
 }
 
-static int __init msg_do_config(struct usb_configuration *c)
+static int msg_do_config(struct usb_configuration *c)
 {
        struct fsg_opts *opts;
        int ret;
@@ -170,7 +170,7 @@ static struct usb_configuration msg_config_driver = {
 
 /****************************** Gadget Bind ******************************/
 
-static int __init msg_bind(struct usb_composite_dev *cdev)
+static int msg_bind(struct usb_composite_dev *cdev)
 {
        static const struct fsg_operations ops = {
                .thread_exits = msg_thread_exits,
@@ -248,7 +248,7 @@ static int msg_unbind(struct usb_composite_dev *cdev)
 
 /****************************** Some noise ******************************/
 
-static __refdata struct usb_composite_driver msg_driver = {
+static struct usb_composite_driver msg_driver = {
        .name           = "g_mass_storage",
        .dev            = &msg_device_desc,
        .max_speed      = USB_SPEED_SUPER,
index 39d27bb343b410fc8eae1c6aefb59bc2bb1fe0d5..b21b51f0c9fadb27bbe319791ad970502eb88aa3 100644 (file)
@@ -149,7 +149,7 @@ static struct usb_function *f_acm_rndis;
 static struct usb_function *f_rndis;
 static struct usb_function *f_msg_rndis;
 
-static __init int rndis_do_config(struct usb_configuration *c)
+static int rndis_do_config(struct usb_configuration *c)
 {
        struct fsg_opts *fsg_opts;
        int ret;
@@ -237,7 +237,7 @@ static struct usb_function *f_acm_multi;
 static struct usb_function *f_ecm;
 static struct usb_function *f_msg_multi;
 
-static __init int cdc_do_config(struct usb_configuration *c)
+static int cdc_do_config(struct usb_configuration *c)
 {
        struct fsg_opts *fsg_opts;
        int ret;
@@ -466,7 +466,7 @@ fail:
        return status;
 }
 
-static int __exit multi_unbind(struct usb_composite_dev *cdev)
+static int multi_unbind(struct usb_composite_dev *cdev)
 {
 #ifdef CONFIG_USB_G_MULTI_CDC
        usb_put_function(f_msg_multi);
@@ -497,13 +497,13 @@ static int __exit multi_unbind(struct usb_composite_dev *cdev)
 /****************************** Some noise ******************************/
 
 
-static __refdata struct usb_composite_driver multi_driver = {
+static struct usb_composite_driver multi_driver = {
        .name           = "g_multi",
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = multi_bind,
-       .unbind         = __exit_p(multi_unbind),
+       .unbind         = multi_unbind,
        .needs_serial   = 1,
 };
 
index e90e23db2acba192330741b9219c5d28e6a94b38..6ce7421412e9c14d7766210ab3442d06ca8f49d6 100644 (file)
@@ -107,7 +107,7 @@ static struct usb_function *f_ncm;
 
 /*-------------------------------------------------------------------------*/
 
-static int __init ncm_do_config(struct usb_configuration *c)
+static int ncm_do_config(struct usb_configuration *c)
 {
        int status;
 
@@ -143,7 +143,7 @@ static struct usb_configuration ncm_config_driver = {
 
 /*-------------------------------------------------------------------------*/
 
-static int __init gncm_bind(struct usb_composite_dev *cdev)
+static int gncm_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
        struct f_ncm_opts       *ncm_opts;
@@ -186,7 +186,7 @@ fail:
        return status;
 }
 
-static int __exit gncm_unbind(struct usb_composite_dev *cdev)
+static int gncm_unbind(struct usb_composite_dev *cdev)
 {
        if (!IS_ERR_OR_NULL(f_ncm))
                usb_put_function(f_ncm);
@@ -195,13 +195,13 @@ static int __exit gncm_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver ncm_driver = {
+static struct usb_composite_driver ncm_driver = {
        .name           = "g_ncm",
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = gncm_bind,
-       .unbind         = __exit_p(gncm_unbind),
+       .unbind         = gncm_unbind,
 };
 
 module_usb_composite_driver(ncm_driver);
index 9b8fd701648ced489d50ab562e8a8288afaf1125..4bb498a38a1c01eb17077844bc163028c3e22b93 100644 (file)
@@ -118,7 +118,7 @@ static struct usb_function_instance *fi_obex1;
 static struct usb_function_instance *fi_obex2;
 static struct usb_function_instance *fi_phonet;
 
-static int __init nokia_bind_config(struct usb_configuration *c)
+static int nokia_bind_config(struct usb_configuration *c)
 {
        struct usb_function *f_acm;
        struct usb_function *f_phonet = NULL;
@@ -224,7 +224,7 @@ err_get_acm:
        return status;
 }
 
-static int __init nokia_bind(struct usb_composite_dev *cdev)
+static int nokia_bind(struct usb_composite_dev *cdev)
 {
        struct usb_gadget       *gadget = cdev->gadget;
        int                     status;
@@ -307,7 +307,7 @@ err_usb:
        return status;
 }
 
-static int __exit nokia_unbind(struct usb_composite_dev *cdev)
+static int nokia_unbind(struct usb_composite_dev *cdev)
 {
        if (!IS_ERR_OR_NULL(f_obex1_cfg2))
                usb_put_function(f_obex1_cfg2);
@@ -338,13 +338,13 @@ static int __exit nokia_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver nokia_driver = {
+static struct usb_composite_driver nokia_driver = {
        .name           = "g_nokia",
        .dev            = &device_desc,
        .strings        = dev_strings,
        .max_speed      = USB_SPEED_HIGH,
        .bind           = nokia_bind,
-       .unbind         = __exit_p(nokia_unbind),
+       .unbind         = nokia_unbind,
 };
 
 module_usb_composite_driver(nokia_driver);
index d5b6ee725a2ac04030ac52fc4b4e9c93477a90b6..1ce7df1060a5b237ae2532f514d95e3edd63cf34 100644 (file)
@@ -126,7 +126,7 @@ static struct usb_configuration printer_cfg_driver = {
        .bmAttributes           = USB_CONFIG_ATT_ONE | USB_CONFIG_ATT_SELFPOWER,
 };
 
-static int __init printer_do_config(struct usb_configuration *c)
+static int printer_do_config(struct usb_configuration *c)
 {
        struct usb_gadget       *gadget = c->cdev->gadget;
        int                     status = 0;
@@ -152,7 +152,7 @@ static int __init printer_do_config(struct usb_configuration *c)
        return status;
 }
 
-static int __init printer_bind(struct usb_composite_dev *cdev)
+static int printer_bind(struct usb_composite_dev *cdev)
 {
        struct f_printer_opts *opts;
        int ret, len;
@@ -191,7 +191,7 @@ static int __init printer_bind(struct usb_composite_dev *cdev)
        return ret;
 }
 
-static int __exit printer_unbind(struct usb_composite_dev *cdev)
+static int printer_unbind(struct usb_composite_dev *cdev)
 {
        usb_put_function(f_printer);
        usb_put_function_instance(fi_printer);
@@ -199,7 +199,7 @@ static int __exit printer_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver printer_driver = {
+static struct usb_composite_driver printer_driver = {
        .name           = shortname,
        .dev            = &device_desc,
        .strings        = dev_strings,
index 1f5f978d35d5318be56d1e069750d42e18b6b3c5..8b7528f9b78eff02a7d20bca4122429d6294be6d 100644 (file)
@@ -174,7 +174,7 @@ out:
        return ret;
 }
 
-static int __init gs_bind(struct usb_composite_dev *cdev)
+static int gs_bind(struct usb_composite_dev *cdev)
 {
        int                     status;
 
@@ -230,7 +230,7 @@ static int gs_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver gserial_driver = {
+static struct usb_composite_driver gserial_driver = {
        .name           = "g_serial",
        .dev            = &device_desc,
        .strings        = dev_strings,
index 8b80addc4ce6a5aaae4a330f4e27abd3ea67c5f8..f9b4882fce528f7cd6fa04ec66d90109a3a12047 100644 (file)
@@ -2397,7 +2397,7 @@ static int usb_target_bind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver usbg_driver = {
+static struct usb_composite_driver usbg_driver = {
        .name           = "g_target",
        .dev            = &usbg_device_desc,
        .strings        = usbg_strings,
index 04a3da20f74248442f6cabdbb4853db2b0f4f3b9..72c976bf3530f595115a9536267069c50e13aeba 100644 (file)
@@ -334,7 +334,7 @@ static const struct uvc_descriptor_header * const uvc_ss_streaming_cls[] = {
  * USB configuration
  */
 
-static int __init
+static int
 webcam_config_bind(struct usb_configuration *c)
 {
        int status = 0;
@@ -358,7 +358,7 @@ static struct usb_configuration webcam_config_driver = {
        .MaxPower               = CONFIG_USB_GADGET_VBUS_DRAW,
 };
 
-static int /* __init_or_exit */
+static int
 webcam_unbind(struct usb_composite_dev *cdev)
 {
        if (!IS_ERR_OR_NULL(f_uvc))
@@ -368,7 +368,7 @@ webcam_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static int __init
+static int
 webcam_bind(struct usb_composite_dev *cdev)
 {
        struct f_uvc_opts *uvc_opts;
@@ -422,7 +422,7 @@ error:
  * Driver
  */
 
-static __refdata struct usb_composite_driver webcam_driver = {
+static struct usb_composite_driver webcam_driver = {
        .name           = "g_webcam",
        .dev            = &webcam_device_descriptor,
        .strings        = webcam_device_strings,
index 5ee95152493c2b6b0be74257cc437bdb560f250b..c986e8addb90ac809428f780e04cfb9242c22a90 100644 (file)
@@ -272,7 +272,7 @@ static struct usb_function_instance *func_inst_lb;
 module_param_named(qlen, gzero_options.qlen, uint, S_IRUGO|S_IWUSR);
 MODULE_PARM_DESC(qlen, "depth of loopback queue");
 
-static int __init zero_bind(struct usb_composite_dev *cdev)
+static int zero_bind(struct usb_composite_dev *cdev)
 {
        struct f_ss_opts        *ss_opts;
        struct f_lb_opts        *lb_opts;
@@ -400,7 +400,7 @@ static int zero_unbind(struct usb_composite_dev *cdev)
        return 0;
 }
 
-static __refdata struct usb_composite_driver zero_driver = {
+static struct usb_composite_driver zero_driver = {
        .name           = "zero",
        .dev            = &device_desc,
        .strings        = dev_strings,
index 2fbedca3c2b4eb80a19325c3bcc54913df6a7c27..fc4226462f8f5da2f71741a77093b7d69343afe3 100644 (file)
@@ -1942,7 +1942,7 @@ err_unprepare_fclk:
        return retval;
 }
 
-static int __exit at91udc_remove(struct platform_device *pdev)
+static int at91udc_remove(struct platform_device *pdev)
 {
        struct at91_udc *udc = platform_get_drvdata(pdev);
        unsigned long   flags;
@@ -2018,7 +2018,7 @@ static int at91udc_resume(struct platform_device *pdev)
 #endif
 
 static struct platform_driver at91_udc_driver = {
-       .remove         = __exit_p(at91udc_remove),
+       .remove         = at91udc_remove,
        .shutdown       = at91udc_shutdown,
        .suspend        = at91udc_suspend,
        .resume         = at91udc_resume,
index 4c01953a0869cf67693ec2842d48b508770927a2..351d48550c332af0768e43912f0d21069fbf8ea4 100644 (file)
@@ -2186,7 +2186,7 @@ static int usba_udc_probe(struct platform_device *pdev)
        return 0;
 }
 
-static int __exit usba_udc_remove(struct platform_device *pdev)
+static int usba_udc_remove(struct platform_device *pdev)
 {
        struct usba_udc *udc;
        int i;
@@ -2258,7 +2258,7 @@ static int usba_udc_resume(struct device *dev)
 static SIMPLE_DEV_PM_OPS(usba_udc_pm_ops, usba_udc_suspend, usba_udc_resume);
 
 static struct platform_driver udc_driver = {
-       .remove         = __exit_p(usba_udc_remove),
+       .remove         = usba_udc_remove,
        .driver         = {
                .name           = "atmel_usba_udc",
                .pm             = &usba_udc_pm_ops,
index 55fcb930f92e404252620e14cd3556314287714e..c60022b46a4835b4cc2b151b7a46193fe0f5554b 100644 (file)
@@ -2525,7 +2525,7 @@ err_kfree:
 /* Driver removal function
  * Free resources and finish pending transactions
  */
-static int __exit fsl_udc_remove(struct platform_device *pdev)
+static int fsl_udc_remove(struct platform_device *pdev)
 {
        struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
@@ -2663,7 +2663,7 @@ static const struct platform_device_id fsl_udc_devtype[] = {
 };
 MODULE_DEVICE_TABLE(platform, fsl_udc_devtype);
 static struct platform_driver udc_driver = {
-       .remove         = __exit_p(fsl_udc_remove),
+       .remove         = fsl_udc_remove,
        /* Just for FSL i.mx SoC currently */
        .id_table       = fsl_udc_devtype,
        /* these suspend and resume are not usb suspend and resume */
index fb4df159d32d59cb81c8cdbbfd9b33b3aa1d8d89..3970f453de4903fb55c21831e4cb511b2a2afc11 100644 (file)
@@ -1342,7 +1342,7 @@ static const struct usb_gadget_ops fusb300_gadget_ops = {
        .udc_stop       = fusb300_udc_stop,
 };
 
-static int __exit fusb300_remove(struct platform_device *pdev)
+static int fusb300_remove(struct platform_device *pdev)
 {
        struct fusb300 *fusb300 = platform_get_drvdata(pdev);
 
@@ -1492,7 +1492,7 @@ clean_up:
 }
 
 static struct platform_driver fusb300_driver = {
-       .remove =       __exit_p(fusb300_remove),
+       .remove =       fusb300_remove,
        .driver         = {
                .name = (char *) udc_name,
        },
index 8c7c83c937139b084b48dba9a548683c865481d2..309706fe4bf0ab0fd61d37a492a2bef0d73deb2a 100644 (file)
@@ -1528,7 +1528,7 @@ static const struct usb_gadget_ops m66592_gadget_ops = {
        .pullup                 = m66592_pullup,
 };
 
-static int __exit m66592_remove(struct platform_device *pdev)
+static int m66592_remove(struct platform_device *pdev)
 {
        struct m66592           *m66592 = platform_get_drvdata(pdev);
 
@@ -1695,7 +1695,7 @@ clean_up:
 
 /*-------------------------------------------------------------------------*/
 static struct platform_driver m66592_driver = {
-       .remove =       __exit_p(m66592_remove),
+       .remove =       m66592_remove,
        .driver         = {
                .name = (char *) udc_name,
        },
index 2495fe9c95c5855f0c18be45d6f58deb0cf21a45..0293f7169deeace9688bab19316b2bd04fa8641c 100644 (file)
@@ -1820,7 +1820,7 @@ static const struct usb_gadget_ops r8a66597_gadget_ops = {
        .set_selfpowered        = r8a66597_set_selfpowered,
 };
 
-static int __exit r8a66597_remove(struct platform_device *pdev)
+static int r8a66597_remove(struct platform_device *pdev)
 {
        struct r8a66597         *r8a66597 = platform_get_drvdata(pdev);
 
@@ -1974,7 +1974,7 @@ clean_up2:
 
 /*-------------------------------------------------------------------------*/
 static struct platform_driver r8a66597_driver = {
-       .remove =       __exit_p(r8a66597_remove),
+       .remove =       r8a66597_remove,
        .driver         = {
                .name = (char *) udc_name,
        },
index b808951491ccbfcdd949d8e78f7c1cc2b4c55f47..99fd9a5667dfd4997092d982c0beae28b578a17c 100644 (file)
@@ -1487,7 +1487,7 @@ static int s3c2410_udc_pullup(struct usb_gadget *gadget, int is_on)
 
        dprintk(DEBUG_NORMAL, "%s()\n", __func__);
 
-       s3c2410_udc_set_pullup(udc, is_on ? 0 : 1);
+       s3c2410_udc_set_pullup(udc, is_on);
        return 0;
 }
 
index dd3e9fd31b801fbc9b8e200930f2dfe814aed2d3..1f24274477ab9352ce582f96bfbfbdd61605d07e 100644 (file)
@@ -2071,8 +2071,8 @@ static int xudc_probe(struct platform_device *pdev)
        /* Map the registers */
        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        udc->addr = devm_ioremap_resource(&pdev->dev, res);
-       if (!udc->addr)
-               return -ENOMEM;
+       if (IS_ERR(udc->addr))
+               return PTR_ERR(udc->addr);
 
        irq = platform_get_irq(pdev, 0);
        if (irq < 0) {
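
The xudc fix above matters because devm_ioremap_resource() reports failure as an ERR_PTR-encoded errno and never returns NULL, so the old !udc->addr test could never fire. A userspace sketch of that encoding convention:

#include <stdbool.h>
#include <stdint.h>

#define MAX_ERRNO 4095			/* matches the kernel convention */

static inline void *ERR_PTR(long err)	   { return (void *)err; }
static inline long  PTR_ERR(const void *p) { return (long)p; }
static inline bool  IS_ERR(const void *p)
{
	/* errors occupy the top MAX_ERRNO addresses; NULL is not among them */
	return (uintptr_t)p >= (uintptr_t)-MAX_ERRNO;
}
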
index f5397a517c54ce5a7df00b60e4b2482a2fd8b16c..7d34cbfaf373b54826cd0ffad697ef6f5d36c096 100644 (file)
@@ -2026,8 +2026,13 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
                break;
        case COMP_DEV_ERR:
        case COMP_STALL:
+               frame->status = -EPROTO;
+               skip_td = true;
+               break;
        case COMP_TX_ERR:
                frame->status = -EPROTO;
+               if (event_trb != td->last_trb)
+                       return 0;
                skip_td = true;
                break;
        case COMP_STOP:
@@ -2640,7 +2645,7 @@ irqreturn_t xhci_irq(struct usb_hcd *hcd)
                xhci_halt(xhci);
 hw_died:
                spin_unlock(&xhci->lock);
-               return -ESHUTDOWN;
+               return IRQ_HANDLED;
        }
 
        /*
index ec8ac16748547a2ac87bf9aa225ed0a36c0bf7df..36bf089b708fe5219258d46305719b7a999a23f6 100644 (file)
@@ -3682,18 +3682,21 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
 {
        struct xhci_hcd *xhci = hcd_to_xhci(hcd);
        unsigned long flags;
-       int ret;
+       int ret, slot_id;
        struct xhci_command *command;
 
        command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
        if (!command)
                return 0;
 
+       /* xhci->slot_id and xhci->addr_dev are not thread-safe */
+       mutex_lock(&xhci->mutex);
        spin_lock_irqsave(&xhci->lock, flags);
        command->completion = &xhci->addr_dev;
        ret = xhci_queue_slot_control(xhci, command, TRB_ENABLE_SLOT, 0);
        if (ret) {
                spin_unlock_irqrestore(&xhci->lock, flags);
+               mutex_unlock(&xhci->mutex);
                xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
                kfree(command);
                return 0;
@@ -3702,8 +3705,10 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
        spin_unlock_irqrestore(&xhci->lock, flags);
 
        wait_for_completion(command->completion);
+       slot_id = xhci->slot_id;
+       mutex_unlock(&xhci->mutex);
 
-       if (!xhci->slot_id || command->status != COMP_SUCCESS) {
+       if (!slot_id || command->status != COMP_SUCCESS) {
                xhci_err(xhci, "Error while assigning device slot ID\n");
                xhci_err(xhci, "Max number of devices this xHCI host supports is %u.\n",
                                HCS_MAX_SLOTS(
@@ -3728,11 +3733,11 @@ int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
         * xhci_discover_or_reset_device(), which may be called as part of
         * mass storage driver error handling.
         */
-       if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
+       if (!xhci_alloc_virt_device(xhci, slot_id, udev, GFP_NOIO)) {
                xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
                goto disable_slot;
        }
-       udev->slot_id = xhci->slot_id;
+       udev->slot_id = slot_id;
 
 #ifndef CONFIG_USB_DEFAULT_PERSIST
        /*
@@ -3778,12 +3783,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        struct xhci_slot_ctx *slot_ctx;
        struct xhci_input_control_ctx *ctrl_ctx;
        u64 temp_64;
-       struct xhci_command *command;
+       struct xhci_command *command = NULL;
+
+       mutex_lock(&xhci->mutex);
 
        if (!udev->slot_id) {
                xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                                "Bad Slot ID %d", udev->slot_id);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        virt_dev = xhci->devs[udev->slot_id];
@@ -3796,7 +3804,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                 */
                xhci_warn(xhci, "Virt dev invalid for slot_id 0x%x!\n",
                        udev->slot_id);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
 
        if (setup == SETUP_CONTEXT_ONLY) {
@@ -3804,13 +3813,15 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
                    SLOT_STATE_DEFAULT) {
                        xhci_dbg(xhci, "Slot already in default state\n");
-                       return 0;
+                       goto out;
                }
        }
 
        command = xhci_alloc_command(xhci, false, false, GFP_KERNEL);
-       if (!command)
-               return -ENOMEM;
+       if (!command) {
+               ret = -ENOMEM;
+               goto out;
+       }
 
        command->in_ctx = virt_dev->in_ctx;
        command->completion = &xhci->addr_dev;
@@ -3820,8 +3831,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        if (!ctrl_ctx) {
                xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
                                __func__);
-               kfree(command);
-               return -EINVAL;
+               ret = -EINVAL;
+               goto out;
        }
        /*
         * If this is the first Set Address since device plug-in or
@@ -3848,8 +3859,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                spin_unlock_irqrestore(&xhci->lock, flags);
                xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                                "FIXME: allocate a command ring segment");
-               kfree(command);
-               return ret;
+               goto out;
        }
        xhci_ring_cmd_db(xhci);
        spin_unlock_irqrestore(&xhci->lock, flags);
@@ -3896,10 +3906,8 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
                ret = -EINVAL;
                break;
        }
-       if (ret) {
-               kfree(command);
-               return ret;
-       }
+       if (ret)
+               goto out;
        temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
        xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                        "Op regs DCBAA ptr = %#016llx", temp_64);
@@ -3932,8 +3940,10 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
        xhci_dbg_trace(xhci, trace_xhci_dbg_address,
                       "Internal device address = %d",
                       le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK);
+out:
+       mutex_unlock(&xhci->mutex);
        kfree(command);
-       return 0;
+       return ret;
 }
 
 int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
@@ -4855,6 +4865,7 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks)
                return 0;
        }
 
+       mutex_init(&xhci->mutex);
        xhci->cap_regs = hcd->regs;
        xhci->op_regs = hcd->regs +
                HC_LENGTH(readl(&xhci->cap_regs->hc_capbase));
@@ -5011,4 +5022,12 @@ static int __init xhci_hcd_init(void)
        BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
        return 0;
 }
+
+/*
+ * If an init function is provided, an exit function must also be provided
+ * to allow module unload.
+ */
+static void __exit xhci_hcd_fini(void) { }
+
 module_init(xhci_hcd_init);
+module_exit(xhci_hcd_fini);
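
The xhci changes above serialize users of the shared addr_dev completion and slot_id field behind a new mutex, and snapshot slot_id before unlocking. The locking shape, reduced to a pthread sketch:

#include <pthread.h>

struct ctrl {
	pthread_mutex_t mutex;	/* plays the role of xhci->mutex */
	int shared_slot_id;	/* written by the command-completion path */
};

static int alloc_slot(struct ctrl *c)
{
	int slot;

	pthread_mutex_lock(&c->mutex);
	/* ...queue the command and wait for its completion here... */
	slot = c->shared_slot_id;	/* snapshot while still holding the lock */
	pthread_mutex_unlock(&c->mutex);
	return slot;			/* private copy, safe to use unlocked */
}
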
index 8e421b89632ddaa4eaf30ee34ef60f89b7ac3e2d..6977f8491fa7ced6ea317bf75354a0eb7703670e 100644 (file)
@@ -1267,7 +1267,7 @@ union xhci_trb {
  * since the command ring is 64-byte aligned.
  * It must also be greater than 16.
  */
-#define TRBS_PER_SEGMENT       64
+#define TRBS_PER_SEGMENT       256
 /* Allow two commands + a link TRB, along with any reserved command TRBs */
 #define MAX_RSVD_CMD_TRBS      (TRBS_PER_SEGMENT - 3)
 #define TRB_SEGMENT_SIZE       (TRBS_PER_SEGMENT*16)
@@ -1497,6 +1497,8 @@ struct xhci_hcd {
        struct list_head        lpm_failed_devs;
 
        /* slot enabling and address device helpers */
+       /* not thread-safe, so serialize access with this mutex */
+       struct mutex mutex;
        struct completion       addr_dev;
        int slot_id;
        /* For USB 3.0 LPM enable/disable. */
index 3789b08ef67b037781e278c41c0d4b2f2d33e5d9..6dca3d794ced6e1948dd5cbb180e708893f7ba83 100644 (file)
@@ -2021,13 +2021,7 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
        if (musb->ops->quirks)
                musb->io.quirks = musb->ops->quirks;
 
-       /* At least tusb6010 has it's own offsets.. */
-       if (musb->ops->ep_offset)
-               musb->io.ep_offset = musb->ops->ep_offset;
-       if (musb->ops->ep_select)
-               musb->io.ep_select = musb->ops->ep_select;
-
-       /* ..and some devices use indexed offset or flat offset */
+       /* Most devices use indexed offset or flat offset */
        if (musb->io.quirks & MUSB_INDEXED_EP) {
                musb->io.ep_offset = musb_indexed_ep_offset;
                musb->io.ep_select = musb_indexed_ep_select;
@@ -2036,6 +2030,12 @@ musb_init_controller(struct device *dev, int nIrq, void __iomem *ctrl)
                musb->io.ep_select = musb_flat_ep_select;
        }
 
+       /* At least tusb6010 has its own offsets */
+       if (musb->ops->ep_offset)
+               musb->io.ep_offset = musb->ops->ep_offset;
+       if (musb->ops->ep_select)
+               musb->io.ep_select = musb->ops->ep_select;
+
        if (musb->ops->fifo_mode)
                fifo_mode = musb->ops->fifo_mode;
        else
index 7225d526df0446ff26fd69ef65268265737d8c66..03ab0c699f74dd1768f2b769ca823eb7904132ab 100644 (file)
@@ -1179,7 +1179,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
                }
                err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                ab8500_usb_link_status_irq,
-                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
                                "usb-link-status", ab);
                if (err < 0) {
                        dev_err(ab->dev, "request_irq failed for link status irq\n");
@@ -1195,7 +1195,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
                }
                err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                ab8500_usb_disconnect_irq,
-                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
                                "usb-id-fall", ab);
                if (err < 0) {
                        dev_err(ab->dev, "request_irq failed for ID fall irq\n");
@@ -1211,7 +1211,7 @@ static int ab8500_usb_irq_setup(struct platform_device *pdev,
                }
                err = devm_request_threaded_irq(&pdev->dev, irq, NULL,
                                ab8500_usb_disconnect_irq,
-                               IRQF_NO_SUSPEND | IRQF_SHARED,
+                               IRQF_NO_SUSPEND | IRQF_SHARED | IRQF_ONESHOT,
                                "usb-vbus-fall", ab);
                if (err < 0) {
                        dev_err(ab->dev, "request_irq failed for Vbus fall irq\n");
index 1e0e10dd6ba51904b34bb32a9155cba7a5173f8f..3af263cc0caa3760a7a1704f329803e4951e62ae 100644 (file)
@@ -94,7 +94,7 @@ struct isp1301 {
 
 #if defined(CONFIG_MACH_OMAP_H2) || defined(CONFIG_MACH_OMAP_H3)
 
-#if    defined(CONFIG_TPS65010) || defined(CONFIG_TPS65010_MODULE)
+#if    defined(CONFIG_TPS65010) || (defined(CONFIG_TPS65010_MODULE) && defined(MODULE))
 
 #include <linux/i2c/tps65010.h>
 
index 845f658276b106342907c7606a078dbfa47d06d1..2b28443d07b92daed26660f1d80f0bd390937992 100644 (file)
@@ -401,7 +401,8 @@ static int tahvo_usb_probe(struct platform_device *pdev)
        dev_set_drvdata(&pdev->dev, tu);
 
        tu->irq = platform_get_irq(pdev, 0);
-       ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt, 0,
+       ret = request_threaded_irq(tu->irq, NULL, tahvo_usb_vbus_interrupt,
+                                  IRQF_ONESHOT,
                                   "tahvo-vbus", tu);
        if (ret) {
                dev_err(&pdev->dev, "could not register tahvo-vbus irq: %d\n",
index 8597cf9cfceb7715883738ac8cf1c0380e9a00b1..c0f5c652d272c8959f5b3d59461e1af139d6f7fd 100644 (file)
@@ -611,6 +611,8 @@ struct usbhs_pkt_handle usbhs_fifo_pio_push_handler = {
 static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
 {
        struct usbhs_pipe *pipe = pkt->pipe;
+       struct usbhs_priv *priv = usbhs_pipe_to_priv(pipe);
+       struct usbhs_fifo *fifo = usbhsf_get_cfifo(priv);
 
        if (usbhs_pipe_is_busy(pipe))
                return 0;
@@ -624,6 +626,9 @@ static int usbhsf_prepare_pop(struct usbhs_pkt *pkt, int *is_done)
        usbhs_pipe_data_sequence(pipe, pkt->sequence);
        pkt->sequence = -1; /* -1 sequence will be ignored */
 
+       if (usbhs_pipe_is_dcp(pipe))
+               usbhsf_fifo_clear(pipe, fifo);
+
        usbhs_pipe_set_trans_count_if_bulk(pipe, pkt->length);
        usbhs_pipe_enable(pipe);
        usbhs_pipe_running(pipe, 1);
@@ -673,7 +678,14 @@ static int usbhsf_pio_try_pop(struct usbhs_pkt *pkt, int *is_done)
                *is_done = 1;
                usbhsf_rx_irq_ctrl(pipe, 0);
                usbhs_pipe_running(pipe, 0);
-               usbhs_pipe_disable(pipe);       /* disable pipe first */
+               /*
+                * In function mode the controller may enter the Control
+                * Write status stage at this point, so the driver must not
+                * disable the pipe here; otherwise the controller cannot
+                * complete the status stage.
+                */
+               if (!usbhs_mod_is_host(priv) && !usbhs_pipe_is_dcp(pipe))
+                       usbhs_pipe_disable(pipe);       /* disable pipe first */
        }
 
        /*
@@ -1227,15 +1239,21 @@ static void usbhsf_dma_init_dt(struct device *dev, struct usbhs_fifo *fifo,
 {
        char name[16];
 
-       snprintf(name, sizeof(name), "tx%d", channel);
-       fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
-       if (IS_ERR(fifo->tx_chan))
-               fifo->tx_chan = NULL;
-
-       snprintf(name, sizeof(name), "rx%d", channel);
-       fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
-       if (IS_ERR(fifo->rx_chan))
-               fifo->rx_chan = NULL;
+       /*
+        * To avoid complex handling for DnFIFOs, the driver uses each
+        * DnFIFO in one direction only (TX or RX, not bidirectional):
+        * odd channels are TX, even channels are RX.
+        */
+       snprintf(name, sizeof(name), "ch%d", channel);
+       if (channel & 1) {
+               fifo->tx_chan = dma_request_slave_channel_reason(dev, name);
+               if (IS_ERR(fifo->tx_chan))
+                       fifo->tx_chan = NULL;
+       } else {
+               fifo->rx_chan = dma_request_slave_channel_reason(dev, name);
+               if (IS_ERR(fifo->rx_chan))
+                       fifo->rx_chan = NULL;
+       }
 }
 
 static void usbhsf_dma_init(struct usbhs_priv *priv, struct usbhs_fifo *fifo,
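
The rewritten usbhsf_dma_init_dt() above dedicates each DnFIFO to a single direction keyed off channel parity. The selection logic on its own:

#include <stdio.h>

static void pick_direction(int channel)
{
	char name[16];

	snprintf(name, sizeof(name), "ch%d", channel);
	if (channel & 1)
		printf("%s: request as TX\n", name);	/* odd channels */
	else
		printf("%s: request as RX\n", name);	/* even channels */
}
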
index 84ce2d74894c9c3b25c7bcf0185f23887492eb94..ffd739e31bfc193b058628560e86ea6f9b96f375 100644 (file)
@@ -127,6 +127,8 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(0x10C4, 0x88A5) }, /* Planet Innovation Ingeni ZigBee USB Device */
        { USB_DEVICE(0x10C4, 0x8946) }, /* Ketra N1 Wireless Interface */
        { USB_DEVICE(0x10C4, 0x8977) }, /* CEL MeshWorks DevKit Device */
+       { USB_DEVICE(0x10C4, 0x8998) }, /* KCF Technologies PRN */
+       { USB_DEVICE(0x10C4, 0x8A2A) }, /* HubZ dual ZigBee and Z-Wave dongle */
        { USB_DEVICE(0x10C4, 0xEA60) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA61) }, /* Silicon Labs factory default */
        { USB_DEVICE(0x10C4, 0xEA70) }, /* Silicon Labs factory default */
index 8eb68a31cab6c4021617ca555cd58b086872c112..4c8b3b82103d6318ea1d46250ad708bb3f722260 100644 (file)
@@ -699,6 +699,7 @@ static const struct usb_device_id id_table_combined[] = {
        { USB_DEVICE(XSENS_VID, XSENS_AWINDA_DONGLE_PID) },
        { USB_DEVICE(XSENS_VID, XSENS_AWINDA_STATION_PID) },
        { USB_DEVICE(XSENS_VID, XSENS_CONVERTER_PID) },
+       { USB_DEVICE(XSENS_VID, XSENS_MTDEVBOARD_PID) },
        { USB_DEVICE(XSENS_VID, XSENS_MTW_PID) },
        { USB_DEVICE(FTDI_VID, FTDI_OMNI1509) },
        { USB_DEVICE(MOBILITY_VID, MOBILITY_USB_SERIAL_PID) },
index 4e4f46f3c89c025670d42860756f39b2bb62ae24..792e054126de51402711814f5962945f7742e188 100644 (file)
 #define XSENS_AWINDA_STATION_PID 0x0101
 #define XSENS_AWINDA_DONGLE_PID 0x0102
 #define XSENS_MTW_PID          0x0200  /* Xsens MTw */
+#define XSENS_MTDEVBOARD_PID   0x0300  /* Motion Tracker Development Board */
 #define XSENS_CONVERTER_PID    0xD00D  /* Xsens USB-serial converter */
 
 /* Xsens devices using FTDI VID */
index 829604d11f3fa72a6b5bec5580f150c27c513cbf..f5257af33ecfbc30bb0989fe03fea09af215639e 100644 (file)
@@ -61,7 +61,6 @@ static const struct usb_device_id id_table[] = {
        { USB_DEVICE(DCU10_VENDOR_ID, DCU10_PRODUCT_ID) },
        { USB_DEVICE(SITECOM_VENDOR_ID, SITECOM_PRODUCT_ID) },
        { USB_DEVICE(ALCATEL_VENDOR_ID, ALCATEL_PRODUCT_ID) },
-       { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_PRODUCT_ID) },
        { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_SX1),
                .driver_info = PL2303_QUIRK_UART_STATE_IDX0 },
        { USB_DEVICE(SIEMENS_VENDOR_ID, SIEMENS_PRODUCT_ID_X65),
index 71fd9da1d6e7ac6e36ecdf38e8f8192c60fbbc39..e3b7af8adfb73ccefa92d4ba3c2c927a044dfa43 100644 (file)
 #define ALCATEL_VENDOR_ID      0x11f7
 #define ALCATEL_PRODUCT_ID     0x02df
 
-/* Samsung I330 phone cradle */
-#define SAMSUNG_VENDOR_ID      0x04e8
-#define SAMSUNG_PRODUCT_ID     0x8001
-
 #define SIEMENS_VENDOR_ID      0x11f5
 #define SIEMENS_PRODUCT_ID_SX1 0x0001
 #define SIEMENS_PRODUCT_ID_X65 0x0003
index bf2bd40e5f2ac7cdf1291caf1350f686925039bf..60afb39eb73c0b95d261ec367890a306621fd2de 100644 (file)
@@ -95,7 +95,7 @@ static const struct usb_device_id id_table[] = {
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
        { USB_DEVICE(ACER_VENDOR_ID, ACER_S10_ID),
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
-       { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID),
+       { USB_DEVICE_INTERFACE_CLASS(SAMSUNG_VENDOR_ID, SAMSUNG_SCH_I330_ID, 0xff),
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
        { USB_DEVICE(SAMSUNG_VENDOR_ID, SAMSUNG_SPH_I500_ID),
                .driver_info = (kernel_ulong_t)&palm_os_4_probe },
index d684b4b8108ff34a5c4023088d9e927dcb378f6b..caf188800c679e7f24fc329903848a8c1f64d41a 100644 (file)
@@ -766,6 +766,13 @@ UNUSUAL_DEV(  0x059f, 0x0643, 0x0000, 0x0000,
                USB_SC_DEVICE, USB_PR_DEVICE, NULL,
                US_FL_GO_SLOW ),
 
+/* Reported by Christian Schaller <cschalle@redhat.com> */
+UNUSUAL_DEV(  0x059f, 0x0651, 0x0000, 0x0000,
+               "LaCie",
+               "External HDD",
+               USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+               US_FL_NO_WP_DETECT ),
+
 /* Submitted by Joel Bourquard <numlock@freesurf.ch>
  * Some versions of this device need the SubClass and Protocol overrides
  * while others don't.
index 69fab0fd15aec4a5cb05c870984dc7e4bc9dcad4..e9851add6f4ef251defba65a3bab9af359b60b3a 100644 (file)
@@ -907,8 +907,14 @@ static void vfio_pci_request(void *device_data, unsigned int count)
        mutex_lock(&vdev->igate);
 
        if (vdev->req_trigger) {
-               dev_dbg(&vdev->pdev->dev, "Requesting device from user\n");
+               if (!(count % 10))
+                       dev_notice_ratelimited(&vdev->pdev->dev,
+                               "Relaying device request to user (#%u)\n",
+                               count);
                eventfd_signal(vdev->req_trigger, 1);
+       } else if (count == 0) {
+               dev_warn(&vdev->pdev->dev,
+                       "No device request channel registered, blocked until released by user\n");
        }
 
        mutex_unlock(&vdev->igate);
index 0d336625ac7113b0e0cc10d4a9d00283cc673766..e1278fe04b1e7ba16eddbece247c21620b6e8fb7 100644 (file)
@@ -710,6 +710,8 @@ void *vfio_del_group_dev(struct device *dev)
        void *device_data = device->device_data;
        struct vfio_unbound_dev *unbound;
        unsigned int i = 0;
+       long ret;
+       bool interrupted = false;
 
        /*
         * The group exists so long as we have a device reference.  Get
@@ -755,9 +757,22 @@ void *vfio_del_group_dev(struct device *dev)
 
                vfio_device_put(device);
 
-       } while (wait_event_interruptible_timeout(vfio.release_q,
-                                                 !vfio_dev_present(group, dev),
-                                                 HZ * 10) <= 0);
+               if (interrupted) {
+                       ret = wait_event_timeout(vfio.release_q,
+                                       !vfio_dev_present(group, dev), HZ * 10);
+               } else {
+                       ret = wait_event_interruptible_timeout(vfio.release_q,
+                                       !vfio_dev_present(group, dev), HZ * 10);
+                       if (ret == -ERESTARTSYS) {
+                               interrupted = true;
+                               dev_warn(dev,
+                                        "Device is currently in use, task"
+                                        " \"%s\" (%d) "
+                                        "blocked until device is released",
+                                        current->comm, task_pid_nr(current));
+                       }
+               }
+       } while (ret <= 0);
 
        vfio_group_put(group);
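
The rewritten vfio wait loop above downgrades from an interruptible to an uninterruptible wait once a signal arrives, warning the user instead of aborting the teardown. Its control flow, with stubbed-out waits:

#include <stdbool.h>

/* Stubs for wait_event_timeout() and its interruptible variant; here
 * they report "device released" immediately so the sketch terminates. */
static long wait_uninterruptible(void) { return 1; }
static long wait_interruptible(void)   { return 1; }	/* < 0 on signal */

static void wait_until_released(void)
{
	bool interrupted = false;
	long ret;

	do {
		if (interrupted) {
			ret = wait_uninterruptible();
		} else {
			ret = wait_interruptible();
			if (ret < 0) {
				interrupted = true;	/* warn once, keep waiting */
				ret = 0;		/* force another pass */
			}
		}
	} while (ret <= 0);
}
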
 
index 5e19bb53b3a99a4ccc93696bf9792a4f4f1ad7c4..ea32b386797f5d52b70ee6f4028f5e8df43f3a8f 100644 (file)
@@ -1409,8 +1409,7 @@ vhost_scsi_set_endpoint(struct vhost_scsi *vs,
                         * dependency now.
                         */
                        se_tpg = &tpg->se_tpg;
-                       ret = configfs_depend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                                  &se_tpg->tpg_group.cg_item);
+                       ret = target_depend_item(&se_tpg->tpg_group.cg_item);
                        if (ret) {
                                pr_warn("configfs_depend_item() failed: %d\n", ret);
                                kfree(vs_tpg);
@@ -1513,8 +1512,7 @@ vhost_scsi_clear_endpoint(struct vhost_scsi *vs,
                 * to allow vhost-scsi WWPN se_tpg->tpg_group shutdown to occur.
                 */
                se_tpg = &tpg->se_tpg;
-               configfs_undepend_item(se_tpg->se_tpg_tfo->tf_subsys,
-                                      &se_tpg->tpg_group.cg_item);
+               target_undepend_item(&se_tpg->tpg_group.cg_item);
        }
        if (match) {
                for (i = 0; i < VHOST_SCSI_MAX_VQ; i++) {
index 3a145a643e0d5185146001275b47d3d0cc745454..6897f1c1bc732efe36632895fcedc577b7292a33 100644 (file)
@@ -274,6 +274,10 @@ static int pwm_backlight_probe(struct platform_device *pdev)
 
        pb->pwm = devm_pwm_get(&pdev->dev, NULL);
        if (IS_ERR(pb->pwm)) {
+               ret = PTR_ERR(pb->pwm);
+               if (ret == -EPROBE_DEFER)
+                       goto err_alloc;
+
                dev_err(&pdev->dev, "unable to request PWM, trying legacy API\n");
                pb->legacy = true;
                pb->pwm = pwm_request(data->pwm_id, "pwm-backlight");
index 35f7900a057347566bf566392315e2d26fa82ebb..ee3a703acf23aac813d613eafc1dedcf79d02dbb 100644 (file)
@@ -3705,8 +3705,8 @@ default_chipset:
         * access the videomem with writethrough cache
         */
        info->fix.smem_start = (u_long)ZTWO_PADDR(videomemory);
-       videomemory = (u_long)ioremap_writethrough(info->fix.smem_start,
-                                                  info->fix.smem_len);
+       videomemory = (u_long)ioremap_wt(info->fix.smem_start,
+                                        info->fix.smem_len);
        if (!videomemory) {
                dev_warn(&pdev->dev,
                         "Unable to map videomem cached writethrough\n");
index cb9ee25568506a5f3efeca661a4309634f255937..d6ce613e12adea198fa55dcf35a934a4e8dc66a7 100644 (file)
@@ -3185,8 +3185,7 @@ int __init atafb_init(void)
                /* Map the video memory (physical address given) to somewhere
                 * in the kernel address space.
                 */
-               external_screen_base = ioremap_writethrough(external_addr,
-                                                    external_len);
+               external_screen_base = ioremap_wt(external_addr, external_len);
                if (external_vgaiobase)
                        external_vgaiobase =
                          (unsigned long)ioremap(external_vgaiobase, 0x10000);
index a1b7e5fa9b099f7df0011672bde9afeb8d6405e8..9476d196f510c2c93e9275befc652bda0b7412f7 100644 (file)
@@ -241,8 +241,8 @@ static int hpfb_init_one(unsigned long phys_base, unsigned long virt_base)
        fb_info.fix.line_length = fb_width;
        fb_height = (in_8(fb_regs + HPFB_FBHMSB) << 8) | in_8(fb_regs + HPFB_FBHLSB);
        fb_info.fix.smem_len = fb_width * fb_height;
-       fb_start = (unsigned long)ioremap_writethrough(fb_info.fix.smem_start,
-                                                      fb_info.fix.smem_len);
+       fb_start = (unsigned long)ioremap_wt(fb_info.fix.smem_start,
+                                            fb_info.fix.smem_len);
        hpfb_defined.xres = (in_8(fb_regs + HPFB_DWMSB) << 8) | in_8(fb_regs + HPFB_DWLSB);
        hpfb_defined.yres = (in_8(fb_regs + HPFB_DHMSB) << 8) | in_8(fb_regs + HPFB_DHLSB);
        hpfb_defined.xres_virtual = hpfb_defined.xres;
index e894eb278d8336d018d3e6e8c29556dc9b5f3cb5..eba1b7ac729454d30b1d611cd01d45b5ba23407e 100644 (file)
@@ -423,6 +423,7 @@ int vp_set_vq_affinity(struct virtqueue *vq, int cpu)
                if (cpu == -1)
                        irq_set_affinity_hint(irq, NULL);
                else {
+                       cpumask_clear(mask);
                        cpumask_set_cpu(cpu, mask);
                        irq_set_affinity_hint(irq, mask);
                }
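
The added cpumask_clear() fixes an accumulation bug: the mask is reused across calls, and cpumask_set_cpu() only ever adds bits, so without clearing first the affinity hint ends up naming every CPU the queue was ever bound to rather than just the current one. A runnable userspace analogue (a plain uint64_t standing in for struct cpumask; all names here are illustrative):

	#include <stdint.h>
	#include <stdio.h>

	static uint64_t mask;	/* reused across calls, like the driver's cpumask */

	static void set_affinity(int cpu, int clear_first)
	{
		if (clear_first)
			mask = 0;			/* the fix: start from empty */
		mask |= UINT64_C(1) << cpu;		/* cpumask_set_cpu() analogue */
		printf("cpu %d -> mask %#llx\n", cpu, (unsigned long long)mask);
	}

	int main(void)
	{
		set_affinity(1, 0);	/* mask 0x2 */
		set_affinity(3, 0);	/* buggy: mask 0xa, CPU 1 is still set */
		mask = 0;
		set_affinity(1, 1);	/* mask 0x2 */
		set_affinity(3, 1);	/* fixed: mask 0x8, only CPU 3 */
		return 0;
	}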
index 2b8553bd871514db8bfbd4877838c66d30f6598b..38387950490eb8dbd07c85434ac59e5129af2c1f 100644 (file)
@@ -957,7 +957,7 @@ unsigned xen_evtchn_nr_channels(void)
 }
 EXPORT_SYMBOL_GPL(xen_evtchn_nr_channels);
 
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu)
 {
        struct evtchn_bind_virq bind_virq;
        int evtchn, irq, ret;
@@ -971,8 +971,12 @@ int bind_virq_to_irq(unsigned int virq, unsigned int cpu)
                if (irq < 0)
                        goto out;
 
-               irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
-                                             handle_percpu_irq, "virq");
+               if (percpu)
+                       irq_set_chip_and_handler_name(irq, &xen_percpu_chip,
+                                                     handle_percpu_irq, "virq");
+               else
+                       irq_set_chip_and_handler_name(irq, &xen_dynamic_chip,
+                                                     handle_edge_irq, "virq");
 
                bind_virq.virq = virq;
                bind_virq.vcpu = cpu;
@@ -1062,7 +1066,7 @@ int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
 {
        int irq, retval;
 
-       irq = bind_virq_to_irq(virq, cpu);
+       irq = bind_virq_to_irq(virq, cpu, irqflags & IRQF_PERCPU);
        if (irq < 0)
                return irq;
        retval = request_irq(irq, handler, irqflags, devname, dev_id);
index fb9ffcb432779b55620909090b825ead7ddbcede..0923f2cf3c80aa2fb95a7385276de6f497ea46fe 100644 (file)
@@ -149,8 +149,6 @@ extern int v9fs_vfs_unlink(struct inode *i, struct dentry *d);
 extern int v9fs_vfs_rmdir(struct inode *i, struct dentry *d);
 extern int v9fs_vfs_rename(struct inode *old_dir, struct dentry *old_dentry,
                        struct inode *new_dir, struct dentry *new_dentry);
-extern void v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd,
-                       void *p);
 extern struct inode *v9fs_inode_from_fid(struct v9fs_session_info *v9ses,
                                         struct p9_fid *fid,
                                         struct super_block *sb, int new);
index 703342e309f57af329085db6d8f0b1b4814793dd..510040b04c964dbdeab4ab47c29b789fef5c4b5f 100644 (file)
@@ -1224,100 +1224,43 @@ ino_t v9fs_qid2ino(struct p9_qid *qid)
 }
 
 /**
- * v9fs_readlink - read a symlink's location (internal version)
+ * v9fs_vfs_follow_link - follow a symlink path
  * @dentry: dentry for symlink
- * @buffer: buffer to load symlink location into
- * @buflen: length of buffer
- *
+ * @cookie: place to pass the data to put_link()
  */
 
-static int v9fs_readlink(struct dentry *dentry, char *buffer, int buflen)
+static const char *v9fs_vfs_follow_link(struct dentry *dentry, void **cookie)
 {
-       int retval;
-
-       struct v9fs_session_info *v9ses;
-       struct p9_fid *fid;
+       struct v9fs_session_info *v9ses = v9fs_dentry2v9ses(dentry);
+       struct p9_fid *fid = v9fs_fid_lookup(dentry);
        struct p9_wstat *st;
+       char *res;
+
+       p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
 
-       p9_debug(P9_DEBUG_VFS, " %pd\n", dentry);
-       retval = -EPERM;
-       v9ses = v9fs_dentry2v9ses(dentry);
-       fid = v9fs_fid_lookup(dentry);
        if (IS_ERR(fid))
-               return PTR_ERR(fid);
+               return ERR_CAST(fid);
 
        if (!v9fs_proto_dotu(v9ses))
-               return -EBADF;
+               return ERR_PTR(-EBADF);
 
        st = p9_client_stat(fid);
        if (IS_ERR(st))
-               return PTR_ERR(st);
+               return ERR_CAST(st);
 
        if (!(st->mode & P9_DMSYMLINK)) {
-               retval = -EINVAL;
-               goto done;
+               p9stat_free(st);
+               kfree(st);
+               return ERR_PTR(-EINVAL);
        }
+       res = st->extension;
+       st->extension = NULL;
+       if (strlen(res) >= PATH_MAX)
+               res[PATH_MAX - 1] = '\0';
 
-       /* copy extension buffer into buffer */
-       retval = min(strlen(st->extension)+1, (size_t)buflen);
-       memcpy(buffer, st->extension, retval);
-
-       p9_debug(P9_DEBUG_VFS, "%pd -> %s (%.*s)\n",
-                dentry, st->extension, buflen, buffer);
-
-done:
        p9stat_free(st);
        kfree(st);
-       return retval;
-}
-
-/**
- * v9fs_vfs_follow_link - follow a symlink path
- * @dentry: dentry for symlink
- * @nd: nameidata
- *
- */
-
-static void *v9fs_vfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       int len = 0;
-       char *link = __getname();
-
-       p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
-
-       if (!link)
-               link = ERR_PTR(-ENOMEM);
-       else {
-               len = v9fs_readlink(dentry, link, PATH_MAX);
-
-               if (len < 0) {
-                       __putname(link);
-                       link = ERR_PTR(len);
-               } else
-                       link[min(len, PATH_MAX-1)] = 0;
-       }
-       nd_set_link(nd, link);
-
-       return NULL;
-}
-
-/**
- * v9fs_vfs_put_link - release a symlink path
- * @dentry: dentry for symlink
- * @nd: nameidata
- * @p: unused
- *
- */
-
-void
-v9fs_vfs_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
-{
-       char *s = nd_get_link(nd);
-
-       p9_debug(P9_DEBUG_VFS, " %pd %s\n",
-                dentry, IS_ERR(s) ? "<error>" : s);
-       if (!IS_ERR(s))
-               __putname(s);
+       return *cookie = res;
 }
 
 /**
@@ -1370,6 +1313,8 @@ v9fs_vfs_symlink(struct inode *dir, struct dentry *dentry, const char *symname)
        return v9fs_vfs_mkspecial(dir, dentry, P9_DMSYMLINK, symname);
 }
 
+#define U32_MAX_DIGITS 10
+
 /**
  * v9fs_vfs_link - create a hardlink
  * @old_dentry: dentry for file to link to
@@ -1383,7 +1328,7 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
              struct dentry *dentry)
 {
        int retval;
-       char *name;
+       char name[1 + U32_MAX_DIGITS + 2]; /* sign + number + \n + \0 */
        struct p9_fid *oldfid;
 
        p9_debug(P9_DEBUG_VFS, " %lu,%pd,%pd\n",
@@ -1393,20 +1338,12 @@ v9fs_vfs_link(struct dentry *old_dentry, struct inode *dir,
        if (IS_ERR(oldfid))
                return PTR_ERR(oldfid);
 
-       name = __getname();
-       if (unlikely(!name)) {
-               retval = -ENOMEM;
-               goto clunk_fid;
-       }
-
        sprintf(name, "%d\n", oldfid->fid);
        retval = v9fs_vfs_mkspecial(dir, dentry, P9_DMLINK, name);
-       __putname(name);
        if (!retval) {
                v9fs_refresh_inode(oldfid, d_inode(old_dentry));
                v9fs_invalidate_inode_attr(dir);
        }
-clunk_fid:
        p9_client_clunk(oldfid);
        return retval;
 }
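
The fixed-size stack buffers that replace __getname() are sized by worst case rather than PATH_MAX: with U32_MAX_DIGITS = 10 (4294967295 has ten digits), "%d\n" of a fid needs at most 1 sign + 10 digits + '\n' + '\0' = 13 bytes, i.e. name[1 + U32_MAX_DIGITS + 2]; the mknod buffer below similarly holds "b %u %u" with two full u32 values plus the terminator. A standalone check of the bound (illustrative only, not part of the patch):

	#include <limits.h>
	#include <stdio.h>

	#define U32_MAX_DIGITS 10

	int main(void)
	{
		char name[1 + U32_MAX_DIGITS + 2];	/* sign + digits + '\n' + '\0' */
		int n = snprintf(name, sizeof(name), "%d\n", INT_MIN);

		/* snprintf returns the formatted length; with the '\0' it must fit */
		printf("worst case uses %d + 1 of %zu bytes\n", n, sizeof(name));
		return n < (int)sizeof(name) ? 0 : 1;
	}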
@@ -1425,7 +1362,7 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rde
 {
        struct v9fs_session_info *v9ses = v9fs_inode2v9ses(dir);
        int retval;
-       char *name;
+       char name[2 + U32_MAX_DIGITS + 1 + U32_MAX_DIGITS + 1];
        u32 perm;
 
        p9_debug(P9_DEBUG_VFS, " %lu,%pd mode: %hx MAJOR: %u MINOR: %u\n",
@@ -1435,26 +1372,16 @@ v9fs_vfs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, dev_t rde
        if (!new_valid_dev(rdev))
                return -EINVAL;
 
-       name = __getname();
-       if (!name)
-               return -ENOMEM;
        /* build extension */
        if (S_ISBLK(mode))
                sprintf(name, "b %u %u", MAJOR(rdev), MINOR(rdev));
        else if (S_ISCHR(mode))
                sprintf(name, "c %u %u", MAJOR(rdev), MINOR(rdev));
-       else if (S_ISFIFO(mode))
-               *name = 0;
-       else if (S_ISSOCK(mode))
+       else
                *name = 0;
-       else {
-               __putname(name);
-               return -EINVAL;
-       }
 
        perm = unixmode2p9mode(v9ses, mode);
        retval = v9fs_vfs_mkspecial(dir, dentry, perm, name);
-       __putname(name);
 
        return retval;
 }
@@ -1530,7 +1457,7 @@ static const struct inode_operations v9fs_file_inode_operations = {
 static const struct inode_operations v9fs_symlink_inode_operations = {
        .readlink = generic_readlink,
        .follow_link = v9fs_vfs_follow_link,
-       .put_link = v9fs_vfs_put_link,
+       .put_link = kfree_put_link,
        .getattr = v9fs_vfs_getattr,
        .setattr = v9fs_vfs_setattr,
 };
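
Every follow_link conversion in this merge (v9fs here, and autofs4, befs, ceph, cifs and configfs below) moves to the same new contract: instead of stashing the target with nd_set_link() and returning an opaque value, ->follow_link() returns the link body directly, or an ERR_PTR on failure, and anything that must be freed afterwards is also stored through *cookie so a stock helper such as kfree_put_link or free_page_put_link can release it. A minimal sketch of the two variants, modeled on the code in this diff (the example_* names are hypothetical):

	/* body lives as long as the inode: nothing for put_link to free */
	static const char *example_follow_link(struct dentry *dentry, void **cookie)
	{
		return d_inode(dentry)->i_private;	/* as autofs4 does */
	}

	/* body is allocated per call: hand it to put_link via *cookie */
	static const char *example_follow_link_alloc(struct dentry *dentry, void **cookie)
	{
		char *body = example_read_link(dentry);	/* hypothetical helper */

		if (IS_ERR(body))
			return body;		/* ERR_PTR propagates as-is */
		return *cookie = body;		/* kfree_put_link frees it later */
	}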
index 9861c7c951a6dbd293e78d84e190f0908b4a1921..09e4433717b8795c2c7c8c76452887ec27a91be4 100644 (file)
@@ -905,41 +905,24 @@ error:
 /**
  * v9fs_vfs_follow_link_dotl - follow a symlink path
  * @dentry: dentry for symlink
- * @nd: nameidata
- *
+ * @cookie: place to pass the data to put_link()
  */
 
-static void *
-v9fs_vfs_follow_link_dotl(struct dentry *dentry, struct nameidata *nd)
+static const char *
+v9fs_vfs_follow_link_dotl(struct dentry *dentry, void **cookie)
 {
-       int retval;
-       struct p9_fid *fid;
-       char *link = __getname();
+       struct p9_fid *fid = v9fs_fid_lookup(dentry);
        char *target;
+       int retval;
 
        p9_debug(P9_DEBUG_VFS, "%pd\n", dentry);
 
-       if (!link) {
-               link = ERR_PTR(-ENOMEM);
-               goto ndset;
-       }
-       fid = v9fs_fid_lookup(dentry);
-       if (IS_ERR(fid)) {
-               __putname(link);
-               link = ERR_CAST(fid);
-               goto ndset;
-       }
+       if (IS_ERR(fid))
+               return ERR_CAST(fid);
        retval = p9_client_readlink(fid, &target);
-       if (!retval) {
-               strcpy(link, target);
-               kfree(target);
-               goto ndset;
-       }
-       __putname(link);
-       link = ERR_PTR(retval);
-ndset:
-       nd_set_link(nd, link);
-       return NULL;
+       if (retval)
+               return ERR_PTR(retval);
+       return *cookie = target;
 }
 
 int v9fs_refresh_inode_dotl(struct p9_fid *fid, struct inode *inode)
@@ -1006,7 +989,7 @@ const struct inode_operations v9fs_file_inode_operations_dotl = {
 const struct inode_operations v9fs_symlink_inode_operations_dotl = {
        .readlink = generic_readlink,
        .follow_link = v9fs_vfs_follow_link_dotl,
-       .put_link = v9fs_vfs_put_link,
+       .put_link = kfree_put_link,
        .getattr = v9fs_vfs_getattr_dotl,
        .setattr = v9fs_vfs_setattr_dotl,
        .setxattr = generic_setxattr,
index de58cc7b8076178605cea8f776031945d7d6da62..da0c33481bc0387788bcf4ce1792b38e141804e4 100644 (file)
 
 #include "autofs_i.h"
 
-static void *autofs4_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *autofs4_follow_link(struct dentry *dentry, void **cookie)
 {
        struct autofs_sb_info *sbi = autofs4_sbi(dentry->d_sb);
        struct autofs_info *ino = autofs4_dentry_ino(dentry);
        if (ino && !autofs4_oz_mode(sbi))
                ino->last_used = jiffies;
-       nd_set_link(nd, d_inode(dentry)->i_private);
-       return NULL;
+       return d_inode(dentry)->i_private;
 }
 
 const struct inode_operations autofs4_symlink_inode_operations = {
index 7943533c386802dc880051cc7ac2b078cf5d3ccb..46aedacfa6a8d4131563a83a8402daf89d590ddb 100644 (file)
@@ -42,8 +42,7 @@ static struct inode *befs_iget(struct super_block *, unsigned long);
 static struct inode *befs_alloc_inode(struct super_block *sb);
 static void befs_destroy_inode(struct inode *inode);
 static void befs_destroy_inodecache(void);
-static void *befs_follow_link(struct dentry *, struct nameidata *);
-static void *befs_fast_follow_link(struct dentry *, struct nameidata *);
+static const char *befs_follow_link(struct dentry *, void **);
 static int befs_utf2nls(struct super_block *sb, const char *in, int in_len,
                        char **out, int *out_len);
 static int befs_nls2utf(struct super_block *sb, const char *in, int in_len,
@@ -80,11 +79,6 @@ static const struct address_space_operations befs_aops = {
        .bmap           = befs_bmap,
 };
 
-static const struct inode_operations befs_fast_symlink_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = befs_fast_follow_link,
-};
-
 static const struct inode_operations befs_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = befs_follow_link,
@@ -403,10 +397,12 @@ static struct inode *befs_iget(struct super_block *sb, unsigned long ino)
                inode->i_op = &befs_dir_inode_operations;
                inode->i_fop = &befs_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (befs_ino->i_flags & BEFS_LONG_SYMLINK)
+               if (befs_ino->i_flags & BEFS_LONG_SYMLINK) {
                        inode->i_op = &befs_symlink_inode_operations;
-               else
-                       inode->i_op = &befs_fast_symlink_inode_operations;
+               } else {
+                       inode->i_link = befs_ino->i_data.symlink;
+                       inode->i_op = &simple_symlink_inode_operations;
+               }
        } else {
                befs_error(sb, "Inode %lu is not a regular file, "
                           "directory or symlink. THAT IS WRONG! BeFS has no "
@@ -467,8 +463,8 @@ befs_destroy_inodecache(void)
  * The data stream becomes the link name, unless the LONG_SYMLINK
  * flag is set.
  */
-static void *
-befs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *
+befs_follow_link(struct dentry *dentry, void **cookie)
 {
        struct super_block *sb = dentry->d_sb;
        struct befs_inode_info *befs_ino = BEFS_I(d_inode(dentry));
@@ -478,33 +474,20 @@ befs_follow_link(struct dentry *dentry, struct nameidata *nd)
 
        if (len == 0) {
                befs_error(sb, "Long symlink with illegal length");
-               link = ERR_PTR(-EIO);
-       } else {
-               befs_debug(sb, "Follow long symlink");
-
-               link = kmalloc(len, GFP_NOFS);
-               if (!link) {
-                       link = ERR_PTR(-ENOMEM);
-               } else if (befs_read_lsymlink(sb, data, link, len) != len) {
-                       kfree(link);
-                       befs_error(sb, "Failed to read entire long symlink");
-                       link = ERR_PTR(-EIO);
-               } else {
-                       link[len - 1] = '\0';
-               }
+               return ERR_PTR(-EIO);
        }
-       nd_set_link(nd, link);
-       return NULL;
-}
-
-
-static void *
-befs_fast_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct befs_inode_info *befs_ino = BEFS_I(d_inode(dentry));
+       befs_debug(sb, "Follow long symlink");
 
-       nd_set_link(nd, befs_ino->i_data.symlink);
-       return NULL;
+       link = kmalloc(len, GFP_NOFS);
+       if (!link)
+               return ERR_PTR(-ENOMEM);
+       if (befs_read_lsymlink(sb, data, link, len) != len) {
+               kfree(link);
+               befs_error(sb, "Failed to read entire long symlink");
+               return ERR_PTR(-EIO);
+       }
+       link[len - 1] = '\0';
+       return *cookie = link;
 }
 
 /*
index 241ef68d28930a7faed26f18b67b296138e61d9e..cd46e415883090747d8238c2a2fbaa9b101dbc5e 100644 (file)
@@ -918,7 +918,7 @@ static int load_elf_binary(struct linux_binprm *bprm)
                        total_size = total_mapping_size(elf_phdata,
                                                        loc->elf_ex.e_phnum);
                        if (!total_size) {
-                               error = -EINVAL;
+                               retval = -EINVAL;
                                goto out_free_dentry;
                        }
                }
index 9de772ee0031707c59292ac7849520ef8d6e9e47..614aaa1969bdfded3485ae9a72146269dd9101eb 100644 (file)
@@ -880,6 +880,8 @@ static int __add_keyed_refs(struct btrfs_fs_info *fs_info,
  * indirect refs to their parent bytenr.
  * When roots are found, they're added to the roots list
  *
+ * NOTE: This can return values > 0
+ *
  * FIXME some caching might speed things up
  */
 static int find_parent_nodes(struct btrfs_trans_handle *trans,
@@ -1198,6 +1200,19 @@ int btrfs_find_all_roots(struct btrfs_trans_handle *trans,
        return ret;
 }
 
+/**
+ * btrfs_check_shared - tell us whether an extent is shared
+ *
+ * @trans: optional trans handle
+ *
+ * btrfs_check_shared uses the backref walking code but will short
+ * circuit as soon as it finds a root or inode that doesn't match the
+ * one passed in. This provides a significant performance benefit for
+ * callers (such as fiemap) which want to know whether the extent is
+ * shared but do not need a ref count.
+ *
+ * Return: 0 if extent is not shared, 1 if it is shared, < 0 on error.
+ */
 int btrfs_check_shared(struct btrfs_trans_handle *trans,
                       struct btrfs_fs_info *fs_info, u64 root_objectid,
                       u64 inum, u64 bytenr)
@@ -1226,11 +1241,13 @@ int btrfs_check_shared(struct btrfs_trans_handle *trans,
                ret = find_parent_nodes(trans, fs_info, bytenr, elem.seq, tmp,
                                        roots, NULL, root_objectid, inum);
                if (ret == BACKREF_FOUND_SHARED) {
+                       /* this is the only condition under which we return 1 */
                        ret = 1;
                        break;
                }
                if (ret < 0 && ret != -ENOENT)
                        break;
+               ret = 0;
                node = ulist_next(tmp, &uiter);
                if (!node)
                        break;
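
Given the return convention spelled out in the new kernel-doc (0 = not shared, 1 = shared, negative = error), a caller such as fiemap would consume it along these lines (an illustrative sketch, not part of this patch):

	ret = btrfs_check_shared(trans, fs_info, root_objectid, inum, bytenr);
	if (ret < 0)
		return ret;			/* hard error */
	if (ret)
		flags |= FIEMAP_EXTENT_SHARED;	/* ret == 1: extent is shared */
	/* ret == 0: the extent is exclusive to this root/inode */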
index 0ec8e228b89f42505cc0c9c8ffef96e1a2f3f9a4..0ec3acd14cbf5e1273f331231f09165710d80d91 100644 (file)
@@ -3180,8 +3180,6 @@ static int write_one_cache_group(struct btrfs_trans_handle *trans,
        btrfs_mark_buffer_dirty(leaf);
 fail:
        btrfs_release_path(path);
-       if (ret)
-               btrfs_abort_transaction(trans, root, ret);
        return ret;
 
 }
@@ -3487,8 +3485,30 @@ again:
                                ret = 0;
                        }
                }
-               if (!ret)
+               if (!ret) {
                        ret = write_one_cache_group(trans, root, path, cache);
+                       /*
+                        * Our block group might still be attached to the list
+                        * of new block groups in the transaction handle of some
+                        * other task (struct btrfs_trans_handle->new_bgs). This
+                        * means its block group item isn't yet in the extent
+                        * tree. If this happens ignore the error, as we will
+                        * try again later in the critical section of the
+                        * transaction commit.
+                        */
+                       if (ret == -ENOENT) {
+                               ret = 0;
+                               spin_lock(&cur_trans->dirty_bgs_lock);
+                               if (list_empty(&cache->dirty_list)) {
+                                       list_add_tail(&cache->dirty_list,
+                                                     &cur_trans->dirty_bgs);
+                                       btrfs_get_block_group(cache);
+                               }
+                               spin_unlock(&cur_trans->dirty_bgs_lock);
+                       } else if (ret) {
+                               btrfs_abort_transaction(trans, root, ret);
+                       }
+               }
 
                /* if it's not on the io list, we need to put the block group */
                if (should_put)
@@ -3597,8 +3617,11 @@ int btrfs_write_dirty_block_groups(struct btrfs_trans_handle *trans,
                                ret = 0;
                        }
                }
-               if (!ret)
+               if (!ret) {
                        ret = write_one_cache_group(trans, root, path, cache);
+                       if (ret)
+                               btrfs_abort_transaction(trans, root, ret);
+               }
 
                /* if it's not on the io list, we need to put the block group */
                if (should_put)
@@ -8806,6 +8829,24 @@ again:
                goto again;
        }
 
+       /*
+        * if we are changing raid levels, try to allocate a corresponding
+        * block group with the new raid level.
+        */
+       alloc_flags = update_block_group_flags(root, cache->flags);
+       if (alloc_flags != cache->flags) {
+               ret = do_chunk_alloc(trans, root, alloc_flags,
+                                    CHUNK_ALLOC_FORCE);
+               /*
+                * ENOSPC is allowed here, we may have enough space
+                * already allocated at the new raid level to
+                * carry on
+                */
+               if (ret == -ENOSPC)
+                       ret = 0;
+               if (ret < 0)
+                       goto out;
+       }
 
        ret = set_block_group_ro(cache, 0);
        if (!ret)
@@ -8819,7 +8860,9 @@ again:
 out:
        if (cache->flags & BTRFS_BLOCK_GROUP_SYSTEM) {
                alloc_flags = update_block_group_flags(root, cache->flags);
+               lock_chunks(root->fs_info->chunk_root);
                check_system_chunk(trans, root, alloc_flags);
+               unlock_chunks(root->fs_info->chunk_root);
        }
        mutex_unlock(&root->fs_info->ro_block_group_mutex);
 
index 43af5a61ad25b4dbb3f4c0f2cb89160c120d1ac7..c32d226bfeccbb28f25f2f417fa9e57b14411136 100644 (file)
@@ -4772,6 +4772,25 @@ struct extent_buffer *find_extent_buffer(struct btrfs_fs_info *fs_info,
                               start >> PAGE_CACHE_SHIFT);
        if (eb && atomic_inc_not_zero(&eb->refs)) {
                rcu_read_unlock();
+               /*
+                * Lock our eb's refs_lock to avoid races with
+                * free_extent_buffer. When we get our eb it might be flagged
+                * with EXTENT_BUFFER_STALE and another task running
+                * free_extent_buffer might have seen that flag set,
+                * eb->refs == 2, that the buffer isn't under IO (dirty and
+                * writeback flags not set) and it's still in the tree (flag
+                * EXTENT_BUFFER_TREE_REF set), therefore being in the process
+                * of decrementing the extent buffer's reference count twice.
+                * So here we could race and increment the eb's reference count,
+                * clear its stale flag, mark it as dirty and drop our reference
+                * before the other task finishes executing free_extent_buffer,
+                * which would later result in an attempt to free an extent
+                * buffer that is dirty.
+                */
+               if (test_bit(EXTENT_BUFFER_STALE, &eb->bflags)) {
+                       spin_lock(&eb->refs_lock);
+                       spin_unlock(&eb->refs_lock);
+               }
                mark_extent_buffer_accessed(eb, NULL);
                return eb;
        }
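
The empty spin_lock()/spin_unlock() pair is a standard idiom rather than a mistake: acquiring refs_lock cannot succeed until any free_extent_buffer() critical section holding it has finished, so once we get (and immediately drop) the lock, the racing teardown has either fully completed or never started. A runnable userspace analogue of the idiom (pthreads; names are illustrative):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t refs_lock = PTHREAD_MUTEX_INITIALIZER;

	static void *teardown(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&refs_lock);
		puts("teardown: multi-step release in progress");
		sleep(1);			/* simulate the critical section */
		puts("teardown: done");
		pthread_mutex_unlock(&refs_lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;

		pthread_create(&t, NULL, teardown, NULL);
		usleep(100 * 1000);		/* let teardown take the lock first */

		/* the idiom: lock and unlock purely to wait out the critical
		 * section; no shared state is touched while holding the lock */
		pthread_mutex_lock(&refs_lock);
		pthread_mutex_unlock(&refs_lock);
		puts("main: teardown finished, safe to proceed");

		pthread_join(t, NULL);
		return 0;
	}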
index 41c510b7cc110891bbc3818638137e3e516d5800..9dbe5b548fa6a74029960de0ea1d8ebf63f835e8 100644 (file)
@@ -86,7 +86,7 @@ static struct inode *__lookup_free_space_inode(struct btrfs_root *root,
 
        mapping_set_gfp_mask(inode->i_mapping,
                        mapping_gfp_mask(inode->i_mapping) &
-                       ~(GFP_NOFS & ~__GFP_HIGHMEM));
+                       ~(__GFP_FS | __GFP_HIGHMEM));
 
        return inode;
 }
@@ -3466,6 +3466,7 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
        struct btrfs_free_space_ctl *ctl = root->free_ino_ctl;
        int ret;
        struct btrfs_io_ctl io_ctl;
+       bool release_metadata = true;
 
        if (!btrfs_test_opt(root, INODE_MAP_CACHE))
                return 0;
@@ -3473,11 +3474,20 @@ int btrfs_write_out_ino_cache(struct btrfs_root *root,
        memset(&io_ctl, 0, sizeof(io_ctl));
        ret = __btrfs_write_out_cache(root, inode, ctl, NULL, &io_ctl,
                                      trans, path, 0);
-       if (!ret)
+       if (!ret) {
+               /*
+                * At this point writepages() didn't error out, so our metadata
+                * reservation is released when the writeback finishes, at
+                * inode.c:btrfs_finish_ordered_io(), regardless of whether it
+                * finishes with or without an error.
+                */
+               release_metadata = false;
                ret = btrfs_wait_cache_io(root, trans, NULL, &io_ctl, path, 0);
+       }
 
        if (ret) {
-               btrfs_delalloc_release_metadata(inode, inode->i_size);
+               if (release_metadata)
+                       btrfs_delalloc_release_metadata(inode, inode->i_size);
 #ifdef DEBUG
                btrfs_err(root->fs_info,
                        "failed to write free ino cache for root %llu",
index 157cc54fc63486e485a95bf6d8d6da692ef171ca..760c4a5e096b4d5a403f7923ad4b65537a085886 100644 (file)
@@ -722,6 +722,7 @@ void btrfs_start_ordered_extent(struct inode *inode,
 int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
 {
        int ret = 0;
+       int ret_wb = 0;
        u64 end;
        u64 orig_end;
        struct btrfs_ordered_extent *ordered;
@@ -741,9 +742,14 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
        if (ret)
                return ret;
 
-       ret = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
-       if (ret)
-               return ret;
+       /*
+        * If we have a writeback error, don't return immediately. Wait first
+        * for any ordered extents that haven't completed yet. This is to make
+        * sure no one can dirty the same page ranges and call writepages()
+        * before the ordered extents complete - to avoid failures (-EEXIST)
+        * when adding the new ordered extents to the ordered tree.
+        */
+       ret_wb = filemap_fdatawait_range(inode->i_mapping, start, orig_end);
 
        end = orig_end;
        while (1) {
@@ -767,7 +773,7 @@ int btrfs_wait_ordered_range(struct inode *inode, u64 start, u64 len)
                        break;
                end--;
        }
-       return ret;
+       return ret_wb ? ret_wb : ret;
 }
 
 /*
index 96aebf3bcd5b37d35604c76ec584d29686ad34e2..174f5e1e00abfa533b1cb7483e44aae0f550e63a 100644 (file)
@@ -4625,6 +4625,7 @@ int btrfs_alloc_chunk(struct btrfs_trans_handle *trans,
 {
        u64 chunk_offset;
 
+       ASSERT(mutex_is_locked(&extent_root->fs_info->chunk_mutex));
        chunk_offset = find_next_chunk(extent_root->fs_info);
        return __btrfs_alloc_chunk(trans, extent_root, chunk_offset, type);
 }
index e876e1944519a330a2cc1f44e33a031139a35438..571acd88606cfcec3d01fc4a6ef453f0b49e9713 100644 (file)
@@ -6,7 +6,6 @@
 #include <linux/string.h>
 #include <linux/uaccess.h>
 #include <linux/kernel.h>
-#include <linux/namei.h>
 #include <linux/writeback.h>
 #include <linux/vmalloc.h>
 #include <linux/posix_acl.h>
@@ -819,6 +818,7 @@ static int fill_inode(struct inode *inode, struct page *locked_page,
                        else
                                kfree(sym); /* lost a race */
                }
+               inode->i_link = ci->i_symlink;
                break;
        case S_IFDIR:
                inode->i_op = &ceph_dir_iops;
@@ -1691,16 +1691,9 @@ retry:
 /*
  * symlinks
  */
-static void *ceph_sym_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct ceph_inode_info *ci = ceph_inode(d_inode(dentry));
-       nd_set_link(nd, ci->i_symlink);
-       return NULL;
-}
-
 static const struct inode_operations ceph_symlink_iops = {
        .readlink = generic_readlink,
-       .follow_link = ceph_sym_follow_link,
+       .follow_link = simple_follow_link,
        .setattr = ceph_setattr,
        .getattr = ceph_getattr,
        .setxattr = ceph_setxattr,
index 430e0348c99ebb9b86c65ccd957a1b5e69ed6a2a..7dc886c9a78fc428b368a1c911b8c1ad745f48a5 100644 (file)
@@ -24,6 +24,7 @@
 #include "cifsfs.h"
 #include "dns_resolve.h"
 #include "cifs_debug.h"
+#include "cifs_unicode.h"
 
 static LIST_HEAD(cifs_dfs_automount_list);
 
@@ -312,7 +313,7 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
        xid = get_xid();
        rc = get_dfs_path(xid, ses, full_path + 1, cifs_sb->local_nls,
                &num_referrals, &referrals,
-               cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
+               cifs_remap(cifs_sb));
        free_xid(xid);
 
        cifs_put_tlink(tlink);
index 0303c6793d903ab07cb7d1829bc372f0be3fc15f..5a53ac6b1e02515be90a4e446b103aa9f6f26874 100644 (file)
 #include "cifsglob.h"
 #include "cifs_debug.h"
 
-/*
- * cifs_utf16_bytes - how long will a string be after conversion?
- * @utf16 - pointer to input string
- * @maxbytes - don't go past this many bytes of input string
- * @codepage - destination codepage
- *
- * Walk a utf16le string and return the number of bytes that the string will
- * be after being converted to the given charset, not including any null
- * termination required. Don't walk past maxbytes in the source buffer.
- */
-int
-cifs_utf16_bytes(const __le16 *from, int maxbytes,
-               const struct nls_table *codepage)
-{
-       int i;
-       int charlen, outlen = 0;
-       int maxwords = maxbytes / 2;
-       char tmp[NLS_MAX_CHARSET_SIZE];
-       __u16 ftmp;
-
-       for (i = 0; i < maxwords; i++) {
-               ftmp = get_unaligned_le16(&from[i]);
-               if (ftmp == 0)
-                       break;
-
-               charlen = codepage->uni2char(ftmp, tmp, NLS_MAX_CHARSET_SIZE);
-               if (charlen > 0)
-                       outlen += charlen;
-               else
-                       outlen++;
-       }
-
-       return outlen;
-}
-
 int cifs_remap(struct cifs_sb_info *cifs_sb)
 {
        int map_type;
@@ -155,10 +120,13 @@ convert_sfm_char(const __u16 src_char, char *target)
  * enough to hold the result of the conversion (at least NLS_MAX_CHARSET_SIZE).
  */
 static int
-cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
+cifs_mapchar(char *target, const __u16 *from, const struct nls_table *cp,
             int maptype)
 {
        int len = 1;
+       __u16 src_char;
+
+       src_char = *from;
 
        if ((maptype == SFM_MAP_UNI_RSVD) && convert_sfm_char(src_char, target))
                return len;
@@ -168,10 +136,23 @@ cifs_mapchar(char *target, const __u16 src_char, const struct nls_table *cp,
 
        /* if character not one of seven in special remap set */
        len = cp->uni2char(src_char, target, NLS_MAX_CHARSET_SIZE);
-       if (len <= 0) {
-               *target = '?';
-               len = 1;
-       }
+       if (len <= 0)
+               goto surrogate_pair;
+
+       return len;
+
+surrogate_pair:
+       /* convert SURROGATE_PAIR and IVS */
+       if (strcmp(cp->charset, "utf8"))
+               goto unknown;
+       len = utf16s_to_utf8s(from, 3, UTF16_LITTLE_ENDIAN, target, 6);
+       if (len <= 0)
+               goto unknown;
+       return len;
+
+unknown:
+       *target = '?';
+       len = 1;
        return len;
 }
 
@@ -206,7 +187,7 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
        int nullsize = nls_nullsize(codepage);
        int fromwords = fromlen / 2;
        char tmp[NLS_MAX_CHARSET_SIZE];
-       __u16 ftmp;
+       __u16 ftmp[3];          /* room for 3 UTF-16 code units (6 bytes) */
 
        /*
         * because the chars can be of varying widths, we need to take care
@@ -217,9 +198,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
        safelen = tolen - (NLS_MAX_CHARSET_SIZE + nullsize);
 
        for (i = 0; i < fromwords; i++) {
-               ftmp = get_unaligned_le16(&from[i]);
-               if (ftmp == 0)
+               ftmp[0] = get_unaligned_le16(&from[i]);
+               if (ftmp[0] == 0)
                        break;
+               if (i + 1 < fromwords)
+                       ftmp[1] = get_unaligned_le16(&from[i + 1]);
+               else
+                       ftmp[1] = 0;
+               if (i + 2 < fromwords)
+                       ftmp[2] = get_unaligned_le16(&from[i + 2]);
+               else
+                       ftmp[2] = 0;
 
                /*
                 * check to see if converting this character might make the
@@ -234,6 +223,17 @@ cifs_from_utf16(char *to, const __le16 *from, int tolen, int fromlen,
                /* put converted char into 'to' buffer */
                charlen = cifs_mapchar(&to[outlen], ftmp, codepage, map_type);
                outlen += charlen;
+
+               /* charlen = number of UTF-8 bytes produced for one character:
+                * a surrogate pair (4 bytes of UTF-16) converts to 4 bytes of
+                * UTF-8 (charlen == 4); an IVS spans 7-8 bytes of UTF-8 and
+                * is handled as two conversions (charlen = 3+4 or 4+4) */
+               if (charlen == 4)
+                       i++;
+               else if (charlen >= 5)
+                       /* 5-6bytes UTF-8 */
+                       i += 2;
        }
 
        /* properly null-terminate string */
@@ -295,6 +295,46 @@ success:
        return i;
 }
 
+/*
+ * cifs_utf16_bytes - how long will a string be after conversion?
+ * @utf16 - pointer to input string
+ * @maxbytes - don't go past this many bytes of input string
+ * @codepage - destination codepage
+ *
+ * Walk a utf16le string and return the number of bytes that the string will
+ * be after being converted to the given charset, not including any null
+ * termination required. Don't walk past maxbytes in the source buffer.
+ */
+int
+cifs_utf16_bytes(const __le16 *from, int maxbytes,
+               const struct nls_table *codepage)
+{
+       int i;
+       int charlen, outlen = 0;
+       int maxwords = maxbytes / 2;
+       char tmp[NLS_MAX_CHARSET_SIZE];
+       __u16 ftmp[3];
+
+       for (i = 0; i < maxwords; i++) {
+               ftmp[0] = get_unaligned_le16(&from[i]);
+               if (ftmp[0] == 0)
+                       break;
+               if (i + 1 < maxwords)
+                       ftmp[1] = get_unaligned_le16(&from[i + 1]);
+               else
+                       ftmp[1] = 0;
+               if (i + 2 < maxwords)
+                       ftmp[2] = get_unaligned_le16(&from[i + 2]);
+               else
+                       ftmp[2] = 0;
+
+               charlen = cifs_mapchar(tmp, ftmp, codepage, NO_MAP_UNI_RSVD);
+               outlen += charlen;
+       }
+
+       return outlen;
+}
+
 /*
  * cifs_strndup_from_utf16 - copy a string from wire format to the local
  * codepage
@@ -409,10 +449,15 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
        char src_char;
        __le16 dst_char;
        wchar_t tmp;
+       wchar_t *wchar_to;      /* UTF-16 */
+       int ret;
+       unicode_t u;
 
        if (map_chars == NO_MAP_UNI_RSVD)
                return cifs_strtoUTF16(target, source, PATH_MAX, cp);
 
+       wchar_to = kzalloc(6, GFP_KERNEL);
+
        for (i = 0; i < srclen; j++) {
                src_char = source[i];
                charlen = 1;
@@ -441,11 +486,55 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
                         * if no match, use question mark, which at least in
                         * some cases serves as wild card
                         */
-                       if (charlen < 1) {
-                               dst_char = cpu_to_le16(0x003f);
-                               charlen = 1;
+                       if (charlen > 0)
+                               goto ctoUTF16;
+
+                       /* convert SURROGATE_PAIR */
+                       if (strcmp(cp->charset, "utf8") || !wchar_to)
+                               goto unknown;
+                       if (*(source + i) & 0x80) {
+                               charlen = utf8_to_utf32(source + i, 6, &u);
+                               if (charlen < 0)
+                                       goto unknown;
+                       } else
+                               goto unknown;
+                       ret  = utf8s_to_utf16s(source + i, charlen,
+                                              UTF16_LITTLE_ENDIAN,
+                                              wchar_to, 6);
+                       if (ret < 0)
+                               goto unknown;
+
+                       i += charlen;
+                       dst_char = cpu_to_le16(*wchar_to);
+                       if (charlen <= 3)
+                               /* 1-3 bytes of UTF-8 map to 2 bytes of UTF-16 */
+                               put_unaligned(dst_char, &target[j]);
+                       else if (charlen == 4) {
+                               /* 4 bytes of UTF-8 (a surrogate pair) map to
+                                * 4 bytes of UTF-16; 7-8 byte UTF-8 (IVS) is
+                                * split across two conversions
+                                * (charlen = 3+4 or 4+4) */
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 1));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
+                       } else if (charlen >= 5) {
+                               /* 5-6 bytes of UTF-8 map to 6 bytes of UTF-16 */
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 1));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
+                               dst_char = cpu_to_le16(*(wchar_to + 2));
+                               j++;
+                               put_unaligned(dst_char, &target[j]);
                        }
+                       continue;
+
+unknown:
+                       dst_char = cpu_to_le16(0x003f);
+                       charlen = 1;
                }
+
+ctoUTF16:
                /*
                 * character may take more than one byte in the source string,
                 * but will take exactly two bytes in the target string
@@ -456,6 +545,7 @@ cifsConvertToUTF16(__le16 *target, const char *source, int srclen,
 
 ctoUTF16_out:
        put_unaligned(0, &target[j]); /* Null terminate target unicode string */
+       kfree(wchar_to);
        return j;
 }
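
The surrogate-pair handling above reduces to fixed arithmetic: a code point cp above U+FFFF is emitted as a high surrogate 0xD800 + ((cp - 0x10000) >> 10) followed by a low surrogate 0xDC00 + ((cp - 0x10000) & 0x3FF), which is why a 4-byte UTF-8 character produces two UTF-16 units and j advances twice. A standalone demonstration, independent of the CIFS helpers:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint32_t cp = 0x1F600;			/* a 4-byte UTF-8 code point */
		uint32_t v = cp - 0x10000;
		uint16_t hi = 0xD800 + (v >> 10);	/* high surrogate */
		uint16_t lo = 0xDC00 + (v & 0x3FF);	/* low surrogate */

		/* prints: U+1F600 -> UTF-16 D83D DE00 */
		printf("U+%04X -> UTF-16 %04X %04X\n",
		       (unsigned)cp, (unsigned)hi, (unsigned)lo);
		return 0;
	}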
 
index f5089bde363576dcab6a35887f3c539a8a7e6247..0a9fb6b53126a7c95715a862bfb3b067f443fc1a 100644 (file)
@@ -469,6 +469,8 @@ cifs_show_options(struct seq_file *s, struct dentry *root)
                seq_puts(s, ",nouser_xattr");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
                seq_puts(s, ",mapchars");
+       if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SFM_CHR)
+               seq_puts(s, ",mapposix");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
                seq_puts(s, ",sfu");
        if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
index 252f5c15806bc2f18f5c1c10ff7c2bde0aedba6d..a782b22904e40b71387d844a6a7879bab8191a88 100644 (file)
@@ -120,7 +120,7 @@ extern struct vfsmount *cifs_dfs_d_automount(struct path *path);
 #endif
 
 /* Functions related to symlinks */
-extern void *cifs_follow_link(struct dentry *direntry, struct nameidata *nd);
+extern const char *cifs_follow_link(struct dentry *direntry, void **cookie);
 extern int cifs_readlink(struct dentry *direntry, char __user *buffer,
                         int buflen);
 extern int cifs_symlink(struct inode *inode, struct dentry *direntry,
index c31ce98c1704a32b998f993d9a26613dc1342e29..c63fd1dde25b861b011f604522572c5619f177f1 100644 (file)
@@ -361,11 +361,11 @@ extern int CIFSUnixCreateHardLink(const unsigned int xid,
 extern int CIFSUnixCreateSymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const char *fromName, const char *toName,
-                       const struct nls_table *nls_codepage);
+                       const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBUnixQuerySymLink(const unsigned int xid,
                        struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **syminfo,
-                       const struct nls_table *nls_codepage);
+                       const struct nls_table *nls_codepage, int remap);
 extern int CIFSSMBQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                               __u16 fid, char **symlinkinfo,
                               const struct nls_table *nls_codepage);
index 84650a51c7c4064357eab083868cc613a75f7f18..f26ffbfc64d8b4eca26b8e8101f705043fc7a4a0 100644 (file)
@@ -2784,7 +2784,7 @@ copyRetry:
 int
 CIFSUnixCreateSymLink(const unsigned int xid, struct cifs_tcon *tcon,
                      const char *fromName, const char *toName,
-                     const struct nls_table *nls_codepage)
+                     const struct nls_table *nls_codepage, int remap)
 {
        TRANSACTION2_SPI_REQ *pSMB = NULL;
        TRANSACTION2_SPI_RSP *pSMBr = NULL;
@@ -2804,9 +2804,9 @@ createSymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                   cifs_strtoUTF16((__le16 *) pSMB->FileName, fromName,
-                                   /* find define for this maxpathcomponent */
-                                   PATH_MAX, nls_codepage);
+                   cifsConvertToUTF16((__le16 *) pSMB->FileName, fromName,
+                               /* find define for this maxpathcomponent */
+                                       PATH_MAX, nls_codepage, remap);
                name_len++;     /* trailing null */
                name_len *= 2;
 
@@ -2828,9 +2828,9 @@ createSymLinkRetry:
        data_offset = (char *) (&pSMB->hdr.Protocol) + offset;
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len_target =
-                   cifs_strtoUTF16((__le16 *) data_offset, toName, PATH_MAX
-                                   /* find define for this maxpathcomponent */
-                                   , nls_codepage);
+                   cifsConvertToUTF16((__le16 *) data_offset, toName,
+                               /* find define for this maxpathcomponent */
+                                       PATH_MAX, nls_codepage, remap);
                name_len_target++;      /* trailing null */
                name_len_target *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -3034,7 +3034,7 @@ winCreateHardLinkRetry:
 int
 CIFSSMBUnixQuerySymLink(const unsigned int xid, struct cifs_tcon *tcon,
                        const unsigned char *searchName, char **symlinkinfo,
-                       const struct nls_table *nls_codepage)
+                       const struct nls_table *nls_codepage, int remap)
 {
 /* SMB_QUERY_FILE_UNIX_LINK */
        TRANSACTION2_QPI_REQ *pSMB = NULL;
@@ -3055,8 +3055,9 @@ querySymLinkRetry:
 
        if (pSMB->hdr.Flags2 & SMBFLG2_UNICODE) {
                name_len =
-                       cifs_strtoUTF16((__le16 *) pSMB->FileName, searchName,
-                                       PATH_MAX, nls_codepage);
+                       cifsConvertToUTF16((__le16 *) pSMB->FileName,
+                                          searchName, PATH_MAX, nls_codepage,
+                                          remap);
                name_len++;     /* trailing null */
                name_len *= 2;
        } else {        /* BB improve the check for buffer overruns BB */
@@ -4917,7 +4918,7 @@ getDFSRetry:
                strncpy(pSMB->RequestFileName, search_name, name_len);
        }
 
-       if (ses->server && ses->server->sign)
+       if (ses->server->sign)
                pSMB->hdr.Flags2 |= SMBFLG2_SECURITY_SIGNATURE;
 
        pSMB->hdr.Uid = ses->Suid;
index f3bfe08e177b6c86a4f1a99a8905f1b417f82af5..8383d5ea42028dac6788e642b6c3ed0f61459d51 100644 (file)
@@ -386,6 +386,7 @@ cifs_reconnect(struct TCP_Server_Info *server)
                rc = generic_ip_connect(server);
                if (rc) {
                        cifs_dbg(FYI, "reconnect error %d\n", rc);
+                       mutex_unlock(&server->srv_mutex);
                        msleep(3000);
                } else {
                        atomic_inc(&tcpSesReconnectCount);
@@ -393,8 +394,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
                        if (server->tcpStatus != CifsExiting)
                                server->tcpStatus = CifsNeedNegotiate;
                        spin_unlock(&GlobalMid_Lock);
+                       mutex_unlock(&server->srv_mutex);
                }
-               mutex_unlock(&server->srv_mutex);
        } while (server->tcpStatus == CifsNeedReconnect);
 
        return rc;
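
The point of moving mutex_unlock() into both branches is that the failure path previously slept for three seconds with srv_mutex still held, stalling every other task that needed the server mutex during a reconnect storm. Reduced to its essentials (a sketch; the enclosing retry loop and the matching mutex_lock() sit outside this hunk):

	rc = generic_ip_connect(server);
	if (rc) {
		mutex_unlock(&server->srv_mutex);	/* never sleep holding the lock */
		msleep(3000);
	} else {
		/* ... update tcpStatus under GlobalMid_Lock ... */
		mutex_unlock(&server->srv_mutex);
	}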
index 338d56936f6af694b7085284a38e7b751ba7eb66..c3eb998a99bd18a2ed9b7b843c99be15fedab9df 100644 (file)
@@ -620,8 +620,7 @@ int cifs_mknod(struct inode *inode, struct dentry *direntry, umode_t mode,
                }
                rc = CIFSSMBUnixSetPathInfo(xid, tcon, full_path, &args,
                                            cifs_sb->local_nls,
-                                           cifs_sb->mnt_cifs_flags &
-                                               CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                           cifs_remap(cifs_sb));
                if (rc)
                        goto mknod_out;
 
index cafbf10521d5017074196e02ad37218939d0ab70..3f50cee79df9d3318209e19281acef536b34af37 100644 (file)
@@ -140,8 +140,7 @@ int cifs_posix_open(char *full_path, struct inode **pinode,
        posix_flags = cifs_posix_convert_flags(f_flags);
        rc = CIFSPOSIXCreate(xid, tcon, posix_flags, mode, pnetfid, presp_data,
                             poplock, full_path, cifs_sb->local_nls,
-                            cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                            cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);
 
        if (rc)
@@ -1553,8 +1552,8 @@ cifs_setlk(struct file *file, struct file_lock *flock, __u32 type,
                rc = server->ops->mand_unlock_range(cfile, flock, xid);
 
 out:
-       if (flock->fl_flags & FL_POSIX)
-               posix_lock_file_wait(file, flock);
+       if (flock->fl_flags & FL_POSIX && !rc)
+               rc = posix_lock_file_wait(file, flock);
        return rc;
 }
 
index 55b58112d122248b92305ea00eb66c6715a40b03..f621b44cb8009fe87bf631e0a96c941fe63d3408 100644 (file)
@@ -373,8 +373,7 @@ int cifs_get_inode_info_unix(struct inode **pinode,
 
        /* could have done a find first instead but this returns more info */
        rc = CIFSSMBUnixQPathInfo(xid, tcon, full_path, &find_data,
-                                 cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                 cifs_sb->local_nls, cifs_remap(cifs_sb));
        cifs_put_tlink(tlink);
 
        if (!rc) {
@@ -402,9 +401,25 @@ int cifs_get_inode_info_unix(struct inode **pinode,
                        rc = -ENOMEM;
        } else {
                /* we already have inode, update it */
+
+               /* if uniqueid is different, return error */
+               if (unlikely(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM &&
+                   CIFS_I(*pinode)->uniqueid != fattr.cf_uniqueid)) {
+                       rc = -ESTALE;
+                       goto cgiiu_exit;
+               }
+
+               /* if filetype is different, return error */
+               if (unlikely(((*pinode)->i_mode & S_IFMT) !=
+                   (fattr.cf_mode & S_IFMT))) {
+                       rc = -ESTALE;
+                       goto cgiiu_exit;
+               }
+
                cifs_fattr_to_inode(*pinode, &fattr);
        }
 
+cgiiu_exit:
        return rc;
 }
 
@@ -839,6 +854,15 @@ cifs_get_inode_info(struct inode **inode, const char *full_path,
                if (!*inode)
                        rc = -ENOMEM;
        } else {
+               /* we already have inode, update it */
+
+               /* if filetype is different, return error */
+               if (unlikely(((*inode)->i_mode & S_IFMT) !=
+                   (fattr.cf_mode & S_IFMT))) {
+                       rc = -ESTALE;
+                       goto cgii_exit;
+               }
+
                cifs_fattr_to_inode(*inode, &fattr);
        }
 
@@ -2215,8 +2239,7 @@ cifs_setattr_unix(struct dentry *direntry, struct iattr *attrs)
                pTcon = tlink_tcon(tlink);
                rc = CIFSSMBUnixSetPathInfo(xid, pTcon, full_path, args,
                                    cifs_sb->local_nls,
-                                   cifs_sb->mnt_cifs_flags &
-                                       CIFS_MOUNT_MAP_SPECIAL_CHR);
+                                   cifs_remap(cifs_sb));
                cifs_put_tlink(tlink);
        }
 
index 252e672d56043468fb8f906ce371acef27d74db0..e3548f73bdeaa980ef1c282246688e1e3f5f21e8 100644 (file)
@@ -626,8 +626,8 @@ cifs_hl_exit:
        return rc;
 }
 
-void *
-cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
+const char *
+cifs_follow_link(struct dentry *direntry, void **cookie)
 {
        struct inode *inode = d_inode(direntry);
        int rc = -ENOMEM;
@@ -643,16 +643,18 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
 
        tlink = cifs_sb_tlink(cifs_sb);
        if (IS_ERR(tlink)) {
-               rc = PTR_ERR(tlink);
-               tlink = NULL;
-               goto out;
+               free_xid(xid);
+               return ERR_CAST(tlink);
        }
        tcon = tlink_tcon(tlink);
        server = tcon->ses->server;
 
        full_path = build_path_from_dentry(direntry);
-       if (!full_path)
-               goto out;
+       if (!full_path) {
+               free_xid(xid);
+               cifs_put_tlink(tlink);
+               return ERR_PTR(-ENOMEM);
+       }
 
        cifs_dbg(FYI, "Full path: %s inode = 0x%p\n", full_path, inode);
 
@@ -670,17 +672,13 @@ cifs_follow_link(struct dentry *direntry, struct nameidata *nd)
                                                &target_path, cifs_sb);
 
        kfree(full_path);
-out:
+       free_xid(xid);
+       cifs_put_tlink(tlink);
        if (rc != 0) {
                kfree(target_path);
-               target_path = ERR_PTR(rc);
+               return ERR_PTR(rc);
        }
-
-       free_xid(xid);
-       if (tlink)
-               cifs_put_tlink(tlink);
-       nd_set_link(nd, target_path);
-       return NULL;
+       return *cookie = target_path;
 }
 
 int
@@ -717,7 +715,8 @@ cifs_symlink(struct inode *inode, struct dentry *direntry, const char *symname)
                rc = create_mf_symlink(xid, pTcon, cifs_sb, full_path, symname);
        else if (pTcon->unix_ext)
                rc = CIFSUnixCreateSymLink(xid, pTcon, full_path, symname,
-                                          cifs_sb->local_nls);
+                                          cifs_sb->local_nls,
+                                          cifs_remap(cifs_sb));
        /* else
           rc = CIFSCreateReparseSymLink(xid, pTcon, fromName, toName,
                                        cifs_sb_target->local_nls); */
index b4a47237486b883851e889e78505bd1179c7842d..b1eede3678a91d8d1ea3e350cb035cabf1da7ba7 100644 (file)
@@ -90,6 +90,8 @@ cifs_prime_dcache(struct dentry *parent, struct qstr *name,
        if (dentry) {
                inode = d_inode(dentry);
                if (inode) {
+                       if (d_mountpoint(dentry))
+                               goto out;
                        /*
                         * If we're generating inode numbers, then we don't
                         * want to clobber the existing one with the one that
index 7bfdd6066276256fc03855cd809f63c167d3991b..fc537c29044edd8a158bb130a65e371370826164 100644 (file)
@@ -960,7 +960,8 @@ cifs_query_symlink(const unsigned int xid, struct cifs_tcon *tcon,
        /* Check for unix extensions */
        if (cap_unix(tcon->ses)) {
                rc = CIFSSMBUnixQuerySymLink(xid, tcon, full_path, target_path,
-                                            cifs_sb->local_nls);
+                                            cifs_sb->local_nls,
+                                            cifs_remap(cifs_sb));
                if (rc == -EREMOTE)
                        rc = cifs_unix_dfs_readlink(xid, tcon, full_path,
                                                    target_path,
index 65cd7a84c8bc3206033a917fe9d98fc939cbe1af..54cbe19d9c0871a1bb47a17edfc1d414cb383b9f 100644 (file)
@@ -110,7 +110,7 @@ smb2_hdr_assemble(struct smb2_hdr *hdr, __le16 smb2_cmd /* command */ ,
 
        /* GLOBAL_CAP_LARGE_MTU will only be set if dialect > SMB2.02 */
        /* See sections 2.2.4 and 3.2.4.1.5 of MS-SMB2 */
-       if ((tcon->ses) &&
+       if ((tcon->ses) && (tcon->ses->server) &&
            (tcon->ses->server->capabilities & SMB2_GLOBAL_CAP_LARGE_MTU))
                hdr->CreditCharge = cpu_to_le16(1);
        /* else CreditCharge MBZ */
index cc9f2546ea4a041273654b6076d1fbc774d447e4..ec5c8325b503d1a1602863769ae43c067d13a047 100644 (file)
@@ -279,36 +279,27 @@ static int configfs_getlink(struct dentry *dentry, char * path)
 
 }
 
-static void *configfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *configfs_follow_link(struct dentry *dentry, void **cookie)
 {
-       int error = -ENOMEM;
        unsigned long page = get_zeroed_page(GFP_KERNEL);
+       int error;
 
-       if (page) {
-               error = configfs_getlink(dentry, (char *)page);
-               if (!error) {
-                       nd_set_link(nd, (char *)page);
-                       return (void *)page;
-               }
-       }
-
-       nd_set_link(nd, ERR_PTR(error));
-       return NULL;
-}
+       if (!page)
+               return ERR_PTR(-ENOMEM);
 
-static void configfs_put_link(struct dentry *dentry, struct nameidata *nd,
-                             void *cookie)
-{
-       if (cookie) {
-               unsigned long page = (unsigned long)cookie;
-               free_page(page);
+       error = configfs_getlink(dentry, (char *)page);
+       if (!error)
+               return *cookie = (void *)page;
+
+       free_page(page);
+       return ERR_PTR(error);
 }
 
 const struct inode_operations configfs_symlink_inode_operations = {
        .follow_link = configfs_follow_link,
        .readlink = generic_readlink,
-       .put_link = configfs_put_link,
+       .put_link = free_page_put_link,
        .setattr = configfs_setattr,
 };
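
The configfs conversion above is the template for every follow_link change in this series: the old void *follow_link(dentry, nameidata) / nd_set_link() pair becomes a cookie-based interface. A minimal sketch of the new contract (the demo_* names are hypothetical, not part of the patch):

static const char *demo_follow_link(struct dentry *dentry, void **cookie)
{
	char *body = kstrdup("target/path", GFP_KERNEL);

	if (!body)
		return ERR_PTR(-ENOMEM);
	/* Returning the body and stashing it in *cookie asks the VFS to
	 * call ->put_link(inode, cookie) once the walk is finished. */
	return *cookie = body;
}

static const struct inode_operations demo_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= demo_follow_link,
	.put_link	= kfree_put_link,	/* now just kfree(cookie) */
};

Helpers that need no cleanup simply return the name and never set *cookie, in which case ->put_link() is not called at all (see drop_links() later in this diff).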
 
index 656ce522a218f29850e2415b0a038c8208a5dd52..592c4b582495b515c52a2aa3458be3422953c111 100644 (file)
@@ -322,17 +322,17 @@ static void dentry_free(struct dentry *dentry)
 }
 
 /**
- * dentry_rcuwalk_barrier - invalidate in-progress rcu-walk lookups
+ * dentry_rcuwalk_invalidate - invalidate in-progress rcu-walk lookups
  * @dentry: the target dentry
  * After this call, in-progress rcu-walk path lookup will fail. This
  * should be called after unhashing, and after changing d_inode (if
  * the dentry has not already been unhashed).
  */
-static inline void dentry_rcuwalk_barrier(struct dentry *dentry)
+static inline void dentry_rcuwalk_invalidate(struct dentry *dentry)
 {
-       assert_spin_locked(&dentry->d_lock);
-       /* Go through a barrier */
-       write_seqcount_barrier(&dentry->d_seq);
+       lockdep_assert_held(&dentry->d_lock);
+       /* Go through an invalidation barrier */
+       write_seqcount_invalidate(&dentry->d_seq);
 }
 
 /*
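
The rename from _barrier to _invalidate reflects what the seqcount write side actually does here: it bumps d_seq so that any concurrent rcu-walk that sampled it must retry. A simplified sketch of the reader side (how fs/namei.c consumes d_seq; the demo_ name is hypothetical):

static int demo_rcu_dentry_stale(struct dentry *dentry)
{
	unsigned seq = raw_seqcount_begin(&dentry->d_seq);

	/* ... speculative, lock-free use of the dentry ... */

	/* Nonzero once dentry_rcuwalk_invalidate() has run: the walk
	 * must drop out of rcu-walk and retry with references held. */
	return read_seqcount_retry(&dentry->d_seq, seq);
}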
@@ -372,7 +372,7 @@ static void dentry_unlink_inode(struct dentry * dentry)
        struct inode *inode = dentry->d_inode;
        __d_clear_type_and_inode(dentry);
        hlist_del_init(&dentry->d_u.d_alias);
-       dentry_rcuwalk_barrier(dentry);
+       dentry_rcuwalk_invalidate(dentry);
        spin_unlock(&dentry->d_lock);
        spin_unlock(&inode->i_lock);
        if (!inode->i_nlink)
@@ -494,7 +494,7 @@ void __d_drop(struct dentry *dentry)
                __hlist_bl_del(&dentry->d_hash);
                dentry->d_hash.pprev = NULL;
                hlist_bl_unlock(b);
-               dentry_rcuwalk_barrier(dentry);
+               dentry_rcuwalk_invalidate(dentry);
        }
 }
 EXPORT_SYMBOL(__d_drop);
@@ -1239,13 +1239,13 @@ ascend:
                /* might go back up the wrong parent if we have had a rename. */
                if (need_seqretry(&rename_lock, seq))
                        goto rename_retry;
-               next = child->d_child.next;
-               while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED)) {
+               /* go into the first sibling still alive */
+               do {
+                       next = child->d_child.next;
                        if (next == &this_parent->d_subdirs)
                                goto ascend;
                        child = list_entry(next, struct dentry, d_child);
-                       next = next->next;
-               }
+               } while (unlikely(child->d_flags & DCACHE_DENTRY_KILLED));
                rcu_read_unlock();
                goto resume;
        }
@@ -1752,7 +1752,7 @@ static void __d_instantiate(struct dentry *dentry, struct inode *inode)
        if (inode)
                hlist_add_head(&dentry->d_u.d_alias, &inode->i_dentry);
        __d_set_inode_and_type(dentry, inode, add_flags);
-       dentry_rcuwalk_barrier(dentry);
+       dentry_rcuwalk_invalidate(dentry);
        spin_unlock(&dentry->d_lock);
        fsnotify_d_instantiate(dentry, inode);
 }
index 830a7e76f5c64067e46fad8fd368e9112ddae7a9..284f9aa0028b8dd46b9897ababc5e826c185c608 100644 (file)
@@ -17,7 +17,6 @@
 #include <linux/fs.h>
 #include <linux/seq_file.h>
 #include <linux/pagemap.h>
-#include <linux/namei.h>
 #include <linux/debugfs.h>
 #include <linux/io.h>
 #include <linux/slab.h>
@@ -43,17 +42,6 @@ const struct file_operations debugfs_file_operations = {
        .llseek =       noop_llseek,
 };
 
-static void *debugfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, d_inode(dentry)->i_private);
-       return NULL;
-}
-
-const struct inode_operations debugfs_link_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = debugfs_follow_link,
-};
-
 static int debugfs_u8_set(void *data, u64 val)
 {
        *(u8 *)data = val;
index c1e7ffb0dab658ecd21c449bf36467b14e0b75d6..7eaec88ea970d1a6ea8422465857deaefd2b7052 100644 (file)
@@ -174,7 +174,7 @@ static void debugfs_evict_inode(struct inode *inode)
        truncate_inode_pages_final(&inode->i_data);
        clear_inode(inode);
        if (S_ISLNK(inode->i_mode))
-               kfree(inode->i_private);
+               kfree(inode->i_link);
 }
 
 static const struct super_operations debugfs_super_operations = {
@@ -511,8 +511,8 @@ struct dentry *debugfs_create_symlink(const char *name, struct dentry *parent,
                return failed_creating(dentry);
        }
        inode->i_mode = S_IFLNK | S_IRWXUGO;
-       inode->i_op = &debugfs_link_operations;
-       inode->i_private = link;
+       inode->i_op = &simple_symlink_inode_operations;
+       inode->i_link = link;
        d_instantiate(dentry, inode);
        return end_creating(dentry);
 }
index fc850b55db67a27a99663596e1e8c711c8d71237..3c4db1172d222840b8cf0fcd50a61437ebf5f4c7 100644 (file)
@@ -170,7 +170,6 @@ out_unlock:
  * @directory_inode: inode of the new file's dentry's parent in ecryptfs
  * @ecryptfs_dentry: New file's dentry in ecryptfs
  * @mode: The mode of the new file
- * @nd: nameidata of ecryptfs' parent's dentry & vfsmount
  *
  * Creates the underlying file and the eCryptfs inode which will link to
  * it. It will also update the eCryptfs directory inode to mimic the
@@ -384,7 +383,7 @@ static int ecryptfs_lookup_interpose(struct dentry *dentry,
  * ecryptfs_lookup
  * @ecryptfs_dir_inode: The eCryptfs directory inode
  * @ecryptfs_dentry: The eCryptfs dentry that we are looking up
- * @ecryptfs_nd: nameidata; may be NULL
+ * @flags: lookup flags
  *
  * Find a file on disk. If the file does not exist, then we'll add it to the
  * dentry cache and continue on to read it from the disk.
@@ -675,18 +674,16 @@ out:
        return rc ? ERR_PTR(rc) : buf;
 }
 
-static void *ecryptfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *ecryptfs_follow_link(struct dentry *dentry, void **cookie)
 {
        size_t len;
        char *buf = ecryptfs_readlink_lower(dentry, &len);
        if (IS_ERR(buf))
-               goto out;
+               return buf;
        fsstack_copy_attr_atime(d_inode(dentry),
                                d_inode(ecryptfs_dentry_to_lower(dentry)));
        buf[len] = '\0';
-out:
-       nd_set_link(nd, buf);
-       return NULL;
+       return *cookie = buf;
 }
 
 /**
index 49a1c61433b73722683cad25eef1fb92045e265a..1977c2a553aca711ba145d1670ea9a84fd45ea84 100644 (file)
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -659,6 +659,9 @@ int setup_arg_pages(struct linux_binprm *bprm,
        if (stack_base > STACK_SIZE_MAX)
                stack_base = STACK_SIZE_MAX;
 
+       /* Add space for stack randomization. */
+       stack_base += (STACK_RND_MASK << PAGE_SHIFT);
+
        /* Make sure we didn't let the argument array grow too large. */
        if (vma->vm_end - vma->vm_start > stack_base)
                return -ENOMEM;
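
The added headroom matters because the stack top is later randomized downward by up to STACK_RND_MASK << PAGE_SHIFT bytes; the ceiling used by the size check therefore has to include that span so the argument area still fits under a randomized stack top. A worked number (assuming the x86-64 values; other architectures differ):

	/* STACK_RND_MASK == 0x3fffff (22 bits), PAGE_SHIFT == 12 */
	unsigned long rnd_span = 0x3fffffUL << 12;	/* == 0x3fffff000 */
	/* just under 16 GiB of address-space slack -- reserved range,
	 * not committed memory */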
index b47c7b8dc275429e87b1b35fe0a36af2e820cdf3..a364fd0965ec6a35e27c386b22916a20de9469b3 100644 (file)
@@ -16,5 +16,5 @@
 libore-y := ore.o ore_raid.o
 obj-$(CONFIG_ORE) += libore.o
 
-exofs-y := inode.o file.o symlink.o namei.o dir.o super.o sys.o
+exofs-y := inode.o file.o namei.o dir.o super.o sys.o
 obj-$(CONFIG_EXOFS_FS) += exofs.o
index ad9cac670a470d163001c7aa2227db46afc607d3..2e86086bc9403efe99a25ab0df439db0c714eb4e 100644 (file)
@@ -207,10 +207,6 @@ extern const struct address_space_operations exofs_aops;
 extern const struct inode_operations exofs_dir_inode_operations;
 extern const struct inode_operations exofs_special_inode_operations;
 
-/* symlink.c         */
-extern const struct inode_operations exofs_symlink_inode_operations;
-extern const struct inode_operations exofs_fast_symlink_inode_operations;
-
 /* exofs_init_comps will initialize an ore_components device array
  * pointing to a single ore_comp struct, and a round-robin view
  * of the device table.
index 786e4cc8c889cc8903f7d734ee3e255c0aeb24f8..73c64daa0f5517b4ff8271bd17e25740bb83d506 100644 (file)
@@ -1222,10 +1222,11 @@ struct inode *exofs_iget(struct super_block *sb, unsigned long ino)
                inode->i_fop = &exofs_dir_operations;
                inode->i_mapping->a_ops = &exofs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (exofs_inode_is_fast_symlink(inode))
-                       inode->i_op = &exofs_fast_symlink_inode_operations;
-               else {
-                       inode->i_op = &exofs_symlink_inode_operations;
+               if (exofs_inode_is_fast_symlink(inode)) {
+                       inode->i_op = &simple_symlink_inode_operations;
+                       inode->i_link = (char *)oi->i_data;
+               } else {
+                       inode->i_op = &page_symlink_inode_operations;
                        inode->i_mapping->a_ops = &exofs_aops;
                }
        } else {
index 5ae25e43119185e04d19a287e534921cc61bbc1c..09a6bb1ad63c840b91ef56114871753454046292 100644 (file)
@@ -113,7 +113,7 @@ static int exofs_symlink(struct inode *dir, struct dentry *dentry,
        oi = exofs_i(inode);
        if (l > sizeof(oi->i_data)) {
                /* slow symlink */
-               inode->i_op = &exofs_symlink_inode_operations;
+               inode->i_op = &page_symlink_inode_operations;
                inode->i_mapping->a_ops = &exofs_aops;
                memset(oi->i_data, 0, sizeof(oi->i_data));
 
@@ -122,7 +122,8 @@ static int exofs_symlink(struct inode *dir, struct dentry *dentry,
                        goto out_fail;
        } else {
                /* fast symlink */
-               inode->i_op = &exofs_fast_symlink_inode_operations;
+               inode->i_op = &simple_symlink_inode_operations;
+               inode->i_link = (char *)oi->i_data;
                memcpy(oi->i_data, symname, l);
                inode->i_size = l-1;
        }
diff --git a/fs/exofs/symlink.c b/fs/exofs/symlink.c
deleted file mode 100644 (file)
index 6f6f3a4..0000000
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright (C) 2005, 2006
- * Avishay Traeger (avishay@gmail.com)
- * Copyright (C) 2008, 2009
- * Boaz Harrosh <ooo@electrozaur.com>
- *
- * Copyrights for code taken from ext2:
- *     Copyright (C) 1992, 1993, 1994, 1995
- *     Remy Card (card@masi.ibp.fr)
- *     Laboratoire MASI - Institut Blaise Pascal
- *     Universite Pierre et Marie Curie (Paris VI)
- *     from
- *     linux/fs/minix/inode.c
- *     Copyright (C) 1991, 1992  Linus Torvalds
- *
- * This file is part of exofs.
- *
- * exofs is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation.  Since it is based on ext2, and the only
- * valid version of GPL for the Linux kernel is version 2, the only valid
- * version of GPL for exofs is version 2.
- *
- * exofs is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with exofs; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
- */
-
-#include <linux/namei.h>
-
-#include "exofs.h"
-
-static void *exofs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct exofs_i_info *oi = exofs_i(d_inode(dentry));
-
-       nd_set_link(nd, (char *)oi->i_data);
-       return NULL;
-}
-
-const struct inode_operations exofs_symlink_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = page_follow_link_light,
-       .put_link       = page_put_link,
-};
-
-const struct inode_operations exofs_fast_symlink_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = exofs_follow_link,
-};
index f460ae36d5b78addfd9cc1a6eb6c4287887a946e..5c09776d347fc363c4f456862eb2361d717e46bd 100644 (file)
@@ -1403,6 +1403,7 @@ struct inode *ext2_iget (struct super_block *sb, unsigned long ino)
                        inode->i_mapping->a_ops = &ext2_aops;
        } else if (S_ISLNK(inode->i_mode)) {
                if (ext2_inode_is_fast_symlink(inode)) {
+                       inode->i_link = (char *)ei->i_data;
                        inode->i_op = &ext2_fast_symlink_inode_operations;
                        nd_terminate_link(ei->i_data, inode->i_size,
                                sizeof(ei->i_data) - 1);
index 3e074a9ccbe6dd048c288ae8162229b0af26d176..13ec54a99c962a85bc628732102fdb554f750d1c 100644 (file)
@@ -189,7 +189,8 @@ static int ext2_symlink (struct inode * dir, struct dentry * dentry,
        } else {
                /* fast symlink */
                inode->i_op = &ext2_fast_symlink_inode_operations;
-               memcpy((char*)(EXT2_I(inode)->i_data),symname,l);
+               inode->i_link = (char*)EXT2_I(inode)->i_data;
+               memcpy(inode->i_link, symname, l);
                inode->i_size = l-1;
        }
        mark_inode_dirty(inode);
index 20608f17c2e5144ed6283e6afe7f23dae6f51031..ae17179f3810b2dd635c81203643a14f8f4c0c10 100644 (file)
 
 #include "ext2.h"
 #include "xattr.h"
-#include <linux/namei.h>
-
-static void *ext2_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct ext2_inode_info *ei = EXT2_I(d_inode(dentry));
-       nd_set_link(nd, (char *)ei->i_data);
-       return NULL;
-}
 
 const struct inode_operations ext2_symlink_inode_operations = {
        .readlink       = generic_readlink,
@@ -43,7 +35,7 @@ const struct inode_operations ext2_symlink_inode_operations = {
  
 const struct inode_operations ext2_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = ext2_follow_link,
+       .follow_link    = simple_follow_link,
        .setattr        = ext2_setattr,
 #ifdef CONFIG_EXT2_FS_XATTR
        .setxattr       = generic_setxattr,
index 2ee2dc4351d1630b375da3b9aaa062840ce1afc9..6c7e5468a2f807d68e48b7c43b13a0626a5f4aee 100644 (file)
@@ -2999,6 +2999,7 @@ struct inode *ext3_iget(struct super_block *sb, unsigned long ino)
                        inode->i_op = &ext3_fast_symlink_inode_operations;
                        nd_terminate_link(ei->i_data, inode->i_size,
                                sizeof(ei->i_data) - 1);
+                       inode->i_link = (char *)ei->i_data;
                } else {
                        inode->i_op = &ext3_symlink_inode_operations;
                        ext3_set_aops(inode);
index 4264b9bd0002f199593308feaf7906292311e42f..c9e767cd4b67991e3ecf4bebc77b8eaa6be2f752 100644 (file)
@@ -2308,7 +2308,8 @@ retry:
                }
        } else {
                inode->i_op = &ext3_fast_symlink_inode_operations;
-               memcpy((char*)&EXT3_I(inode)->i_data,symname,l);
+               inode->i_link = (char*)&EXT3_I(inode)->i_data;
+               memcpy(inode->i_link, symname, l);
                inode->i_size = l-1;
        }
        EXT3_I(inode)->i_disksize = inode->i_size;
index ea96df3c58db199915e6e9b60b1d7beb9b931150..c08c59094ae61f3172c58e237477a7cb7518e50f 100644 (file)
  *  ext3 symlink handling code
  */
 
-#include <linux/namei.h>
 #include "ext3.h"
 #include "xattr.h"
 
-static void * ext3_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct ext3_inode_info *ei = EXT3_I(d_inode(dentry));
-       nd_set_link(nd, (char*)ei->i_data);
-       return NULL;
-}
-
 const struct inode_operations ext3_symlink_inode_operations = {
        .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
@@ -43,7 +35,7 @@ const struct inode_operations ext3_symlink_inode_operations = {
 
 const struct inode_operations ext3_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = ext3_follow_link,
+       .follow_link    = simple_follow_link,
        .setattr        = ext3_setattr,
 #ifdef CONFIG_EXT3_FS_XATTR
        .setxattr       = generic_setxattr,
index 009a0590b20fb328c5829573ee77440dabb8e83e..0a3b72d1d458bd68834465a70fff3d323959f997 100644 (file)
@@ -2847,6 +2847,7 @@ extern int ext4_mpage_readpages(struct address_space *mapping,
                                unsigned nr_pages);
 
 /* symlink.c */
+extern const struct inode_operations ext4_encrypted_symlink_inode_operations;
 extern const struct inode_operations ext4_symlink_inode_operations;
 extern const struct inode_operations ext4_fast_symlink_inode_operations;
 
@@ -2889,7 +2890,6 @@ extern int ext4_map_blocks(handle_t *handle, struct inode *inode,
                           struct ext4_map_blocks *map, int flags);
 extern int ext4_ext_calc_metadata_amount(struct inode *inode,
                                         ext4_lblk_t lblocks);
-extern int ext4_extent_tree_init(handle_t *, struct inode *);
 extern int ext4_ext_calc_credits_for_single_extent(struct inode *inode,
                                                   int num,
                                                   struct ext4_ext_path *path);
index 3445035c7e015e9460f2cd3f74b698bf823cabb6..d4184318181878b023931078377ae17c9568a208 100644 (file)
@@ -87,6 +87,12 @@ int __ext4_journal_stop(const char *where, unsigned int line, handle_t *handle)
                ext4_put_nojournal(handle);
                return 0;
        }
+
+       if (!handle->h_transaction) {
+               err = jbd2_journal_stop(handle);
+               return handle->h_err ? handle->h_err : err;
+       }
+
        sb = handle->h_transaction->t_journal->j_private;
        err = handle->h_err;
        rc = jbd2_journal_stop(handle);
index d74e08029643d3e5eeb1f8a14d9a78c290549863..e003a1e81dc351c76465908bec21702ecc6d4b91 100644 (file)
@@ -377,7 +377,7 @@ static int ext4_valid_extent(struct inode *inode, struct ext4_extent *ext)
        ext4_lblk_t lblock = le32_to_cpu(ext->ee_block);
        ext4_lblk_t last = lblock + len - 1;
 
-       if (lblock > last)
+       if (len == 0 || lblock > last)
                return 0;
        return ext4_data_block_valid(EXT4_SB(inode->i_sb), block, len);
 }
@@ -5396,6 +5396,14 @@ int ext4_collapse_range(struct inode *inode, loff_t offset, loff_t len)
        loff_t new_size, ioffset;
        int ret;
 
+       /*
+        * We need to test this early because xfstests assumes that a
+        * collapse range of (0, 1) will return EOPNOTSUPP if the file
+        * system does not support collapse range.
+        */
+       if (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))
+               return -EOPNOTSUPP;
+
        /* Collapse range works only on fs block size aligned offsets. */
        if (offset & (EXT4_CLUSTER_SIZE(sb) - 1) ||
            len & (EXT4_CLUSTER_SIZE(sb) - 1))
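
From userspace the reordering is visible through fallocate(2): the support check now precedes the alignment check, so even an unaligned (0, 1) request on a non-extent file fails with EOPNOTSUPP rather than EINVAL, which is what xfstests probes for. A hypothetical test sketch (expects "testfile" to exist and to not use extents, e.g. a file inherited from an ext3-era filesystem):

#define _GNU_SOURCE
#include <fcntl.h>
#include <linux/falloc.h>
#include <stdio.h>

int main(void)
{
	int fd = open("testfile", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* len == 1 is not block aligned; on a non-extent file this now
	 * reports EOPNOTSUPP up front instead of EINVAL. */
	if (fallocate(fd, FALLOC_FL_COLLAPSE_RANGE, 0, 1) < 0)
		perror("fallocate");
	return 0;
}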
index 55b187c3bac1f1626cf0267734aa805b6f9a1263..5168c9b568809d81f66cd4813bc25cba5d669fc9 100644 (file)
@@ -4213,8 +4213,11 @@ struct inode *ext4_iget(struct super_block *sb, unsigned long ino)
                inode->i_op = &ext4_dir_inode_operations;
                inode->i_fop = &ext4_dir_operations;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (ext4_inode_is_fast_symlink(inode) &&
-                   !ext4_encrypted_inode(inode)) {
+               if (ext4_encrypted_inode(inode)) {
+                       inode->i_op = &ext4_encrypted_symlink_inode_operations;
+                       ext4_set_aops(inode);
+               } else if (ext4_inode_is_fast_symlink(inode)) {
+                       inode->i_link = (char *)ei->i_data;
                        inode->i_op = &ext4_fast_symlink_inode_operations;
                        nd_terminate_link(ei->i_data, inode->i_size,
                                sizeof(ei->i_data) - 1);
@@ -4345,7 +4348,7 @@ static void ext4_update_other_inodes_time(struct super_block *sb,
        int inode_size = EXT4_INODE_SIZE(sb);
 
        oi.orig_ino = orig_ino;
-       ino = orig_ino & ~(inodes_per_block - 1);
+       ino = (orig_ino & ~(inodes_per_block - 1)) + 1;
        for (i = 0; i < inodes_per_block; i++, ino++, buf += inode_size) {
                if (ino == orig_ino)
                        continue;
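
The +1 fixes a fencepost: ext4 inode numbers are 1-based, so the first inode stored in the block that holds orig_ino is the rounded-down value plus one. Worked through with hypothetical numbers:

	/*
	 * inodes_per_block == 16, orig_ino == 37.  Inode 37 sits in
	 * inode-table block (37 - 1) / 16 == 2, which holds inodes 33..48.
	 *
	 *   before: ino = 37 & ~15       == 32 -> scans 32..47 (off by one)
	 *   after:  ino = (37 & ~15) + 1 == 33 -> scans 33..48 (correct)
	 */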
index 814f3beb436965f116b7555ee8cf9ac30c3f0165..5fdb9f6aa869445ca9893751393e4822b2e0ed63 100644 (file)
@@ -3206,10 +3206,12 @@ static int ext4_symlink(struct inode *dir,
                        goto err_drop_inode;
                sd->len = cpu_to_le16(ostr.len);
                disk_link.name = (char *) sd;
+               inode->i_op = &ext4_encrypted_symlink_inode_operations;
        }
 
        if ((disk_link.len > EXT4_N_BLOCKS * 4)) {
-               inode->i_op = &ext4_symlink_inode_operations;
+               if (!encryption_required)
+                       inode->i_op = &ext4_symlink_inode_operations;
                ext4_set_aops(inode);
                /*
                 * We cannot call page_symlink() with transaction started
@@ -3249,9 +3251,10 @@ static int ext4_symlink(struct inode *dir,
        } else {
                /* clear the extent format for fast symlink */
                ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
-               inode->i_op = encryption_required ?
-                       &ext4_symlink_inode_operations :
-                       &ext4_fast_symlink_inode_operations;
+               if (!encryption_required) {
+                       inode->i_op = &ext4_fast_symlink_inode_operations;
+                       inode->i_link = (char *)&EXT4_I(inode)->i_data;
+               }
                memcpy((char *)&EXT4_I(inode)->i_data, disk_link.name,
                       disk_link.len);
                inode->i_size = disk_link.len - 1;
index f06d0589ddba5db7226a38a6e0de3ef2cdee48d9..ca9d4a2fed415649cd9744fdac2a1e8bdfa1631a 100644 (file)
@@ -294,6 +294,8 @@ static void __save_error_info(struct super_block *sb, const char *func,
        struct ext4_super_block *es = EXT4_SB(sb)->s_es;
 
        EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+       if (bdev_read_only(sb->s_bdev))
+               return;
        es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
        es->s_last_error_time = cpu_to_le32(get_seconds());
        strncpy(es->s_last_error_func, func, sizeof(es->s_last_error_func));
index 187b789203142d6b444b264acd44798427626b41..ba5bd18a9825242fdfc9e8ee4cb05f1cc7cd05f7 100644 (file)
@@ -23,7 +23,7 @@
 #include "xattr.h"
 
 #ifdef CONFIG_EXT4_FS_ENCRYPTION
-static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *ext4_follow_link(struct dentry *dentry, void **cookie)
 {
        struct page *cpage = NULL;
        char *caddr, *paddr = NULL;
@@ -35,12 +35,9 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
        int res;
        u32 plen, max_size = inode->i_sb->s_blocksize;
 
-       if (!ext4_encrypted_inode(inode))
-               return page_follow_link_light(dentry, nd);
-
        ctx = ext4_get_fname_crypto_ctx(inode, inode->i_sb->s_blocksize);
        if (IS_ERR(ctx))
-               return ctx;
+               return ERR_CAST(ctx);
 
        if (ext4_inode_is_fast_symlink(inode)) {
                caddr = (char *) EXT4_I(inode)->i_data;
@@ -49,7 +46,7 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
                cpage = read_mapping_page(inode->i_mapping, 0, NULL);
                if (IS_ERR(cpage)) {
                        ext4_put_fname_crypto_ctx(&ctx);
-                       return cpage;
+                       return ERR_CAST(cpage);
                }
                caddr = kmap(cpage);
                caddr[size] = 0;
@@ -80,13 +77,12 @@ static void *ext4_follow_link(struct dentry *dentry, struct nameidata *nd)
        /* Null-terminate the name */
        if (res <= plen)
                paddr[res] = '\0';
-       nd_set_link(nd, paddr);
        ext4_put_fname_crypto_ctx(&ctx);
        if (cpage) {
                kunmap(cpage);
                page_cache_release(cpage);
        }
-       return NULL;
+       return *cookie = paddr;
 errout:
        ext4_put_fname_crypto_ctx(&ctx);
        if (cpage) {
@@ -97,36 +93,22 @@ errout:
        return ERR_PTR(res);
 }
 
-static void ext4_put_link(struct dentry *dentry, struct nameidata *nd,
-                         void *cookie)
-{
-       struct page *page = cookie;
-
-       if (!page) {
-               kfree(nd_get_link(nd));
-       } else {
-               kunmap(page);
-               page_cache_release(page);
-       }
-}
+const struct inode_operations ext4_encrypted_symlink_inode_operations = {
+       .readlink       = generic_readlink,
+       .follow_link    = ext4_follow_link,
+       .put_link       = kfree_put_link,
+       .setattr        = ext4_setattr,
+       .setxattr       = generic_setxattr,
+       .getxattr       = generic_getxattr,
+       .listxattr      = ext4_listxattr,
+       .removexattr    = generic_removexattr,
+};
 #endif
 
-static void *ext4_follow_fast_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct ext4_inode_info *ei = EXT4_I(d_inode(dentry));
-       nd_set_link(nd, (char *) ei->i_data);
-       return NULL;
-}
-
 const struct inode_operations ext4_symlink_inode_operations = {
        .readlink       = generic_readlink,
-#ifdef CONFIG_EXT4_FS_ENCRYPTION
-       .follow_link    = ext4_follow_link,
-       .put_link       = ext4_put_link,
-#else
        .follow_link    = page_follow_link_light,
        .put_link       = page_put_link,
-#endif
        .setattr        = ext4_setattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
@@ -136,7 +118,7 @@ const struct inode_operations ext4_symlink_inode_operations = {
 
 const struct inode_operations ext4_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = ext4_follow_fast_link,
+       .follow_link    = simple_follow_link,
        .setattr        = ext4_setattr,
        .setxattr       = generic_setxattr,
        .getxattr       = generic_getxattr,
index b91b0e10678eb1adcb87e45628f7794caa0dcc9b..1e1aae669fa86275d7b6c932aeda89ded129ce17 100644 (file)
@@ -1513,6 +1513,7 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 {
        struct inode *inode = mapping->host;
        struct f2fs_sb_info *sbi = F2FS_I_SB(inode);
+       bool locked = false;
        int ret;
        long diff;
 
@@ -1533,7 +1534,13 @@ static int f2fs_write_data_pages(struct address_space *mapping,
 
        diff = nr_pages_to_write(sbi, DATA, wbc);
 
+       if (!S_ISDIR(inode->i_mode)) {
+               mutex_lock(&sbi->writepages);
+               locked = true;
+       }
        ret = write_cache_pages(mapping, wbc, __f2fs_writepage, mapping);
+       if (locked)
+               mutex_unlock(&sbi->writepages);
 
        f2fs_submit_merged_bio(sbi, DATA, WRITE);
 
index d8921cf2ba9a04454e553a75535dfa84d2b4da67..8de34ab6d5b1c5340276a5780bac1c133ac582fb 100644 (file)
@@ -625,6 +625,7 @@ struct f2fs_sb_info {
        struct mutex cp_mutex;                  /* checkpoint procedure lock */
        struct rw_semaphore cp_rwsem;           /* blocking FS operations */
        struct rw_semaphore node_write;         /* locking node writes */
+       struct mutex writepages;                /* mutex for writepages() */
        wait_queue_head_t cp_wait;
 
        struct inode_management im[MAX_INO_ENTRY];      /* manage inode cache */
index 7e3794edae42ab5c656f2ae9fec69e7431476b35..71765d062914a515fc7603843cabd2b456d5a189 100644 (file)
@@ -296,21 +296,15 @@ fail:
        return err;
 }
 
-static void *f2fs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *f2fs_follow_link(struct dentry *dentry, void **cookie)
 {
-       struct page *page;
-
-       page = page_follow_link_light(dentry, nd);
-       if (IS_ERR(page))
-               return page;
-
-       /* this is broken symlink case */
-       if (*nd_get_link(nd) == 0) {
-               kunmap(page);
-               page_cache_release(page);
-               return ERR_PTR(-ENOENT);
+       const char *link = page_follow_link_light(dentry, cookie);
+       if (!IS_ERR(link) && !*link) {
+               /* this is broken symlink case */
+               page_put_link(NULL, *cookie);
+               link = ERR_PTR(-ENOENT);
        }
-       return page;
+       return link;
 }
 
 static int f2fs_symlink(struct inode *dir, struct dentry *dentry,
index 160b88346b2477466f3ed34d4d2dff2509742d7e..b2dd1b01f07634e27f42fc09e8fd627a70395e3e 100644 (file)
@@ -1035,6 +1035,7 @@ try_onemore:
        sbi->raw_super = raw_super;
        sbi->raw_super_buf = raw_super_buf;
        mutex_init(&sbi->gc_mutex);
+       mutex_init(&sbi->writepages);
        mutex_init(&sbi->cp_mutex);
        init_rwsem(&sbi->node_write);
        clear_sbi_flag(sbi, SBI_POR_DOING);
index 999ff5c3cab0edacd585447132180d5c35554e3c..d59712dfa3e701e86ff53609308e813cf8acf69e 100644 (file)
@@ -195,8 +195,9 @@ static int handle_to_path(int mountdirfd, struct file_handle __user *ufh,
                goto out_err;
        }
        /* copy the full handle */
-       if (copy_from_user(handle, ufh,
-                          sizeof(struct file_handle) +
+       *handle = f_handle;
+       if (copy_from_user(&handle->f_handle,
+                          &ufh->f_handle,
                           f_handle.handle_bytes)) {
                retval = -EFAULT;
                goto out_handle;
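
This closes a double fetch: handle_bytes was validated from the first copy_from_user() of the header, but the old code then re-copied the entire structure from userspace, so a racing thread could grow handle_bytes between validation and use. Copying only the flexible array reuses the already-validated header. For reference, the layout being copied (mirroring struct file_handle as used by name_to_handle_at(2)/open_by_handle_at(2)):

struct file_handle {
	__u32		handle_bytes;	/* size of f_handle; validated once */
	int		handle_type;
	unsigned char	f_handle[0];	/* opaque handle, copied separately */
};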
index 881aa3d217f007a76361ff1c23f77499bdab851e..e3dcb4467d92752af6980549fb740f97d11d1f47 100644 (file)
@@ -50,9 +50,6 @@ extern daddr_t                        vxfs_bmap1(struct inode *, long);
 /* vxfs_fshead.c */
 extern int                     vxfs_read_fshead(struct super_block *);
 
-/* vxfs_immed.c */
-extern const struct inode_operations vxfs_immed_symlink_iops;
-
 /* vxfs_inode.c */
 extern const struct address_space_operations vxfs_immed_aops;
 extern struct kmem_cache       *vxfs_inode_cachep;
index 8b9229e2ca5cb572976a839ca20f58e0514b6726..cb84f0fcc72a468c1366f498554ab4c79ac26830 100644 (file)
  */
 #include <linux/fs.h>
 #include <linux/pagemap.h>
-#include <linux/namei.h>
 
 #include "vxfs.h"
 #include "vxfs_extern.h"
 #include "vxfs_inode.h"
 
 
-static void *  vxfs_immed_follow_link(struct dentry *, struct nameidata *);
-
 static int     vxfs_immed_readpage(struct file *, struct page *);
 
-/*
- * Inode operations for immed symlinks.
- *
- * Unliked all other operations we do not go through the pagecache,
- * but do all work directly on the inode.
- */
-const struct inode_operations vxfs_immed_symlink_iops = {
-       .readlink =             generic_readlink,
-       .follow_link =          vxfs_immed_follow_link,
-};
-
 /*
  * Address space operations for immed files and directories.
  */
@@ -61,26 +47,6 @@ const struct address_space_operations vxfs_immed_aops = {
        .readpage =             vxfs_immed_readpage,
 };
 
-/**
- * vxfs_immed_follow_link - follow immed symlink
- * @dp:                dentry for the link
- * @np:                pathname lookup data for the current path walk
- *
- * Description:
- *   vxfs_immed_follow_link restarts the pathname lookup with
- *   the data obtained from @dp.
- *
- * Returns:
- *   Zero on success, else a negative error code.
- */
-static void *
-vxfs_immed_follow_link(struct dentry *dp, struct nameidata *np)
-{
-       struct vxfs_inode_info          *vip = VXFS_INO(d_inode(dp));
-       nd_set_link(np, vip->vii_immed.vi_immed);
-       return NULL;
-}
-
 /**
  * vxfs_immed_readpage - read part of an immed inode into pagecache
  * @file:      file context (unused)
index 363e3ae25f6b42c775f6c09f6251786555adeaa2..ef73ed674a27162917845b0507269bdf86b273da 100644 (file)
@@ -35,6 +35,7 @@
 #include <linux/pagemap.h>
 #include <linux/kernel.h>
 #include <linux/slab.h>
+#include <linux/namei.h>
 
 #include "vxfs.h"
 #include "vxfs_inode.h"
@@ -327,8 +328,10 @@ vxfs_iget(struct super_block *sbp, ino_t ino)
                        ip->i_op = &page_symlink_inode_operations;
                        ip->i_mapping->a_ops = &vxfs_aops;
                } else {
-                       ip->i_op = &vxfs_immed_symlink_iops;
-                       vip->vii_immed.vi_immed[ip->i_size] = '\0';
+                       ip->i_op = &simple_symlink_inode_operations;
+                       ip->i_link = vip->vii_immed.vi_immed;
+                       nd_terminate_link(ip->i_link, ip->i_size,
+                                         sizeof(vip->vii_immed.vi_immed) - 1);
                }
        } else
                init_special_inode(ip, ip->i_mode, old_decode_dev(vip->vii_rdev));
index 0572bca49f1546b3d9cd3b00fb7fc21f0369308b..5e2e08712d3ba614a46687d5688fc2f01cd835be 100644 (file)
@@ -1365,7 +1365,7 @@ static int fuse_readdir(struct file *file, struct dir_context *ctx)
        return err;
 }
 
-static char *read_link(struct dentry *dentry)
+static const char *fuse_follow_link(struct dentry *dentry, void **cookie)
 {
        struct inode *inode = d_inode(dentry);
        struct fuse_conn *fc = get_fuse_conn(inode);
@@ -1389,28 +1389,12 @@ static char *read_link(struct dentry *dentry)
                link = ERR_PTR(ret);
        } else {
                link[ret] = '\0';
+               *cookie = link;
        }
        fuse_invalidate_atime(inode);
        return link;
 }
 
-static void free_link(char *link)
-{
-       if (!IS_ERR(link))
-               free_page((unsigned long) link);
-}
-
-static void *fuse_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, read_link(dentry));
-       return NULL;
-}
-
-static void fuse_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
-{
-       free_link(nd_get_link(nd));
-}
-
 static int fuse_dir_open(struct inode *inode, struct file *file)
 {
        return fuse_open_common(inode, file, true);
@@ -1926,7 +1910,7 @@ static const struct inode_operations fuse_common_inode_operations = {
 static const struct inode_operations fuse_symlink_inode_operations = {
        .setattr        = fuse_setattr,
        .follow_link    = fuse_follow_link,
-       .put_link       = fuse_put_link,
+       .put_link       = free_page_put_link,
        .readlink       = generic_readlink,
        .getattr        = fuse_getattr,
        .setxattr       = fuse_setxattr,
index 1b3ca7a2e3fcfb807d4505d81bace6b698272bff..3a1461de1551d5765b7334747e69d2909ffb8d00 100644 (file)
@@ -1548,7 +1548,7 @@ out:
  * Returns: 0 on success or error code
  */
 
-static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *gfs2_follow_link(struct dentry *dentry, void **cookie)
 {
        struct gfs2_inode *ip = GFS2_I(d_inode(dentry));
        struct gfs2_holder i_gh;
@@ -1561,8 +1561,7 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
        error = gfs2_glock_nq(&i_gh);
        if (error) {
                gfs2_holder_uninit(&i_gh);
-               nd_set_link(nd, ERR_PTR(error));
-               return NULL;
+               return ERR_PTR(error);
        }
 
        size = (unsigned int)i_size_read(&ip->i_inode);
@@ -1586,8 +1585,9 @@ static void *gfs2_follow_link(struct dentry *dentry, struct nameidata *nd)
        brelse(dibh);
 out:
        gfs2_glock_dq_uninit(&i_gh);
-       nd_set_link(nd, buf);
-       return NULL;
+       if (!IS_ERR(buf))
+               *cookie = buf;
+       return buf;
 }
 
 /**
index ef263174acd23a88f2a110b80a67afc2fab2ddc0..059597b23f677b0959d8264b83cf4c4a2cec34b7 100644 (file)
@@ -581,7 +581,7 @@ static int hostfs_create(struct inode *dir, struct dentry *dentry, umode_t mode,
        if (name == NULL)
                goto out_put;
 
-       fd = file_create(name, mode & S_IFMT);
+       fd = file_create(name, mode & 0777);
        if (fd < 0)
                error = fd;
        else
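
The old mask was simply the wrong constant: S_IFMT (0170000) selects the file-type bits, not the permission bits, so the host-side file was created with a garbage mode. For a typical mode of 0100644:

	mode & S_IFMT == 0100000	/* file-type bits: wrong argument */
	mode & 0777   == 0644		/* permission bits: what the host
					 * open()/creat() actually expects */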
@@ -892,7 +892,7 @@ static const struct inode_operations hostfs_dir_iops = {
        .setattr        = hostfs_setattr,
 };
 
-static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *hostfs_follow_link(struct dentry *dentry, void **cookie)
 {
        char *link = __getname();
        if (link) {
@@ -906,21 +906,18 @@ static void *hostfs_follow_link(struct dentry *dentry, struct nameidata *nd)
                }
                if (err < 0) {
                        __putname(link);
-                       link = ERR_PTR(err);
+                       return ERR_PTR(err);
                }
        } else {
-               link = ERR_PTR(-ENOMEM);
+               return ERR_PTR(-ENOMEM);
        }
 
-       nd_set_link(nd, link);
-       return NULL;
+       return *cookie = link;
 }
 
-static void hostfs_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+static void hostfs_put_link(struct inode *unused, void *cookie)
 {
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               __putname(s);
+       __putname(cookie);
 }
 
 static const struct inode_operations hostfs_link_iops = {
index fa2bd5366ecf1f4c3d9b81c2d80336a621831dcf..2867837909a91ba005af78ea3ba4b5191e13c1d5 100644 (file)
@@ -642,20 +642,19 @@ static int hppfs_readlink(struct dentry *dentry, char __user *buffer,
                                                    buflen);
 }
 
-static void *hppfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *hppfs_follow_link(struct dentry *dentry, void **cookie)
 {
        struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry;
 
-       return d_inode(proc_dentry)->i_op->follow_link(proc_dentry, nd);
+       return d_inode(proc_dentry)->i_op->follow_link(proc_dentry, cookie);
 }
 
-static void hppfs_put_link(struct dentry *dentry, struct nameidata *nd,
-                          void *cookie)
+static void hppfs_put_link(struct inode *inode, void *cookie)
 {
-       struct dentry *proc_dentry = HPPFS_I(d_inode(dentry))->proc_dentry;
+       struct inode *proc_inode = d_inode(HPPFS_I(inode)->proc_dentry);
 
-       if (d_inode(proc_dentry)->i_op->put_link)
-               d_inode(proc_dentry)->i_op->put_link(proc_dentry, nd, cookie);
+       if (proc_inode->i_op->put_link)
+               proc_inode->i_op->put_link(proc_inode, cookie);
 }
 
 static const struct inode_operations hppfs_dir_iops = {
index ea37cd17b53f0c98b47e2e626b9ff2a9b6e5699b..e8d62688ed9181e511e2a0e8c6a5f36840cdbe94 100644 (file)
@@ -152,6 +152,7 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
        inode->i_pipe = NULL;
        inode->i_bdev = NULL;
        inode->i_cdev = NULL;
+       inode->i_link = NULL;
        inode->i_rdev = 0;
        inode->dirtied_when = 0;
 
@@ -1584,36 +1585,47 @@ static int update_time(struct inode *inode, struct timespec *time, int flags)
  *     This function automatically handles read only file systems and media,
  *     as well as the "noatime" flag and inode specific "noatime" markers.
  */
-void touch_atime(const struct path *path)
+bool atime_needs_update(const struct path *path, struct inode *inode)
 {
        struct vfsmount *mnt = path->mnt;
-       struct inode *inode = d_inode(path->dentry);
        struct timespec now;
 
        if (inode->i_flags & S_NOATIME)
-               return;
+               return false;
        if (IS_NOATIME(inode))
-               return;
+               return false;
        if ((inode->i_sb->s_flags & MS_NODIRATIME) && S_ISDIR(inode->i_mode))
-               return;
+               return false;
 
        if (mnt->mnt_flags & MNT_NOATIME)
-               return;
+               return false;
        if ((mnt->mnt_flags & MNT_NODIRATIME) && S_ISDIR(inode->i_mode))
-               return;
+               return false;
 
        now = current_fs_time(inode->i_sb);
 
        if (!relatime_need_update(mnt, inode, now))
-               return;
+               return false;
 
        if (timespec_equal(&inode->i_atime, &now))
+               return false;
+
+       return true;
+}
+
+void touch_atime(const struct path *path)
+{
+       struct vfsmount *mnt = path->mnt;
+       struct inode *inode = d_inode(path->dentry);
+       struct timespec now;
+
+       if (!atime_needs_update(path, inode))
                return;
 
        if (!sb_start_write_trylock(inode->i_sb))
                return;
 
-       if (__mnt_want_write(mnt))
+       if (__mnt_want_write(mnt) != 0)
                goto skip_update;
        /*
         * File systems can error out when updating inodes if they need to
@@ -1624,6 +1636,7 @@ void touch_atime(const struct path *path)
         * We may also fail on filesystems that have the ability to make parts
         * of the fs read only, e.g. subvolumes in Btrfs.
         */
+       now = current_fs_time(inode->i_sb);
        update_time(inode, &now, S_ATIME);
        __mnt_drop_write(mnt);
 skip_update:
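
Splitting the predicate out of touch_atime() lets the lockless path walk ask "would following this link need an atime update?" without performing the update. That is how the RCU symlink code in this series uses it: an update means sleeping and writing, which rcu-walk cannot do, so it bails out to ref-walk instead. A simplified sketch of the consumer (the demo_ wrapper is hypothetical; the pattern mirrors fs/namei.c):

static int demo_link_atime(struct nameidata *nd, struct path *link,
			   struct inode *inode)
{
	if (nd->flags & LOOKUP_RCU)
		/* rcu-walk may not sleep or write: signal -ECHILD so the
		 * lookup is redone with references held */
		return atime_needs_update(link, inode) ? -ECHILD : 0;
	touch_atime(link);
	return 0;
}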
index b5128c6e63ad6644d19bf861a062d63f48265a4d..a9079d035ae59d9a6983bcaa79625ce5f0e5c343 100644 (file)
@@ -842,15 +842,23 @@ static int scan_revoke_records(journal_t *journal, struct buffer_head *bh,
 {
        jbd2_journal_revoke_header_t *header;
        int offset, max;
+       int csum_size = 0;
+       __u32 rcount;
        int record_len = 4;
 
        header = (jbd2_journal_revoke_header_t *) bh->b_data;
        offset = sizeof(jbd2_journal_revoke_header_t);
-       max = be32_to_cpu(header->r_count);
+       rcount = be32_to_cpu(header->r_count);
 
        if (!jbd2_revoke_block_csum_verify(journal, header))
                return -EINVAL;
 
+       if (jbd2_journal_has_csum_v2or3(journal))
+               csum_size = sizeof(struct jbd2_journal_revoke_tail);
+       if (rcount > journal->j_blocksize - csum_size)
+               return -EINVAL;
+       max = rcount;
+
        if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
                record_len = 8;
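
r_count comes straight from the on-disk journal, so it has to be bounded before it becomes the loop limit. Worked numbers (assuming a 4 KiB journal block with v2/v3 checksums enabled):

	/* csum_size == sizeof(struct jbd2_journal_revoke_tail) == 4, so
	 * any r_count > 4096 - 4 == 4092 is rejected; previously such a
	 * value let the recovery loop read past the end of bh->b_data. */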
 
index c6cbaef2bda1498d8f2e00eeca30a40c8a462361..14214da80eb8ea5d781389ed5ff3b1e373f4e7b4 100644 (file)
@@ -577,7 +577,7 @@ static void write_one_revoke_record(journal_t *journal,
 {
        int csum_size = 0;
        struct buffer_head *descriptor;
-       int offset;
+       int sz, offset;
        journal_header_t *header;
 
        /* If we are already aborting, this all becomes a noop.  We
@@ -594,9 +594,14 @@ static void write_one_revoke_record(journal_t *journal,
        if (jbd2_journal_has_csum_v2or3(journal))
                csum_size = sizeof(struct jbd2_journal_revoke_tail);
 
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
+               sz = 8;
+       else
+               sz = 4;
+
        /* Make sure we have a descriptor with space left for the record */
        if (descriptor) {
-               if (offset >= journal->j_blocksize - csum_size) {
+               if (offset + sz > journal->j_blocksize - csum_size) {
                        flush_descriptor(journal, descriptor, offset, write_op);
                        descriptor = NULL;
                }
@@ -619,16 +624,13 @@ static void write_one_revoke_record(journal_t *journal,
                *descriptorp = descriptor;
        }
 
-       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT)) {
+       if (JBD2_HAS_INCOMPAT_FEATURE(journal, JBD2_FEATURE_INCOMPAT_64BIT))
                * ((__be64 *)(&descriptor->b_data[offset])) =
                        cpu_to_be64(record->blocknr);
-               offset += 8;
-
-       } else {
+       else
                * ((__be32 *)(&descriptor->b_data[offset])) =
                        cpu_to_be32(record->blocknr);
-               offset += 4;
-       }
+       offset += sz;
 
        *offsetp = offset;
 }
index 5f09370c90a8199647a87ef7cb6a5cfdba113d2b..ff2f2e6ad3114664cbcd80c3812b6b875f64807b 100644 (file)
@@ -551,7 +551,6 @@ int jbd2_journal_extend(handle_t *handle, int nblocks)
        int result;
        int wanted;
 
-       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;
@@ -627,7 +626,6 @@ int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask)
        tid_t           tid;
        int             need_to_start, ret;
 
-       WARN_ON(!transaction);
        /* If we've had an abort of any type, don't even think about
         * actually doing the restart! */
        if (is_handle_aborted(handle))
@@ -785,7 +783,6 @@ do_get_write_access(handle_t *handle, struct journal_head *jh,
        int need_copy = 0;
        unsigned long start_lock, time_lock;
 
-       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;
@@ -1051,7 +1048,6 @@ int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh)
        int err;
 
        jbd_debug(5, "journal_head %p\n", jh);
-       WARN_ON(!transaction);
        err = -EROFS;
        if (is_handle_aborted(handle))
                goto out;
@@ -1266,7 +1262,6 @@ int jbd2_journal_dirty_metadata(handle_t *handle, struct buffer_head *bh)
        struct journal_head *jh;
        int ret = 0;
 
-       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;
@@ -1397,7 +1392,6 @@ int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh)
        int err = 0;
        int was_modified = 0;
 
-       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;
@@ -1530,8 +1524,22 @@ int jbd2_journal_stop(handle_t *handle)
        tid_t tid;
        pid_t pid;
 
-       if (!transaction)
-               goto free_and_exit;
+       if (!transaction) {
+               /*
+                * Handle is already detached from the transaction so
+                * there is nothing to do other than decrease a refcount,
+                * or free the handle if refcount drops to zero
+                */
+               if (--handle->h_ref > 0) {
+                       jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1,
+                                                        handle->h_ref);
+                       return err;
+               } else {
+                       if (handle->h_rsv_handle)
+                               jbd2_free_handle(handle->h_rsv_handle);
+                       goto free_and_exit;
+               }
+       }
        journal = transaction->t_journal;
 
        J_ASSERT(journal_current_handle() == handle);
@@ -2373,7 +2381,6 @@ int jbd2_journal_file_inode(handle_t *handle, struct jbd2_inode *jinode)
        transaction_t *transaction = handle->h_transaction;
        journal_t *journal;
 
-       WARN_ON(!transaction);
        if (is_handle_aborted(handle))
                return -EROFS;
        journal = transaction->t_journal;
index 1ba5c97943b8751f0210870a7bb51636dd4e5ecd..81180022923fbd8ccd499d0b74af05b59695c302 100644 (file)
@@ -354,6 +354,7 @@ static int jffs2_symlink (struct inode *dir_i, struct dentry *dentry, const char
                ret = -ENOMEM;
                goto fail;
        }
+       inode->i_link = f->target;
 
        jffs2_dbg(1, "%s(): symlink's target '%s' cached\n",
                  __func__, (char *)f->target);
index fe5ea080b4ec810f29589b257f605389067dadf4..60d86e8fba6e9561bb4204ef2c99fe7c744288da 100644 (file)
@@ -294,6 +294,7 @@ struct inode *jffs2_iget(struct super_block *sb, unsigned long ino)
 
        case S_IFLNK:
                inode->i_op = &jffs2_symlink_inode_operations;
+               inode->i_link = f->target;
                break;
 
        case S_IFDIR:
index 1fefa25d0fa586a14caae06efea7363794be38fc..8ce2f240125b39803b4ebf2d681b6a95d40c33f5 100644 (file)
@@ -9,58 +9,15 @@
  *
  */
 
-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
-
-#include <linux/kernel.h>
-#include <linux/fs.h>
-#include <linux/namei.h>
 #include "nodelist.h"
 
-static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd);
-
 const struct inode_operations jffs2_symlink_inode_operations =
 {
        .readlink =     generic_readlink,
-       .follow_link =  jffs2_follow_link,
+       .follow_link =  simple_follow_link,
        .setattr =      jffs2_setattr,
        .setxattr =     jffs2_setxattr,
        .getxattr =     jffs2_getxattr,
        .listxattr =    jffs2_listxattr,
        .removexattr =  jffs2_removexattr
 };
-
-static void *jffs2_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct jffs2_inode_info *f = JFFS2_INODE_INFO(d_inode(dentry));
-       char *p = (char *)f->target;
-
-       /*
-        * We don't acquire the f->sem mutex here since the only data we
-        * use is f->target.
-        *
-        * 1. If we are here the inode has already built and f->target has
-        * to point to the target path.
-        * 2. Nobody uses f->target (if the inode is symlink's inode). The
-        * exception is inode freeing function which frees f->target. But
-        * it can't be called while we are here and before VFS has
-        * stopped using our f->target string which we provide by means of
-        * nd_set_link() call.
-        */
-
-       if (!p) {
-               pr_err("%s(): can't find symlink target\n", __func__);
-               p = ERR_PTR(-EIO);
-       }
-       jffs2_dbg(1, "%s(): target path is '%s'\n",
-                 __func__, (char *)f->target);
-
-       nd_set_link(nd, p);
-
-       /*
-        * We will unlock the f->sem mutex but VFS will use the f->target string. This is safe
-        * since the only way that may cause f->target to be changed is iput() operation.
-        * But VFS will not use f->target after iput() has been called.
-        */
-       return NULL;
-}
-
index 070dc4b335449423091e67dd74c0f1c34617b041..6f1cb2b5ee285dd50622f719296fe71284d5f826 100644 (file)
@@ -63,11 +63,12 @@ struct inode *jfs_iget(struct super_block *sb, unsigned long ino)
                        inode->i_mapping->a_ops = &jfs_aops;
                } else {
                        inode->i_op = &jfs_fast_symlink_inode_operations;
+                       inode->i_link = JFS_IP(inode)->i_inline;
                        /*
                         * The inline data should be null-terminated, but
                         * don't let on-disk corruption crash the kernel
                         */
-                       JFS_IP(inode)->i_inline[inode->i_size] = '\0';
+                       inode->i_link[inode->i_size] = '\0';
                }
        } else {
                inode->i_op = &jfs_file_inode_operations;
index 66db7bc0ed1096050c2b2f93c3aa9e16a0ba4726..e33be921aa41b5ae56a4054605aaeea5b8065987 100644 (file)
@@ -880,7 +880,6 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
        int ssize;              /* source pathname size */
        struct btstack btstack;
        struct inode *ip = d_inode(dentry);
-       unchar *i_fastsymlink;
        s64 xlen = 0;
        int bmask = 0, xsize;
        s64 xaddr;
@@ -946,8 +945,8 @@ static int jfs_symlink(struct inode *dip, struct dentry *dentry,
        if (ssize <= IDATASIZE) {
                ip->i_op = &jfs_fast_symlink_inode_operations;
 
-               i_fastsymlink = JFS_IP(ip)->i_inline;
-               memcpy(i_fastsymlink, name, ssize);
+               ip->i_link = JFS_IP(ip)->i_inline;
+               memcpy(ip->i_link, name, ssize);
                ip->i_size = ssize - 1;
 
                /*
index 80f42bcc4ef1295669de10ea1087384f1c98c95b..5929e2363cb85eddc0d54bf3a04754383cb395db 100644 (file)
  */
 
 #include <linux/fs.h>
-#include <linux/namei.h>
 #include "jfs_incore.h"
 #include "jfs_inode.h"
 #include "jfs_xattr.h"
 
-static void *jfs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       char *s = JFS_IP(d_inode(dentry))->i_inline;
-       nd_set_link(nd, s);
-       return NULL;
-}
-
 const struct inode_operations jfs_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = jfs_follow_link,
+       .follow_link    = simple_follow_link,
        .setattr        = jfs_setattr,
        .setxattr       = jfs_setxattr,
        .getxattr       = jfs_getxattr,
index f131fc23ffc4c18f03a9764973fa998bf0e5f79e..fffca9517321c88ee1e0128b864b257c56e2350d 100644 (file)
@@ -518,7 +518,14 @@ static struct kernfs_node *__kernfs_new_node(struct kernfs_root *root,
        if (!kn)
                goto err_out1;
 
-       ret = ida_simple_get(&root->ino_ida, 1, 0, GFP_KERNEL);
+       /*
+        * If the ino of the sysfs entry created for a kmem cache gets
+        * allocated from an ida layer, which is accounted to the memcg that
+        * owns the cache, the memcg will get pinned forever. So do not account
+        * ino ida allocations.
+        */
+       ret = ida_simple_get(&root->ino_ida, 1, 0,
+                            GFP_KERNEL | __GFP_NOACCOUNT);
        if (ret < 0)
                goto err_out2;
        kn->ino = ret;
index 8a198898e39afd3ffde994cee7d732dcdfa8bdcd..db272528ab5bb01c192b5502650f29e0784663ce 100644 (file)
@@ -112,25 +112,18 @@ static int kernfs_getlink(struct dentry *dentry, char *path)
        return error;
 }
 
-static void *kernfs_iop_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *kernfs_iop_follow_link(struct dentry *dentry, void **cookie)
 {
        int error = -ENOMEM;
        unsigned long page = get_zeroed_page(GFP_KERNEL);
-       if (page) {
-               error = kernfs_getlink(dentry, (char *) page);
-               if (error < 0)
-                       free_page((unsigned long)page);
-       }
-       nd_set_link(nd, error ? ERR_PTR(error) : (char *)page);
-       return NULL;
-}
-
-static void kernfs_iop_put_link(struct dentry *dentry, struct nameidata *nd,
-                               void *cookie)
-{
-       char *page = nd_get_link(nd);
-       if (!IS_ERR(page))
+       if (!page)
+               return ERR_PTR(-ENOMEM);
+       error = kernfs_getlink(dentry, (char *)page);
+       if (unlikely(error < 0)) {
                free_page((unsigned long)page);
+               return ERR_PTR(error);
+       }
+       return *cookie = (char *)page;
 }
 
 const struct inode_operations kernfs_symlink_iops = {
@@ -140,7 +133,7 @@ const struct inode_operations kernfs_symlink_iops = {
        .listxattr      = kernfs_iop_listxattr,
        .readlink       = generic_readlink,
        .follow_link    = kernfs_iop_follow_link,
-       .put_link       = kernfs_iop_put_link,
+       .put_link       = free_page_put_link,
        .setattr        = kernfs_iop_setattr,
        .getattr        = kernfs_iop_getattr,
        .permission     = kernfs_iop_permission,
index cb1fb4b9b6377b09b669b833cc3437b259b622f6..65e1feca8b982c55bff37e5a85529f8cb0d4121e 100644 (file)
@@ -1024,15 +1024,18 @@ int noop_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 }
 EXPORT_SYMBOL(noop_fsync);
 
-void kfree_put_link(struct dentry *dentry, struct nameidata *nd,
-                               void *cookie)
+void kfree_put_link(struct inode *unused, void *cookie)
 {
-       char *s = nd_get_link(nd);
-       if (!IS_ERR(s))
-               kfree(s);
+       kfree(cookie);
 }
 EXPORT_SYMBOL(kfree_put_link);
 
+void free_page_put_link(struct inode *unused, void *cookie)
+{
+       free_page((unsigned long) cookie);
+}
+EXPORT_SYMBOL(free_page_put_link);
+
 /*
  * nop .set_page_dirty method so that people can use .page_mkwrite on
  * anon inodes.
@@ -1093,3 +1096,15 @@ simple_nosetlease(struct file *filp, long arg, struct file_lock **flp,
        return -EINVAL;
 }
 EXPORT_SYMBOL(simple_nosetlease);
+
+const char *simple_follow_link(struct dentry *dentry, void **cookie)
+{
+       return d_inode(dentry)->i_link;
+}
+EXPORT_SYMBOL(simple_follow_link);
+
+const struct inode_operations simple_symlink_inode_operations = {
+       .follow_link = simple_follow_link,
+       .readlink = generic_readlink
+};
+EXPORT_SYMBOL(simple_symlink_inode_operations);
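
These two exports are what let exofs, ext2, ext3, jffs2, jfs and freevxfs above delete their per-fs fast-symlink code. The pattern every conversion follows (a sketch; the demo_ name is hypothetical):

static void demo_init_fast_symlink(struct inode *inode, char *body)
{
	/* body must stay valid for the inode's lifetime, e.g. the
	 * in-inode data area, or a buffer the fs frees at eviction */
	inode->i_link = body;
	inode->i_op = &simple_symlink_inode_operations;
}

simple_follow_link() never sets the cookie, so no put_link cleanup is ever invoked for these inodes.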
index 4cf38f1185494115c0dcb8625f07b6593ae15fd5..f9b45d46d4c483ea0be1ceca4c35b9b3075b56b9 100644 (file)
@@ -779,6 +779,7 @@ fail:
 const struct inode_operations logfs_symlink_iops = {
        .readlink       = generic_readlink,
        .follow_link    = page_follow_link_light,
+       .put_link       = page_put_link,
 };
 
 const struct inode_operations logfs_dir_iops = {
index 6a61c2b3e385cfabcf53cdf13ed8ae9afa99ce1d..b5b8082bfa4208086a7ee06741cb76670d51bab9 100644 (file)
@@ -88,6 +88,7 @@ static inline int is_mounted(struct vfsmount *mnt)
 extern struct mount *__lookup_mnt(struct vfsmount *, struct dentry *);
 extern struct mount *__lookup_mnt_last(struct vfsmount *, struct dentry *);
 
+extern int __legitimize_mnt(struct vfsmount *, unsigned);
 extern bool legitimize_mnt(struct vfsmount *, unsigned);
 
 extern void __detach_mounts(struct dentry *dentry);
index 4a8d998b7274b3406532cc012d05ee18aa9b5bba..2dad0eaf91d34d8f47d3cc525eafd45107d429bd 100644 (file)
@@ -492,6 +492,7 @@ void path_put(const struct path *path)
 }
 EXPORT_SYMBOL(path_put);
 
+#define EMBEDDED_LEVELS 2
 struct nameidata {
        struct path     path;
        struct qstr     last;
@@ -501,10 +502,139 @@ struct nameidata {
        unsigned        seq, m_seq;
        int             last_type;
        unsigned        depth;
-       struct file     *base;
-       char *saved_names[MAX_NESTED_LINKS + 1];
+       int             total_link_count;
+       struct saved {
+               struct path link;
+               void *cookie;
+               const char *name;
+               struct inode *inode;
+               unsigned seq;
+       } *stack, internal[EMBEDDED_LEVELS];
+       struct filename *name;
+       struct nameidata *saved;
+       unsigned        root_seq;
+       int             dfd;
 };
 
+static void set_nameidata(struct nameidata *p, int dfd, struct filename *name)
+{
+       struct nameidata *old = current->nameidata;
+       p->stack = p->internal;
+       p->dfd = dfd;
+       p->name = name;
+       p->total_link_count = old ? old->total_link_count : 0;
+       p->saved = old;
+       current->nameidata = p;
+}
+
+static void restore_nameidata(void)
+{
+       struct nameidata *now = current->nameidata, *old = now->saved;
+
+       current->nameidata = old;
+       if (old)
+               old->total_link_count = now->total_link_count;
+       if (now->stack != now->internal) {
+               kfree(now->stack);
+               now->stack = now->internal;
+       }
+}
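/*
 * Nesting sketch: a lookup started while another is in flight (for
 * example from inside a ->d_automount() instance) gets its own
 * nameidata, chained through ->saved:
 *
 *	set_nameidata(&inner, dfd, name);   current->nameidata == &inner
 *	... nested walk ...                 inner.saved == &outer
 *	restore_nameidata();                back to &outer, with the
 *					    symlink count carried over
 *
 * so total_link_count remains a single per-walk budget across nesting.
 */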
+
+static int __nd_alloc_stack(struct nameidata *nd)
+{
+       struct saved *p;
+
+       if (nd->flags & LOOKUP_RCU) {
+               p = kmalloc(MAXSYMLINKS * sizeof(struct saved),
+                                 GFP_ATOMIC);
+               if (unlikely(!p))
+                       return -ECHILD;
+       } else {
+               p = kmalloc(MAXSYMLINKS * sizeof(struct saved),
+                                 GFP_KERNEL);
+               if (unlikely(!p))
+                       return -ENOMEM;
+       }
+       memcpy(p, nd->internal, sizeof(nd->internal));
+       nd->stack = p;
+       return 0;
+}
+
+static inline int nd_alloc_stack(struct nameidata *nd)
+{
+       if (likely(nd->depth != EMBEDDED_LEVELS))
+               return 0;
+       if (likely(nd->stack != nd->internal))
+               return 0;
+       return __nd_alloc_stack(nd);
+}
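/*
 * Sizing note (approximate figures): the common case of at most
 * EMBEDDED_LEVELS (2) nested symlinks never allocates; the first walk
 * that goes deeper pays for a single kmalloc of the full array,
 * MAXSYMLINKS (40) * sizeof(struct saved), roughly 2KB on 64-bit.
 * After that nd->stack no longer points at nd->internal, so
 * nd_alloc_stack() short-circuits for the rest of the walk.
 */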
+
+static void drop_links(struct nameidata *nd)
+{
+       int i = nd->depth;
+       while (i--) {
+               struct saved *last = nd->stack + i;
+               struct inode *inode = last->inode;
+               if (last->cookie && inode->i_op->put_link) {
+                       inode->i_op->put_link(inode, last->cookie);
+                       last->cookie = NULL;
+               }
+       }
+}
+
+static void terminate_walk(struct nameidata *nd)
+{
+       drop_links(nd);
+       if (!(nd->flags & LOOKUP_RCU)) {
+               int i;
+               path_put(&nd->path);
+               for (i = 0; i < nd->depth; i++)
+                       path_put(&nd->stack[i].link);
+               if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
+                       path_put(&nd->root);
+                       nd->root.mnt = NULL;
+               }
+       } else {
+               nd->flags &= ~LOOKUP_RCU;
+               if (!(nd->flags & LOOKUP_ROOT))
+                       nd->root.mnt = NULL;
+               rcu_read_unlock();
+       }
+       nd->depth = 0;
+}
+
+/* path_put is needed afterwards regardless of success or failure */
+static bool legitimize_path(struct nameidata *nd,
+                           struct path *path, unsigned seq)
+{
+       int res = __legitimize_mnt(path->mnt, nd->m_seq);
+       if (unlikely(res)) {
+               if (res > 0)
+                       path->mnt = NULL;
+               path->dentry = NULL;
+               return false;
+       }
+       if (unlikely(!lockref_get_not_dead(&path->dentry->d_lockref))) {
+               path->dentry = NULL;
+               return false;
+       }
+       return !read_seqcount_retry(&path->dentry->d_seq, seq);
+}
+
+static bool legitimize_links(struct nameidata *nd)
+{
+       int i;
+       for (i = 0; i < nd->depth; i++) {
+               struct saved *last = nd->stack + i;
+               if (unlikely(!legitimize_path(nd, &last->link, last->seq))) {
+                       drop_links(nd);
+                       nd->depth = i + 1;
+                       return false;
+               }
+       }
+       return true;
+}
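/*
 * Failure protocol sketch: legitimize_path() NULLs whatever part of a
 * failed path would be unsafe to release, so path_put() on it is
 * still harmless; trimming nd->depth to i + 1 therefore leaves
 * terminate_walk() dropping exactly the entries that were touched and
 * never the untouched tail of the stack.
 */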
+
 /*
  * Path walking has 2 modes, rcu-walk and ref-walk (see
  * Documentation/filesystems/path-lookup.txt).  In situations when we can't
@@ -520,35 +650,28 @@ struct nameidata {
  * unlazy_walk - try to switch to ref-walk mode.
  * @nd: nameidata pathwalk data
  * @dentry: child of nd->path.dentry or NULL
+ * @seq: seq number to check dentry against
  * Returns: 0 on success, -ECHILD on failure
  *
  * unlazy_walk attempts to legitimize the current nd->path, nd->root and dentry
  * for ref-walk mode.  @dentry must be a path found by a do_lookup call on
  * @nd or NULL.  Must be called from rcu-walk context.
+ * Nothing should touch nameidata between unlazy_walk() failure and
+ * terminate_walk().
  */
-static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
+static int unlazy_walk(struct nameidata *nd, struct dentry *dentry, unsigned seq)
 {
-       struct fs_struct *fs = current->fs;
        struct dentry *parent = nd->path.dentry;
 
        BUG_ON(!(nd->flags & LOOKUP_RCU));
 
-       /*
-        * After legitimizing the bastards, terminate_walk()
-        * will do the right thing for non-RCU mode, and all our
-        * subsequent exit cases should rcu_read_unlock()
-        * before returning.  Do vfsmount first; if dentry
-        * can't be legitimized, just set nd->path.dentry to NULL
-        * and rely on dput(NULL) being a no-op.
-        */
-       if (!legitimize_mnt(nd->path.mnt, nd->m_seq))
-               return -ECHILD;
        nd->flags &= ~LOOKUP_RCU;
-
-       if (!lockref_get_not_dead(&parent->d_lockref)) {
-               nd->path.dentry = NULL; 
-               goto out;
-       }
+       if (unlikely(!legitimize_links(nd)))
+               goto out2;
+       if (unlikely(!legitimize_mnt(nd->path.mnt, nd->m_seq)))
+               goto out2;
+       if (unlikely(!lockref_get_not_dead(&parent->d_lockref)))
+               goto out1;
 
        /*
         * For a negative lookup, the lookup sequence point is the parent's
@@ -568,7 +691,7 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
        } else {
                if (!lockref_get_not_dead(&dentry->d_lockref))
                        goto out;
-               if (read_seqcount_retry(&dentry->d_seq, nd->seq))
+               if (read_seqcount_retry(&dentry->d_seq, seq))
                        goto drop_dentry;
        }
 
@@ -577,22 +700,24 @@ static int unlazy_walk(struct nameidata *nd, struct dentry *dentry)
         * still valid and get it if required.
         */
        if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
-               spin_lock(&fs->lock);
-               if (nd->root.mnt != fs->root.mnt || nd->root.dentry != fs->root.dentry)
-                       goto unlock_and_drop_dentry;
-               path_get(&nd->root);
-               spin_unlock(&fs->lock);
+               if (unlikely(!legitimize_path(nd, &nd->root, nd->root_seq))) {
+                       rcu_read_unlock();
+                       dput(dentry);
+                       return -ECHILD;
+               }
        }
 
        rcu_read_unlock();
        return 0;
 
-unlock_and_drop_dentry:
-       spin_unlock(&fs->lock);
 drop_dentry:
        rcu_read_unlock();
        dput(dentry);
        goto drop_root_mnt;
+out2:
+       nd->path.mnt = NULL;
+out1:
+       nd->path.dentry = NULL;
 out:
        rcu_read_unlock();
 drop_root_mnt:
@@ -601,6 +726,24 @@ drop_root_mnt:
        return -ECHILD;
 }
 
+static int unlazy_link(struct nameidata *nd, struct path *link, unsigned seq)
+{
+       if (unlikely(!legitimize_path(nd, link, seq))) {
+               drop_links(nd);
+               nd->depth = 0;
+               nd->flags &= ~LOOKUP_RCU;
+               nd->path.mnt = NULL;
+               nd->path.dentry = NULL;
+               if (!(nd->flags & LOOKUP_ROOT))
+                       nd->root.mnt = NULL;
+               rcu_read_unlock();
+       } else if (likely(unlazy_walk(nd, NULL, 0) == 0)) {
+               return 0;
+       }
+       path_put(link);
+       return -ECHILD;
+}
+
 static inline int d_revalidate(struct dentry *dentry, unsigned int flags)
 {
        return dentry->d_op->d_revalidate(dentry, flags);
@@ -622,26 +765,10 @@ static int complete_walk(struct nameidata *nd)
        int status;
 
        if (nd->flags & LOOKUP_RCU) {
-               nd->flags &= ~LOOKUP_RCU;
                if (!(nd->flags & LOOKUP_ROOT))
                        nd->root.mnt = NULL;
-
-               if (!legitimize_mnt(nd->path.mnt, nd->m_seq)) {
-                       rcu_read_unlock();
+               if (unlikely(unlazy_walk(nd, NULL, 0)))
                        return -ECHILD;
-               }
-               if (unlikely(!lockref_get_not_dead(&dentry->d_lockref))) {
-                       rcu_read_unlock();
-                       mntput(nd->path.mnt);
-                       return -ECHILD;
-               }
-               if (read_seqcount_retry(&dentry->d_seq, nd->seq)) {
-                       rcu_read_unlock();
-                       dput(dentry);
-                       mntput(nd->path.mnt);
-                       return -ECHILD;
-               }
-               rcu_read_unlock();
        }
 
        if (likely(!(nd->flags & LOOKUP_JUMPED)))
@@ -657,28 +784,25 @@ static int complete_walk(struct nameidata *nd)
        if (!status)
                status = -ESTALE;
 
-       path_put(&nd->path);
        return status;
 }
 
-static __always_inline void set_root(struct nameidata *nd)
+static void set_root(struct nameidata *nd)
 {
        get_fs_root(current->fs, &nd->root);
 }
 
-static int link_path_walk(const char *, struct nameidata *);
-
-static __always_inline unsigned set_root_rcu(struct nameidata *nd)
+static unsigned set_root_rcu(struct nameidata *nd)
 {
        struct fs_struct *fs = current->fs;
-       unsigned seq, res;
+       unsigned seq;
 
        do {
                seq = read_seqcount_begin(&fs->seq);
                nd->root = fs->root;
-               res = __read_seqcount_begin(&nd->root.dentry->d_seq);
+               nd->root_seq = __read_seqcount_begin(&nd->root.dentry->d_seq);
        } while (read_seqcount_retry(&fs->seq, seq));
-       return res;
+       return nd->root_seq;
 }
 
 static void path_put_conditional(struct path *path, struct nameidata *nd)
@@ -704,8 +828,9 @@ static inline void path_to_nameidata(const struct path *path,
  * Helper to directly jump to a known parsed path from ->follow_link,
  * caller must have taken a reference to path beforehand.
  */
-void nd_jump_link(struct nameidata *nd, struct path *path)
+void nd_jump_link(struct path *path)
 {
+       struct nameidata *nd = current->nameidata;
        path_put(&nd->path);
 
        nd->path = *path;
@@ -713,24 +838,14 @@ void nd_jump_link(struct nameidata *nd, struct path *path)
        nd->flags |= LOOKUP_JUMPED;
 }
 
-void nd_set_link(struct nameidata *nd, char *path)
+static inline void put_link(struct nameidata *nd)
 {
-       nd->saved_names[nd->depth] = path;
-}
-EXPORT_SYMBOL(nd_set_link);
-
-char *nd_get_link(struct nameidata *nd)
-{
-       return nd->saved_names[nd->depth];
-}
-EXPORT_SYMBOL(nd_get_link);
-
-static inline void put_link(struct nameidata *nd, struct path *link, void *cookie)
-{
-       struct inode *inode = link->dentry->d_inode;
-       if (inode->i_op->put_link)
-               inode->i_op->put_link(link->dentry, nd, cookie);
-       path_put(link);
+       struct saved *last = nd->stack + --nd->depth;
+       struct inode *inode = last->inode;
+       if (last->cookie && inode->i_op->put_link)
+               inode->i_op->put_link(inode, last->cookie);
+       if (!(nd->flags & LOOKUP_RCU))
+               path_put(&last->link);
 }
 
 int sysctl_protected_symlinks __read_mostly = 0;
@@ -738,7 +853,6 @@ int sysctl_protected_hardlinks __read_mostly = 0;
 
 /**
  * may_follow_link - Check symlink following for unsafe situations
- * @link: The path of the symlink
  * @nd: nameidata pathwalk data
  *
  * In the case of the sysctl_protected_symlinks sysctl being enabled,
@@ -752,7 +866,7 @@ int sysctl_protected_hardlinks __read_mostly = 0;
  *
  * Returns 0 if following the symlink is allowed, -ve on error.
  */
-static inline int may_follow_link(struct path *link, struct nameidata *nd)
+static inline int may_follow_link(struct nameidata *nd)
 {
        const struct inode *inode;
        const struct inode *parent;
@@ -761,7 +875,7 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
                return 0;
 
        /* Allowed if owner and follower match. */
-       inode = link->dentry->d_inode;
+       inode = nd->stack[0].inode;
        if (uid_eq(current_cred()->fsuid, inode->i_uid))
                return 0;
 
@@ -774,9 +888,10 @@ static inline int may_follow_link(struct path *link, struct nameidata *nd)
        if (uid_eq(parent->i_uid, inode->i_uid))
                return 0;
 
-       audit_log_link_denied("follow_link", link);
-       path_put_conditional(link, nd);
-       path_put(&nd->path);
+       if (nd->flags & LOOKUP_RCU)
+               return -ECHILD;
+
+       audit_log_link_denied("follow_link", &nd->stack[0].link);
        return -EACCES;
 }
 
@@ -849,82 +964,68 @@ static int may_linkat(struct path *link)
        return -EPERM;
 }
 
-static __always_inline int
-follow_link(struct path *link, struct nameidata *nd, void **p)
+static __always_inline
+const char *get_link(struct nameidata *nd)
 {
-       struct dentry *dentry = link->dentry;
+       struct saved *last = nd->stack + nd->depth - 1;
+       struct dentry *dentry = last->link.dentry;
+       struct inode *inode = last->inode;
        int error;
-       char *s;
-
-       BUG_ON(nd->flags & LOOKUP_RCU);
-
-       if (link->mnt == nd->path.mnt)
-               mntget(link->mnt);
+       const char *res;
 
-       error = -ELOOP;
-       if (unlikely(current->total_link_count >= 40))
-               goto out_put_nd_path;
-
-       cond_resched();
-       current->total_link_count++;
-
-       touch_atime(link);
-       nd_set_link(nd, NULL);
+       if (!(nd->flags & LOOKUP_RCU)) {
+               touch_atime(&last->link);
+               cond_resched();
+       } else if (atime_needs_update(&last->link, inode)) {
+               if (unlikely(unlazy_walk(nd, NULL, 0)))
+                       return ERR_PTR(-ECHILD);
+               touch_atime(&last->link);
+       }
 
-       error = security_inode_follow_link(link->dentry, nd);
-       if (error)
-               goto out_put_nd_path;
+       error = security_inode_follow_link(dentry, inode,
+                                          nd->flags & LOOKUP_RCU);
+       if (unlikely(error))
+               return ERR_PTR(error);
 
        nd->last_type = LAST_BIND;
-       *p = dentry->d_inode->i_op->follow_link(dentry, nd);
-       error = PTR_ERR(*p);
-       if (IS_ERR(*p))
-               goto out_put_nd_path;
-
-       error = 0;
-       s = nd_get_link(nd);
-       if (s) {
-               if (unlikely(IS_ERR(s))) {
-                       path_put(&nd->path);
-                       put_link(nd, link, *p);
-                       return PTR_ERR(s);
+       res = inode->i_link;
+       if (!res) {
+               if (nd->flags & LOOKUP_RCU) {
+                       if (unlikely(unlazy_walk(nd, NULL, 0)))
+                               return ERR_PTR(-ECHILD);
                }
-               if (*s == '/') {
+               res = inode->i_op->follow_link(dentry, &last->cookie);
+               if (IS_ERR_OR_NULL(res)) {
+                       last->cookie = NULL;
+                       return res;
+               }
+       }
+       if (*res == '/') {
+               if (nd->flags & LOOKUP_RCU) {
+                       struct dentry *d;
+                       if (!nd->root.mnt)
+                               set_root_rcu(nd);
+                       nd->path = nd->root;
+                       d = nd->path.dentry;
+                       nd->inode = d->d_inode;
+                       nd->seq = nd->root_seq;
+                       if (unlikely(read_seqcount_retry(&d->d_seq, nd->seq)))
+                               return ERR_PTR(-ECHILD);
+               } else {
                        if (!nd->root.mnt)
                                set_root(nd);
                        path_put(&nd->path);
                        nd->path = nd->root;
                        path_get(&nd->root);
-                       nd->flags |= LOOKUP_JUMPED;
+                       nd->inode = nd->path.dentry->d_inode;
                }
-               nd->inode = nd->path.dentry->d_inode;
-               error = link_path_walk(s, nd);
-               if (unlikely(error))
-                       put_link(nd, link, *p);
+               nd->flags |= LOOKUP_JUMPED;
+               while (unlikely(*++res == '/'))
+                       ;
        }
-
-       return error;
-
-out_put_nd_path:
-       *p = NULL;
-       path_put(&nd->path);
-       path_put(link);
-       return error;
-}
-
-static int follow_up_rcu(struct path *path)
-{
-       struct mount *mnt = real_mount(path->mnt);
-       struct mount *parent;
-       struct dentry *mountpoint;
-
-       parent = mnt->mnt_parent;
-       if (&parent->mnt == path->mnt)
-               return 0;
-       mountpoint = mnt->mnt_mountpoint;
-       path->dentry = mountpoint;
-       path->mnt = &parent->mnt;
-       return 1;
+       if (!*res)
+               res = NULL;
+       return res;
 }
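/*
 * RCU notes on the above (sketch): an ->i_link body can be picked up
 * without leaving rcu-walk, which is the whole fast path; both an
 * overdue atime update and a real ->follow_link() call force a switch
 * to ref-walk first via unlazy_walk(), since either may block or take
 * references.
 */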
 
 /*
@@ -965,7 +1066,7 @@ EXPORT_SYMBOL(follow_up);
  * - return -EISDIR to tell follow_managed() to stop and return the path we
  *   were called with.
  */
-static int follow_automount(struct path *path, unsigned flags,
+static int follow_automount(struct path *path, struct nameidata *nd,
                            bool *need_mntput)
 {
        struct vfsmount *mnt;
@@ -985,13 +1086,13 @@ static int follow_automount(struct path *path, unsigned flags,
         * as being automount points.  These will need the attentions
         * of the daemon to instantiate them before they can be used.
         */
-       if (!(flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
-                    LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
+       if (!(nd->flags & (LOOKUP_PARENT | LOOKUP_DIRECTORY |
+                          LOOKUP_OPEN | LOOKUP_CREATE | LOOKUP_AUTOMOUNT)) &&
            path->dentry->d_inode)
                return -EISDIR;
 
-       current->total_link_count++;
-       if (current->total_link_count >= 40)
+       nd->total_link_count++;
+       if (nd->total_link_count >= 40)
                return -ELOOP;
 
        mnt = path->dentry->d_op->d_automount(path);
@@ -1005,7 +1106,7 @@ static int follow_automount(struct path *path, unsigned flags,
                 * the path being looked up; if it wasn't then the remainder of
                 * the path is inaccessible and we should say so.
                 */
-               if (PTR_ERR(mnt) == -EISDIR && (flags & LOOKUP_PARENT))
+               if (PTR_ERR(mnt) == -EISDIR && (nd->flags & LOOKUP_PARENT))
                        return -EREMOTE;
                return PTR_ERR(mnt);
        }
@@ -1045,7 +1146,7 @@ static int follow_automount(struct path *path, unsigned flags,
  *
  * Serialization is taken care of in namespace.c
  */
-static int follow_managed(struct path *path, unsigned flags)
+static int follow_managed(struct path *path, struct nameidata *nd)
 {
        struct vfsmount *mnt = path->mnt; /* held by caller, must be left alone */
        unsigned managed;
@@ -1089,7 +1190,7 @@ static int follow_managed(struct path *path, unsigned flags)
 
                /* Handle an automount point */
                if (managed & DCACHE_NEED_AUTOMOUNT) {
-                       ret = follow_automount(path, flags, &need_mntput);
+                       ret = follow_automount(path, nd, &need_mntput);
                        if (ret < 0)
                                break;
                        continue;
@@ -1103,7 +1204,11 @@ static int follow_managed(struct path *path, unsigned flags)
                mntput(path->mnt);
        if (ret == -EISDIR)
                ret = 0;
-       return ret < 0 ? ret : need_mntput;
+       if (need_mntput)
+               nd->flags |= LOOKUP_JUMPED;
+       if (unlikely(ret < 0))
+               path_put_conditional(path, nd);
+       return ret;
 }
 
 int follow_down_one(struct path *path)
@@ -1133,7 +1238,7 @@ static inline int managed_dentry_rcu(struct dentry *dentry)
  * we meet a managed dentry that would need blocking.
  */
 static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
-                              struct inode **inode)
+                              struct inode **inode, unsigned *seqp)
 {
        for (;;) {
                struct mount *mounted;
@@ -1160,7 +1265,7 @@ static bool __follow_mount_rcu(struct nameidata *nd, struct path *path,
                path->mnt = &mounted->mnt;
                path->dentry = mounted->mnt.mnt_root;
                nd->flags |= LOOKUP_JUMPED;
-               nd->seq = read_seqcount_begin(&path->dentry->d_seq);
+               *seqp = read_seqcount_begin(&path->dentry->d_seq);
                /*
                 * Update the inode too. We don't need to re-check the
                 * dentry sequence number here after this d_inode read,
@@ -1179,10 +1284,8 @@ static int follow_dotdot_rcu(struct nameidata *nd)
                set_root_rcu(nd);
 
        while (1) {
-               if (nd->path.dentry == nd->root.dentry &&
-                   nd->path.mnt == nd->root.mnt) {
+               if (path_equal(&nd->path, &nd->root))
                        break;
-               }
                if (nd->path.dentry != nd->path.mnt->mnt_root) {
                        struct dentry *old = nd->path.dentry;
                        struct dentry *parent = old->d_parent;
@@ -1190,38 +1293,42 @@ static int follow_dotdot_rcu(struct nameidata *nd)
 
                        inode = parent->d_inode;
                        seq = read_seqcount_begin(&parent->d_seq);
-                       if (read_seqcount_retry(&old->d_seq, nd->seq))
-                               goto failed;
+                       if (unlikely(read_seqcount_retry(&old->d_seq, nd->seq)))
+                               return -ECHILD;
                        nd->path.dentry = parent;
                        nd->seq = seq;
                        break;
+               } else {
+                       struct mount *mnt = real_mount(nd->path.mnt);
+                       struct mount *mparent = mnt->mnt_parent;
+                       struct dentry *mountpoint = mnt->mnt_mountpoint;
+                       struct inode *inode2 = mountpoint->d_inode;
+                       unsigned seq = read_seqcount_begin(&mountpoint->d_seq);
+                       if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
+                               return -ECHILD;
+                       if (&mparent->mnt == nd->path.mnt)
+                               break;
+                       /* we know that mountpoint was pinned */
+                       nd->path.dentry = mountpoint;
+                       nd->path.mnt = &mparent->mnt;
+                       inode = inode2;
+                       nd->seq = seq;
                }
-               if (!follow_up_rcu(&nd->path))
-                       break;
-               inode = nd->path.dentry->d_inode;
-               nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
        }
-       while (d_mountpoint(nd->path.dentry)) {
+       while (unlikely(d_mountpoint(nd->path.dentry))) {
                struct mount *mounted;
                mounted = __lookup_mnt(nd->path.mnt, nd->path.dentry);
+               if (unlikely(read_seqretry(&mount_lock, nd->m_seq)))
+                       return -ECHILD;
                if (!mounted)
                        break;
                nd->path.mnt = &mounted->mnt;
                nd->path.dentry = mounted->mnt.mnt_root;
                inode = nd->path.dentry->d_inode;
                nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
-               if (read_seqretry(&mount_lock, nd->m_seq))
-                       goto failed;
        }
        nd->inode = inode;
        return 0;
-
-failed:
-       nd->flags &= ~LOOKUP_RCU;
-       if (!(nd->flags & LOOKUP_ROOT))
-               nd->root.mnt = NULL;
-       rcu_read_unlock();
-       return -ECHILD;
 }
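/*
 * Ordering sketch for the rcu-walk ".." above: mnt_parent and
 * mnt_mountpoint are sampled first and only believed once
 * read_seqretry(&mount_lock, nd->m_seq) confirms the mount tree has
 * not changed under us; any change simply fails the walk with -ECHILD
 * and lets the caller retry in ref-walk mode.
 */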
 
 /*
@@ -1400,7 +1507,8 @@ static struct dentry *__lookup_hash(struct qstr *name,
  *  It _is_ time-critical.
  */
 static int lookup_fast(struct nameidata *nd,
-                      struct path *path, struct inode **inode)
+                      struct path *path, struct inode **inode,
+                      unsigned *seqp)
 {
        struct vfsmount *mnt = nd->path.mnt;
        struct dentry *dentry, *parent = nd->path.dentry;
@@ -1415,6 +1523,7 @@ static int lookup_fast(struct nameidata *nd,
         */
        if (nd->flags & LOOKUP_RCU) {
                unsigned seq;
+               bool negative;
                dentry = __d_lookup_rcu(parent, &nd->last, &seq);
                if (!dentry)
                        goto unlazy;
@@ -1423,9 +1532,12 @@ static int lookup_fast(struct nameidata *nd,
                 * This sequence count validates that the inode matches
                 * the dentry name information from lookup.
                 */
-               *inode = dentry->d_inode;
+               *inode = d_backing_inode(dentry);
+               negative = d_is_negative(dentry);
                if (read_seqcount_retry(&dentry->d_seq, seq))
                        return -ECHILD;
+               if (negative)
+                       return -ENOENT;
 
                /*
                 * This sequence count validates that the parent had no
@@ -1436,8 +1548,8 @@ static int lookup_fast(struct nameidata *nd,
                 */
                if (__read_seqcount_retry(&parent->d_seq, nd->seq))
                        return -ECHILD;
-               nd->seq = seq;
 
+               *seqp = seq;
                if (unlikely(dentry->d_flags & DCACHE_OP_REVALIDATE)) {
                        status = d_revalidate(dentry, nd->flags);
                        if (unlikely(status <= 0)) {
@@ -1448,10 +1560,10 @@ static int lookup_fast(struct nameidata *nd,
                }
                path->mnt = mnt;
                path->dentry = dentry;
-               if (likely(__follow_mount_rcu(nd, path, inode)))
+               if (likely(__follow_mount_rcu(nd, path, inode, seqp)))
                        return 0;
 unlazy:
-               if (unlazy_walk(nd, dentry))
+               if (unlazy_walk(nd, dentry, seq))
                        return -ECHILD;
        } else {
                dentry = __d_lookup(parent, &nd->last);
@@ -1472,17 +1584,16 @@ unlazy:
                goto need_lookup;
        }
 
+       if (unlikely(d_is_negative(dentry))) {
+               dput(dentry);
+               return -ENOENT;
+       }
        path->mnt = mnt;
        path->dentry = dentry;
-       err = follow_managed(path, nd->flags);
-       if (unlikely(err < 0)) {
-               path_put_conditional(path, nd);
-               return err;
-       }
-       if (err)
-               nd->flags |= LOOKUP_JUMPED;
-       *inode = path->dentry->d_inode;
-       return 0;
+       err = follow_managed(path, nd);
+       if (likely(!err))
+               *inode = d_backing_inode(path->dentry);
+       return err;
 
 need_lookup:
        return 1;
@@ -1492,7 +1603,6 @@ need_lookup:
 static int lookup_slow(struct nameidata *nd, struct path *path)
 {
        struct dentry *dentry, *parent;
-       int err;
 
        parent = nd->path.dentry;
        BUG_ON(nd->inode != parent->d_inode);
@@ -1504,14 +1614,7 @@ static int lookup_slow(struct nameidata *nd, struct path *path)
                return PTR_ERR(dentry);
        path->mnt = nd->path.mnt;
        path->dentry = dentry;
-       err = follow_managed(path, nd->flags);
-       if (unlikely(err < 0)) {
-               path_put_conditional(path, nd);
-               return err;
-       }
-       if (err)
-               nd->flags |= LOOKUP_JUMPED;
-       return 0;
+       return follow_managed(path, nd);
 }
 
 static inline int may_lookup(struct nameidata *nd)
@@ -1520,7 +1623,7 @@ static inline int may_lookup(struct nameidata *nd)
                int err = inode_permission(nd->inode, MAY_EXEC|MAY_NOT_BLOCK);
                if (err != -ECHILD)
                        return err;
-               if (unlazy_walk(nd, NULL))
+               if (unlazy_walk(nd, NULL, 0))
                        return -ECHILD;
        }
        return inode_permission(nd->inode, MAY_EXEC);
@@ -1530,24 +1633,45 @@ static inline int handle_dots(struct nameidata *nd, int type)
 {
        if (type == LAST_DOTDOT) {
                if (nd->flags & LOOKUP_RCU) {
-                       if (follow_dotdot_rcu(nd))
-                               return -ECHILD;
+                       return follow_dotdot_rcu(nd);
                } else
                        follow_dotdot(nd);
        }
        return 0;
 }
 
-static void terminate_walk(struct nameidata *nd)
+static int pick_link(struct nameidata *nd, struct path *link,
+                    struct inode *inode, unsigned seq)
 {
+       int error;
+       struct saved *last;
+       if (unlikely(nd->total_link_count++ >= MAXSYMLINKS)) {
+               path_to_nameidata(link, nd);
+               return -ELOOP;
+       }
        if (!(nd->flags & LOOKUP_RCU)) {
-               path_put(&nd->path);
-       } else {
-               nd->flags &= ~LOOKUP_RCU;
-               if (!(nd->flags & LOOKUP_ROOT))
-                       nd->root.mnt = NULL;
-               rcu_read_unlock();
+               if (link->mnt == nd->path.mnt)
+                       mntget(link->mnt);
+       }
+       error = nd_alloc_stack(nd);
+       if (unlikely(error)) {
+               if (error == -ECHILD) {
+                       if (unlikely(unlazy_link(nd, link, seq)))
+                               return -ECHILD;
+                       error = nd_alloc_stack(nd);
+               }
+               if (error) {
+                       path_put(link);
+                       return error;
+               }
        }
+
+       last = nd->stack + nd->depth++;
+       last->link = *link;
+       last->cookie = NULL;
+       last->inode = inode;
+       last->seq = seq;
+       return 1;
 }
 
 /*
@@ -1556,97 +1680,67 @@ static void terminate_walk(struct nameidata *nd)
  * so we keep a cache of "no, this doesn't need follow_link"
  * for the common case.
  */
-static inline int should_follow_link(struct dentry *dentry, int follow)
+static inline int should_follow_link(struct nameidata *nd, struct path *link,
+                                    int follow,
+                                    struct inode *inode, unsigned seq)
 {
-       return unlikely(d_is_symlink(dentry)) ? follow : 0;
+       if (likely(!d_is_symlink(link->dentry)))
+               return 0;
+       if (!follow)
+               return 0;
+       return pick_link(nd, link, inode, seq);
 }
 
-static inline int walk_component(struct nameidata *nd, struct path *path,
-               int follow)
+enum {WALK_GET = 1, WALK_PUT = 2};
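/*
 * Flag sketch: WALK_GET asks walk_component() to park a symlink it
 * finds onto nd->stack (via should_follow_link()) instead of stepping
 * into it; WALK_PUT says the component being walked is the last one
 * of a symlink body, so the finished link gets popped with put_link()
 * once the component has been resolved.
 */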
+
+static int walk_component(struct nameidata *nd, int flags)
 {
+       struct path path;
        struct inode *inode;
+       unsigned seq;
        int err;
        /*
         * "." and ".." are special - ".." especially so because it has
         * to be able to know about the current root directory and
         * parent relationships.
         */
-       if (unlikely(nd->last_type != LAST_NORM))
-               return handle_dots(nd, nd->last_type);
-       err = lookup_fast(nd, path, &inode);
+       if (unlikely(nd->last_type != LAST_NORM)) {
+               err = handle_dots(nd, nd->last_type);
+               if (flags & WALK_PUT)
+                       put_link(nd);
+               return err;
+       }
+       err = lookup_fast(nd, &path, &inode, &seq);
        if (unlikely(err)) {
                if (err < 0)
-                       goto out_err;
+                       return err;
 
-               err = lookup_slow(nd, path);
+               err = lookup_slow(nd, &path);
                if (err < 0)
-                       goto out_err;
+                       return err;
 
-               inode = path->dentry->d_inode;
+               inode = d_backing_inode(path.dentry);
+               seq = 0;        /* we are already out of RCU mode */
+               err = -ENOENT;
+               if (d_is_negative(path.dentry))
+                       goto out_path_put;
        }
-       err = -ENOENT;
-       if (d_is_negative(path->dentry))
-               goto out_path_put;
 
-       if (should_follow_link(path->dentry, follow)) {
-               if (nd->flags & LOOKUP_RCU) {
-                       if (unlikely(nd->path.mnt != path->mnt ||
-                                    unlazy_walk(nd, path->dentry))) {
-                               err = -ECHILD;
-                               goto out_err;
-                       }
-               }
-               BUG_ON(inode != path->dentry->d_inode);
-               return 1;
-       }
-       path_to_nameidata(path, nd);
+       if (flags & WALK_PUT)
+               put_link(nd);
+       err = should_follow_link(nd, &path, flags & WALK_GET, inode, seq);
+       if (unlikely(err))
+               return err;
+       path_to_nameidata(&path, nd);
        nd->inode = inode;
+       nd->seq = seq;
        return 0;
 
 out_path_put:
-       path_to_nameidata(path, nd);
-out_err:
-       terminate_walk(nd);
+       path_to_nameidata(&path, nd);
        return err;
 }
 
-/*
- * This limits recursive symlink follows to 8, while
- * limiting consecutive symlinks to 40.
- *
- * Without that kind of total limit, nasty chains of consecutive
- * symlinks can cause almost arbitrarily long lookups.
- */
-static inline int nested_symlink(struct path *path, struct nameidata *nd)
-{
-       int res;
-
-       if (unlikely(current->link_count >= MAX_NESTED_LINKS)) {
-               path_put_conditional(path, nd);
-               path_put(&nd->path);
-               return -ELOOP;
-       }
-       BUG_ON(nd->depth >= MAX_NESTED_LINKS);
-
-       nd->depth++;
-       current->link_count++;
-
-       do {
-               struct path link = *path;
-               void *cookie;
-
-               res = follow_link(&link, nd, &cookie);
-               if (res)
-                       break;
-               res = walk_component(nd, path, LOOKUP_FOLLOW);
-               put_link(nd, &link, cookie);
-       } while (res > 0);
-
-       current->link_count--;
-       nd->depth--;
-       return res;
-}
-
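/*
 * Limit change note: the 8-deep recursion cap described above goes
 * away together with the recursion itself; what survives is the
 * 40-link total, now MAXSYMLINKS enforced in pick_link() against
 * nd->total_link_count, with the chain of in-flight symlinks kept on
 * nd->stack instead of the kernel stack.
 */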
 /*
  * We can do the critical dentry name comparison and hashing
  * operations one word at a time, but we are limited to:
@@ -1773,9 +1867,8 @@ static inline u64 hash_name(const char *name)
  */
 static int link_path_walk(const char *name, struct nameidata *nd)
 {
-       struct path next;
        int err;
-       
+
        while (*name=='/')
                name++;
        if (!*name)
@@ -1788,7 +1881,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 
                err = may_lookup(nd);
                if (err)
-                       break;
+                       return err;
 
                hash_len = hash_name(name);
 
@@ -1810,7 +1903,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                                struct qstr this = { { .hash_len = hash_len }, .name = name };
                                err = parent->d_op->d_hash(parent, &this);
                                if (err < 0)
-                                       break;
+                                       return err;
                                hash_len = this.hash_len;
                                name = this.name;
                        }
@@ -1822,7 +1915,7 @@ static int link_path_walk(const char *name, struct nameidata *nd)
 
                name += hashlen_len(hash_len);
                if (!*name)
-                       return 0;
+                       goto OK;
                /*
                 * If it wasn't NUL, we know it was '/'. Skip that
                 * slash, and continue until no more slashes.
@@ -1830,57 +1923,73 @@ static int link_path_walk(const char *name, struct nameidata *nd)
                do {
                        name++;
                } while (unlikely(*name == '/'));
-               if (!*name)
-                       return 0;
-
-               err = walk_component(nd, &next, LOOKUP_FOLLOW);
+               if (unlikely(!*name)) {
+OK:
+                       /* pathname body, done */
+                       if (!nd->depth)
+                               return 0;
+                       name = nd->stack[nd->depth - 1].name;
+                       /* trailing symlink, done */
+                       if (!name)
+                               return 0;
+                       /* last component of nested symlink */
+                       err = walk_component(nd, WALK_GET | WALK_PUT);
+               } else {
+                       err = walk_component(nd, WALK_GET);
+               }
                if (err < 0)
                        return err;
 
                if (err) {
-                       err = nested_symlink(&next, nd);
-                       if (err)
-                               return err;
-               }
-               if (!d_can_lookup(nd->path.dentry)) {
-                       err = -ENOTDIR; 
-                       break;
+                       const char *s = get_link(nd);
+
+                       if (unlikely(IS_ERR(s)))
+                               return PTR_ERR(s);
+                       err = 0;
+                       if (unlikely(!s)) {
+                               /* jumped */
+                               put_link(nd);
+                       } else {
+                               nd->stack[nd->depth - 1].name = name;
+                               name = s;
+                               continue;
+                       }
                }
+               if (unlikely(!d_can_lookup(nd->path.dentry)))
+                       return -ENOTDIR;
        }
-       terminate_walk(nd);
-       return err;
 }
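/*
 * Worked example (sketch): walking "a/b/c" where "b" is a symlink to
 * "d/e".  After "b" is picked, the remaining suffix is parked:
 *
 *	nd->stack[0].name = "c";	resumed once the body is done
 *	name = "d/e";			symlink body walked first
 *
 * Reaching the body's final component "e" takes the OK: path with a
 * non-NULL parked name, so "e" is walked with WALK_GET | WALK_PUT,
 * which also pops "b" off the stack, and the loop resumes at "c".
 */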
 
-static int path_init(int dfd, const struct filename *name, unsigned int flags,
-                    struct nameidata *nd)
+static const char *path_init(struct nameidata *nd, unsigned flags)
 {
        int retval = 0;
-       const char *s = name->name;
+       const char *s = nd->name->name;
 
        nd->last_type = LAST_ROOT; /* if there are only slashes... */
        nd->flags = flags | LOOKUP_JUMPED | LOOKUP_PARENT;
        nd->depth = 0;
-       nd->base = NULL;
+       nd->total_link_count = 0;
        if (flags & LOOKUP_ROOT) {
                struct dentry *root = nd->root.dentry;
                struct inode *inode = root->d_inode;
                if (*s) {
                        if (!d_can_lookup(root))
-                               return -ENOTDIR;
+                               return ERR_PTR(-ENOTDIR);
                        retval = inode_permission(inode, MAY_EXEC);
                        if (retval)
-                               return retval;
+                               return ERR_PTR(retval);
                }
                nd->path = nd->root;
                nd->inode = inode;
                if (flags & LOOKUP_RCU) {
                        rcu_read_lock();
                        nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
+                       nd->root_seq = nd->seq;
                        nd->m_seq = read_seqbegin(&mount_lock);
                } else {
                        path_get(&nd->path);
                }
-               goto done;
+               return s;
        }
 
        nd->root.mnt = NULL;
@@ -1895,7 +2004,7 @@ static int path_init(int dfd, const struct filename *name, unsigned int flags,
                        path_get(&nd->root);
                }
                nd->path = nd->root;
-       } else if (dfd == AT_FDCWD) {
+       } else if (nd->dfd == AT_FDCWD) {
                if (flags & LOOKUP_RCU) {
                        struct fs_struct *fs = current->fs;
                        unsigned seq;
@@ -1912,180 +2021,205 @@ static int path_init(int dfd, const struct filename *name, unsigned int flags,
                }
        } else {
                /* Caller must check execute permissions on the starting path component */
-               struct fd f = fdget_raw(dfd);
+               struct fd f = fdget_raw(nd->dfd);
                struct dentry *dentry;
 
                if (!f.file)
-                       return -EBADF;
+                       return ERR_PTR(-EBADF);
 
                dentry = f.file->f_path.dentry;
 
                if (*s) {
                        if (!d_can_lookup(dentry)) {
                                fdput(f);
-                               return -ENOTDIR;
+                               return ERR_PTR(-ENOTDIR);
                        }
                }
 
                nd->path = f.file->f_path;
                if (flags & LOOKUP_RCU) {
-                       if (f.flags & FDPUT_FPUT)
-                               nd->base = f.file;
-                       nd->seq = __read_seqcount_begin(&nd->path.dentry->d_seq);
                        rcu_read_lock();
+                       nd->inode = nd->path.dentry->d_inode;
+                       nd->seq = read_seqcount_begin(&nd->path.dentry->d_seq);
                } else {
                        path_get(&nd->path);
-                       fdput(f);
+                       nd->inode = nd->path.dentry->d_inode;
                }
+               fdput(f);
+               return s;
        }
 
        nd->inode = nd->path.dentry->d_inode;
        if (!(flags & LOOKUP_RCU))
-               goto done;
+               return s;
        if (likely(!read_seqcount_retry(&nd->path.dentry->d_seq, nd->seq)))
-               goto done;
+               return s;
        if (!(nd->flags & LOOKUP_ROOT))
                nd->root.mnt = NULL;
        rcu_read_unlock();
-       return -ECHILD;
-done:
-       current->total_link_count = 0;
-       return link_path_walk(s, nd);
+       return ERR_PTR(-ECHILD);
 }
 
-static void path_cleanup(struct nameidata *nd)
+static const char *trailing_symlink(struct nameidata *nd)
 {
-       if (nd->root.mnt && !(nd->flags & LOOKUP_ROOT)) {
-               path_put(&nd->root);
-               nd->root.mnt = NULL;
-       }
-       if (unlikely(nd->base))
-               fput(nd->base);
+       const char *s;
+       int error = may_follow_link(nd);
+       if (unlikely(error))
+               return ERR_PTR(error);
+       nd->flags |= LOOKUP_PARENT;
+       nd->stack[0].name = NULL;
+       s = get_link(nd);
+       return s ? s : "";
 }
 
-static inline int lookup_last(struct nameidata *nd, struct path *path)
+static inline int lookup_last(struct nameidata *nd)
 {
        if (nd->last_type == LAST_NORM && nd->last.name[nd->last.len])
                nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
 
        nd->flags &= ~LOOKUP_PARENT;
-       return walk_component(nd, path, nd->flags & LOOKUP_FOLLOW);
+       return walk_component(nd,
+                       nd->flags & LOOKUP_FOLLOW
+                               ? nd->depth
+                                       ? WALK_PUT | WALK_GET
+                                       : WALK_GET
+                               : 0);
 }
 
 /* Returns 0 and nd will be valid on success; returns error otherwise. */
-static int path_lookupat(int dfd, const struct filename *name,
-                               unsigned int flags, struct nameidata *nd)
+static int path_lookupat(struct nameidata *nd, unsigned flags, struct path *path)
 {
-       struct path path;
+       const char *s = path_init(nd, flags);
        int err;
 
-       /*
-        * Path walking is largely split up into 2 different synchronisation
-        * schemes, rcu-walk and ref-walk (explained in
-        * Documentation/filesystems/path-lookup.txt). These share much of the
-        * path walk code, but some things particularly setup, cleanup, and
-        * following mounts are sufficiently divergent that functions are
-        * duplicated. Typically there is a function foo(), and its RCU
-        * analogue, foo_rcu().
-        *
-        * -ECHILD is the error number of choice (just to avoid clashes) that
-        * is returned if some aspect of an rcu-walk fails. Such an error must
-        * be handled by restarting a traditional ref-walk (which will always
-        * be able to complete).
-        */
-       err = path_init(dfd, name, flags, nd);
-       if (!err && !(flags & LOOKUP_PARENT)) {
-               err = lookup_last(nd, &path);
-               while (err > 0) {
-                       void *cookie;
-                       struct path link = path;
-                       err = may_follow_link(&link, nd);
-                       if (unlikely(err))
-                               break;
-                       nd->flags |= LOOKUP_PARENT;
-                       err = follow_link(&link, nd, &cookie);
-                       if (err)
-                               break;
-                       err = lookup_last(nd, &path);
-                       put_link(nd, &link, cookie);
+       if (IS_ERR(s))
+               return PTR_ERR(s);
+       while (!(err = link_path_walk(s, nd))
+               && ((err = lookup_last(nd)) > 0)) {
+               s = trailing_symlink(nd);
+               if (IS_ERR(s)) {
+                       err = PTR_ERR(s);
+                       break;
                }
        }
-
        if (!err)
                err = complete_walk(nd);
 
-       if (!err && nd->flags & LOOKUP_DIRECTORY) {
-               if (!d_can_lookup(nd->path.dentry)) {
-                       path_put(&nd->path);
+       if (!err && nd->flags & LOOKUP_DIRECTORY)
+               if (!d_can_lookup(nd->path.dentry))
                        err = -ENOTDIR;
-               }
+       if (!err) {
+               *path = nd->path;
+               nd->path.mnt = NULL;
+               nd->path.dentry = NULL;
        }
-
-       path_cleanup(nd);
+       terminate_walk(nd);
        return err;
 }
 
-static int filename_lookup(int dfd, struct filename *name,
-                               unsigned int flags, struct nameidata *nd)
+static int filename_lookup(int dfd, struct filename *name, unsigned flags,
+                          struct path *path, struct path *root)
 {
-       int retval = path_lookupat(dfd, name, flags | LOOKUP_RCU, nd);
+       int retval;
+       struct nameidata nd;
+       if (IS_ERR(name))
+               return PTR_ERR(name);
+       if (unlikely(root)) {
+               nd.root = *root;
+               flags |= LOOKUP_ROOT;
+       }
+       set_nameidata(&nd, dfd, name);
+       retval = path_lookupat(&nd, flags | LOOKUP_RCU, path);
        if (unlikely(retval == -ECHILD))
-               retval = path_lookupat(dfd, name, flags, nd);
+               retval = path_lookupat(&nd, flags, path);
        if (unlikely(retval == -ESTALE))
-               retval = path_lookupat(dfd, name, flags | LOOKUP_REVAL, nd);
+               retval = path_lookupat(&nd, flags | LOOKUP_REVAL, path);
 
        if (likely(!retval))
-               audit_inode(name, nd->path.dentry, flags & LOOKUP_PARENT);
+               audit_inode(name, path->dentry, flags & LOOKUP_PARENT);
+       restore_nameidata();
+       putname(name);
        return retval;
 }
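/*
 * Retry ladder sketch: try rcu-walk first (LOOKUP_RCU); -ECHILD means
 * some step could not be legitimized without blocking, so the whole
 * walk restarts in ref-walk mode, which can always complete; an
 * -ESTALE from that gets one more pass with LOOKUP_REVAL to force
 * revalidation of the suspect dentries.
 */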
 
+/* Returns 0 and nd will be valid on success; returns error otherwise. */
+static int path_parentat(struct nameidata *nd, unsigned flags,
+                               struct path *parent)
+{
+       const char *s = path_init(nd, flags);
+       int err;
+       if (IS_ERR(s))
+               return PTR_ERR(s);
+       err = link_path_walk(s, nd);
+       if (!err)
+               err = complete_walk(nd);
+       if (!err) {
+               *parent = nd->path;
+               nd->path.mnt = NULL;
+               nd->path.dentry = NULL;
+       }
+       terminate_walk(nd);
+       return err;
+}
+
+static struct filename *filename_parentat(int dfd, struct filename *name,
+                               unsigned int flags, struct path *parent,
+                               struct qstr *last, int *type)
+{
+       int retval;
+       struct nameidata nd;
+
+       if (IS_ERR(name))
+               return name;
+       set_nameidata(&nd, dfd, name);
+       retval = path_parentat(&nd, flags | LOOKUP_RCU, parent);
+       if (unlikely(retval == -ECHILD))
+               retval = path_parentat(&nd, flags, parent);
+       if (unlikely(retval == -ESTALE))
+               retval = path_parentat(&nd, flags | LOOKUP_REVAL, parent);
+       if (likely(!retval)) {
+               *last = nd.last;
+               *type = nd.last_type;
+               audit_inode(name, parent->dentry, LOOKUP_PARENT);
+       } else {
+               putname(name);
+               name = ERR_PTR(retval);
+       }
+       restore_nameidata();
+       return name;
+}
+
 /* does lookup, returns the object with parent locked */
 struct dentry *kern_path_locked(const char *name, struct path *path)
 {
-       struct filename *filename = getname_kernel(name);
-       struct nameidata nd;
+       struct filename *filename;
        struct dentry *d;
-       int err;
+       struct qstr last;
+       int type;
 
+       filename = filename_parentat(AT_FDCWD, getname_kernel(name), 0, path,
+                                   &last, &type);
        if (IS_ERR(filename))
                return ERR_CAST(filename);
-
-       err = filename_lookup(AT_FDCWD, filename, LOOKUP_PARENT, &nd);
-       if (err) {
-               d = ERR_PTR(err);
-               goto out;
-       }
-       if (nd.last_type != LAST_NORM) {
-               path_put(&nd.path);
-               d = ERR_PTR(-EINVAL);
-               goto out;
+       if (unlikely(type != LAST_NORM)) {
+               path_put(path);
+               putname(filename);
+               return ERR_PTR(-EINVAL);
        }
-       mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
-       d = __lookup_hash(&nd.last, nd.path.dentry, 0);
+       mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+       d = __lookup_hash(&last, path->dentry, 0);
        if (IS_ERR(d)) {
-               mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
-               path_put(&nd.path);
-               goto out;
+               mutex_unlock(&path->dentry->d_inode->i_mutex);
+               path_put(path);
        }
-       *path = nd.path;
-out:
        putname(filename);
        return d;
 }
 
 int kern_path(const char *name, unsigned int flags, struct path *path)
 {
-       struct nameidata nd;
-       struct filename *filename = getname_kernel(name);
-       int res = PTR_ERR(filename);
-
-       if (!IS_ERR(filename)) {
-               res = filename_lookup(AT_FDCWD, filename, flags, &nd);
-               putname(filename);
-               if (!res)
-                       *path = nd.path;
-       }
-       return res;
+       return filename_lookup(AT_FDCWD, getname_kernel(name),
+                              flags, path, NULL);
 }
 EXPORT_SYMBOL(kern_path);
 
@@ -2101,36 +2235,13 @@ int vfs_path_lookup(struct dentry *dentry, struct vfsmount *mnt,
                    const char *name, unsigned int flags,
                    struct path *path)
 {
-       struct filename *filename = getname_kernel(name);
-       int err = PTR_ERR(filename);
-
-       BUG_ON(flags & LOOKUP_PARENT);
-
-       /* the first argument of filename_lookup() is ignored with LOOKUP_ROOT */
-       if (!IS_ERR(filename)) {
-               struct nameidata nd;
-               nd.root.dentry = dentry;
-               nd.root.mnt = mnt;
-               err = filename_lookup(AT_FDCWD, filename,
-                                     flags | LOOKUP_ROOT, &nd);
-               if (!err)
-                       *path = nd.path;
-               putname(filename);
-       }
-       return err;
+       struct path root = {.mnt = mnt, .dentry = dentry};
+       /* the first argument of filename_lookup() is ignored with root */
+       return filename_lookup(AT_FDCWD, getname_kernel(name),
+                              flags, path, &root);
 }
 EXPORT_SYMBOL(vfs_path_lookup);
 
-/*
- * Restricted form of lookup. Doesn't follow links, single-component only,
- * needs parent already locked. Doesn't follow mounts.
- * SMP-safe.
- */
-static struct dentry *lookup_hash(struct nameidata *nd)
-{
-       return __lookup_hash(&nd->last, nd->path.dentry, nd->flags);
-}
-
 /**
  * lookup_one_len - filesystem helper to lookup single pathname component
  * @name:      pathname component to lookup
@@ -2185,27 +2296,10 @@ EXPORT_SYMBOL(lookup_one_len);
 int user_path_at_empty(int dfd, const char __user *name, unsigned flags,
                 struct path *path, int *empty)
 {
-       struct nameidata nd;
-       struct filename *tmp = getname_flags(name, flags, empty);
-       int err = PTR_ERR(tmp);
-       if (!IS_ERR(tmp)) {
-
-               BUG_ON(flags & LOOKUP_PARENT);
-
-               err = filename_lookup(dfd, tmp, flags, &nd);
-               putname(tmp);
-               if (!err)
-                       *path = nd.path;
-       }
-       return err;
-}
-
-int user_path_at(int dfd, const char __user *name, unsigned flags,
-                struct path *path)
-{
-       return user_path_at_empty(dfd, name, flags, path, NULL);
+       return filename_lookup(dfd, getname_flags(name, flags, empty),
+                              flags, path, NULL);
 }
-EXPORT_SYMBOL(user_path_at);
+EXPORT_SYMBOL(user_path_at_empty);
 
 /*
  * NB: most callers don't do anything directly with the reference to the
@@ -2213,26 +2307,16 @@ EXPORT_SYMBOL(user_path_at);
  *     allocated by getname. So we must hold the reference to it until all
  *     path-walking is complete.
  */
-static struct filename *
-user_path_parent(int dfd, const char __user *path, struct nameidata *nd,
+static inline struct filename *
+user_path_parent(int dfd, const char __user *path,
+                struct path *parent,
+                struct qstr *last,
+                int *type,
                 unsigned int flags)
 {
-       struct filename *s = getname(path);
-       int error;
-
        /* only LOOKUP_REVAL is allowed in extra flags */
-       flags &= LOOKUP_REVAL;
-
-       if (IS_ERR(s))
-               return s;
-
-       error = filename_lookup(dfd, s, flags | LOOKUP_PARENT, nd);
-       if (error) {
-               putname(s);
-               return ERR_PTR(error);
-       }
-
-       return s;
+       return filename_parentat(dfd, getname(path), flags & LOOKUP_REVAL,
+                                parent, last, type);
 }
 
 /**
@@ -2271,10 +2355,8 @@ mountpoint_last(struct nameidata *nd, struct path *path)
 
        /* If we're in rcuwalk, drop out of it to handle last component */
        if (nd->flags & LOOKUP_RCU) {
-               if (unlazy_walk(nd, NULL)) {
-                       error = -ECHILD;
-                       goto out;
-               }
+               if (unlazy_walk(nd, NULL, 0))
+                       return -ECHILD;
        }
 
        nd->flags &= ~LOOKUP_PARENT;
@@ -2282,7 +2364,7 @@ mountpoint_last(struct nameidata *nd, struct path *path)
        if (unlikely(nd->last_type != LAST_NORM)) {
                error = handle_dots(nd, nd->last_type);
                if (error)
-                       goto out;
+                       return error;
                dentry = dget(nd->path.dentry);
                goto done;
        }
@@ -2297,74 +2379,60 @@ mountpoint_last(struct nameidata *nd, struct path *path)
                 */
                dentry = d_alloc(dir, &nd->last);
                if (!dentry) {
-                       error = -ENOMEM;
                        mutex_unlock(&dir->d_inode->i_mutex);
-                       goto out;
+                       return -ENOMEM;
                }
                dentry = lookup_real(dir->d_inode, dentry, nd->flags);
-               error = PTR_ERR(dentry);
                if (IS_ERR(dentry)) {
                        mutex_unlock(&dir->d_inode->i_mutex);
-                       goto out;
+                       return PTR_ERR(dentry);
                }
        }
        mutex_unlock(&dir->d_inode->i_mutex);
 
 done:
        if (d_is_negative(dentry)) {
-               error = -ENOENT;
                dput(dentry);
-               goto out;
+               return -ENOENT;
        }
+       if (nd->depth)
+               put_link(nd);
        path->dentry = dentry;
        path->mnt = nd->path.mnt;
-       if (should_follow_link(dentry, nd->flags & LOOKUP_FOLLOW))
-               return 1;
+       error = should_follow_link(nd, path, nd->flags & LOOKUP_FOLLOW,
+                                  d_backing_inode(dentry), 0);
+       if (unlikely(error))
+               return error;
        mntget(path->mnt);
        follow_mount(path);
-       error = 0;
-out:
-       terminate_walk(nd);
-       return error;
+       return 0;
 }
 
 /**
  * path_mountpoint - look up a path to be umounted
- * @dfd:       directory file descriptor to start walk from
- * @name:      full pathname to walk
- * @path:      pointer to container for result
+ * @nd:        lookup context
  * @flags:     lookup flags
+ * @path:      pointer to container for result
  *
  * Look up the given name, but don't attempt to revalidate the last component.
 * Returns 0 and "path" will be valid on success; returns an error otherwise.
  */
 static int
-path_mountpoint(int dfd, const struct filename *name, struct path *path,
-               unsigned int flags)
+path_mountpoint(struct nameidata *nd, unsigned flags, struct path *path)
 {
-       struct nameidata nd;
+       const char *s = path_init(nd, flags);
        int err;
-
-       err = path_init(dfd, name, flags, &nd);
-       if (unlikely(err))
-               goto out;
-
-       err = mountpoint_last(&nd, path);
-       while (err > 0) {
-               void *cookie;
-               struct path link = *path;
-               err = may_follow_link(&link, &nd);
-               if (unlikely(err))
-                       break;
-               nd.flags |= LOOKUP_PARENT;
-               err = follow_link(&link, &nd, &cookie);
-               if (err)
+       if (IS_ERR(s))
+               return PTR_ERR(s);
+       while (!(err = link_path_walk(s, nd)) &&
+               (err = mountpoint_last(nd, path)) > 0) {
+               s = trailing_symlink(nd);
+               if (IS_ERR(s)) {
+                       err = PTR_ERR(s);
                        break;
-               err = mountpoint_last(&nd, path);
-               put_link(&nd, &link, cookie);
+               }
        }
-out:
-       path_cleanup(&nd);
+       terminate_walk(nd);
        return err;
 }
 
@@ -2372,16 +2440,19 @@ static int
 filename_mountpoint(int dfd, struct filename *name, struct path *path,
                        unsigned int flags)
 {
+       struct nameidata nd;
        int error;
        if (IS_ERR(name))
                return PTR_ERR(name);
-       error = path_mountpoint(dfd, name, path, flags | LOOKUP_RCU);
+       set_nameidata(&nd, dfd, name);
+       error = path_mountpoint(&nd, flags | LOOKUP_RCU, path);
        if (unlikely(error == -ECHILD))
-               error = path_mountpoint(dfd, name, path, flags);
+               error = path_mountpoint(&nd, flags, path);
        if (unlikely(error == -ESTALE))
-               error = path_mountpoint(dfd, name, path, flags | LOOKUP_REVAL);
+               error = path_mountpoint(&nd, flags | LOOKUP_REVAL, path);
        if (likely(!error))
                audit_inode(name, path->dentry, 0);
+       restore_nameidata();
        putname(name);
        return error;
 }
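
filename_mountpoint() above shows the retry ladder used throughout this series: attempt the walk in RCU mode first, redo it in ref-walk mode on -ECHILD, and make one final pass with LOOKUP_REVAL on -ESTALE. A self-contained sketch of the pattern, with a toy lookup() forced to fail its lockless pass (flag values and all names are hypothetical stand-ins):

#include <errno.h>
#include <stdio.h>

#define LOOKUP_RCU   0x1   /* hypothetical stand-ins for the kernel flags */
#define LOOKUP_REVAL 0x2

/* Toy lookup: fails the lockless pass to force the fallback. */
static int lookup(const char *name, unsigned flags)
{
        if (flags & LOOKUP_RCU)
                return -ECHILD;         /* lost the RCU race, retry */
        return 0;                       /* ref-walk succeeded */
}

static int lookup_with_fallback(const char *name, unsigned flags)
{
        int error = lookup(name, flags | LOOKUP_RCU);
        if (error == -ECHILD)           /* drop out of RCU mode, retry */
                error = lookup(name, flags);
        if (error == -ESTALE)           /* force revalidation, last try */
                error = lookup(name, flags | LOOKUP_REVAL);
        return error;
}

int main(void)
{
        printf("lookup -> %d\n", lookup_with_fallback("/mnt", 0));
        return 0;
}
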
@@ -2448,7 +2519,7 @@ EXPORT_SYMBOL(__check_sticky);
  */
 static int may_delete(struct inode *dir, struct dentry *victim, bool isdir)
 {
-       struct inode *inode = victim->d_inode;
+       struct inode *inode = d_backing_inode(victim);
        int error;
 
        if (d_is_negative(victim))
@@ -2914,18 +2985,19 @@ out_dput:
 /*
  * Handle the last step of open()
  */
-static int do_last(struct nameidata *nd, struct path *path,
+static int do_last(struct nameidata *nd,
                   struct file *file, const struct open_flags *op,
-                  int *opened, struct filename *name)
+                  int *opened)
 {
        struct dentry *dir = nd->path.dentry;
        int open_flag = op->open_flag;
        bool will_truncate = (open_flag & O_TRUNC) != 0;
        bool got_write = false;
        int acc_mode = op->acc_mode;
+       unsigned seq;
        struct inode *inode;
-       bool symlink_ok = false;
        struct path save_parent = { .dentry = NULL, .mnt = NULL };
+       struct path path;
        bool retried = false;
        int error;
 
@@ -2934,7 +3006,7 @@ static int do_last(struct nameidata *nd, struct path *path,
 
        if (nd->last_type != LAST_NORM) {
                error = handle_dots(nd, nd->last_type);
-               if (error)
+               if (unlikely(error))
                        return error;
                goto finish_open;
        }
@@ -2942,15 +3014,13 @@ static int do_last(struct nameidata *nd, struct path *path,
        if (!(open_flag & O_CREAT)) {
                if (nd->last.name[nd->last.len])
                        nd->flags |= LOOKUP_FOLLOW | LOOKUP_DIRECTORY;
-               if (open_flag & O_PATH && !(nd->flags & LOOKUP_FOLLOW))
-                       symlink_ok = true;
                /* we _can_ be in RCU mode here */
-               error = lookup_fast(nd, path, &inode);
+               error = lookup_fast(nd, &path, &inode, &seq);
                if (likely(!error))
                        goto finish_lookup;
 
                if (error < 0)
-                       goto out;
+                       return error;
 
                BUG_ON(nd->inode != dir->d_inode);
        } else {
@@ -2964,11 +3034,10 @@ static int do_last(struct nameidata *nd, struct path *path,
                if (error)
                        return error;
 
-               audit_inode(name, dir, LOOKUP_PARENT);
-               error = -EISDIR;
+               audit_inode(nd->name, dir, LOOKUP_PARENT);
                /* trailing slashes? */
-               if (nd->last.name[nd->last.len])
-                       goto out;
+               if (unlikely(nd->last.name[nd->last.len]))
+                       return -EISDIR;
        }
 
 retry_lookup:
@@ -2983,7 +3052,7 @@ retry_lookup:
                 */
        }
        mutex_lock(&dir->d_inode->i_mutex);
-       error = lookup_open(nd, path, file, op, got_write, opened);
+       error = lookup_open(nd, &path, file, op, got_write, opened);
        mutex_unlock(&dir->d_inode->i_mutex);
 
        if (error <= 0) {
@@ -2994,7 +3063,7 @@ retry_lookup:
                    !S_ISREG(file_inode(file)->i_mode))
                        will_truncate = false;
 
-               audit_inode(name, file->f_path.dentry, 0);
+               audit_inode(nd->name, file->f_path.dentry, 0);
                goto opened;
        }
 
@@ -3003,15 +3072,15 @@ retry_lookup:
                open_flag &= ~O_TRUNC;
                will_truncate = false;
                acc_mode = MAY_OPEN;
-               path_to_nameidata(path, nd);
+               path_to_nameidata(&path, nd);
                goto finish_open_created;
        }
 
        /*
         * create/update audit record if it already exists.
         */
-       if (d_is_positive(path->dentry))
-               audit_inode(name, path->dentry, 0);
+       if (d_is_positive(path.dentry))
+               audit_inode(nd->name, path.dentry, 0);
 
        /*
         * If atomic_open() acquired write access it is dropped now due to
@@ -3023,48 +3092,45 @@ retry_lookup:
                got_write = false;
        }
 
-       error = -EEXIST;
-       if ((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))
-               goto exit_dput;
-
-       error = follow_managed(path, nd->flags);
-       if (error < 0)
-               goto exit_dput;
+       if (unlikely((open_flag & (O_EXCL | O_CREAT)) == (O_EXCL | O_CREAT))) {
+               path_to_nameidata(&path, nd);
+               return -EEXIST;
+       }
 
-       if (error)
-               nd->flags |= LOOKUP_JUMPED;
+       error = follow_managed(&path, nd);
+       if (unlikely(error < 0))
+               return error;
 
        BUG_ON(nd->flags & LOOKUP_RCU);
-       inode = path->dentry->d_inode;
-finish_lookup:
-       /* we _can_ be in RCU mode here */
-       error = -ENOENT;
-       if (d_is_negative(path->dentry)) {
-               path_to_nameidata(path, nd);
-               goto out;
+       inode = d_backing_inode(path.dentry);
+       seq = 0;        /* out of RCU mode, so the value doesn't matter */
+       if (unlikely(d_is_negative(path.dentry))) {
+               path_to_nameidata(&path, nd);
+               return -ENOENT;
        }
+finish_lookup:
+       if (nd->depth)
+               put_link(nd);
+       error = should_follow_link(nd, &path, nd->flags & LOOKUP_FOLLOW,
+                                  inode, seq);
+       if (unlikely(error))
+               return error;
 
-       if (should_follow_link(path->dentry, !symlink_ok)) {
-               if (nd->flags & LOOKUP_RCU) {
-                       if (unlikely(nd->path.mnt != path->mnt ||
-                                    unlazy_walk(nd, path->dentry))) {
-                               error = -ECHILD;
-                               goto out;
-                       }
-               }
-               BUG_ON(inode != path->dentry->d_inode);
-               return 1;
+       if (unlikely(d_is_symlink(path.dentry)) && !(open_flag & O_PATH)) {
+               path_to_nameidata(&path, nd);
+               return -ELOOP;
        }
 
-       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path->mnt) {
-               path_to_nameidata(path, nd);
+       if ((nd->flags & LOOKUP_RCU) || nd->path.mnt != path.mnt) {
+               path_to_nameidata(&path, nd);
        } else {
                save_parent.dentry = nd->path.dentry;
-               save_parent.mnt = mntget(path->mnt);
-               nd->path.dentry = path->dentry;
+               save_parent.mnt = mntget(path.mnt);
+               nd->path.dentry = path.dentry;
 
        }
        nd->inode = inode;
+       nd->seq = seq;
        /* Why this, you ask?  _Now_ we might have grown LOOKUP_JUMPED... */
 finish_open:
        error = complete_walk(nd);
@@ -3072,7 +3138,7 @@ finish_open:
                path_put(&save_parent);
                return error;
        }
-       audit_inode(name, nd->path.dentry, 0);
+       audit_inode(nd->name, nd->path.dentry, 0);
        error = -EISDIR;
        if ((open_flag & O_CREAT) && d_is_dir(nd->path.dentry))
                goto out;
@@ -3119,12 +3185,8 @@ out:
        if (got_write)
                mnt_drop_write(nd->path.mnt);
        path_put(&save_parent);
-       terminate_walk(nd);
        return error;
 
-exit_dput:
-       path_put_conditional(path, nd);
-       goto out;
 exit_fput:
        fput(file);
        goto out;
@@ -3148,50 +3210,46 @@ stale_open:
        goto retry_lookup;
 }
 
-static int do_tmpfile(int dfd, struct filename *pathname,
-               struct nameidata *nd, int flags,
+static int do_tmpfile(struct nameidata *nd, unsigned flags,
                const struct open_flags *op,
                struct file *file, int *opened)
 {
        static const struct qstr name = QSTR_INIT("/", 1);
-       struct dentry *dentry, *child;
+       struct dentry *child;
        struct inode *dir;
-       int error = path_lookupat(dfd, pathname,
-                                 flags | LOOKUP_DIRECTORY, nd);
+       struct path path;
+       int error = path_lookupat(nd, flags | LOOKUP_DIRECTORY, &path);
        if (unlikely(error))
                return error;
-       error = mnt_want_write(nd->path.mnt);
+       error = mnt_want_write(path.mnt);
        if (unlikely(error))
                goto out;
+       dir = path.dentry->d_inode;
        /* we want directory to be writable */
-       error = inode_permission(nd->inode, MAY_WRITE | MAY_EXEC);
+       error = inode_permission(dir, MAY_WRITE | MAY_EXEC);
        if (error)
                goto out2;
-       dentry = nd->path.dentry;
-       dir = dentry->d_inode;
        if (!dir->i_op->tmpfile) {
                error = -EOPNOTSUPP;
                goto out2;
        }
-       child = d_alloc(dentry, &name);
+       child = d_alloc(path.dentry, &name);
        if (unlikely(!child)) {
                error = -ENOMEM;
                goto out2;
        }
-       nd->flags &= ~LOOKUP_DIRECTORY;
-       nd->flags |= op->intent;
-       dput(nd->path.dentry);
-       nd->path.dentry = child;
-       error = dir->i_op->tmpfile(dir, nd->path.dentry, op->mode);
+       dput(path.dentry);
+       path.dentry = child;
+       error = dir->i_op->tmpfile(dir, child, op->mode);
        if (error)
                goto out2;
-       audit_inode(pathname, nd->path.dentry, 0);
+       audit_inode(nd->name, child, 0);
        /* Don't check for other permissions, the inode was just created */
-       error = may_open(&nd->path, MAY_OPEN, op->open_flag);
+       error = may_open(&path, MAY_OPEN, op->open_flag);
        if (error)
                goto out2;
-       file->f_path.mnt = nd->path.mnt;
-       error = finish_open(file, nd->path.dentry, NULL, opened);
+       file->f_path.mnt = path.mnt;
+       error = finish_open(file, child, NULL, opened);
        if (error)
                goto out2;
        error = open_check_o_direct(file);
@@ -3204,17 +3262,17 @@ static int do_tmpfile(int dfd, struct filename *pathname,
                spin_unlock(&inode->i_lock);
        }
 out2:
-       mnt_drop_write(nd->path.mnt);
+       mnt_drop_write(path.mnt);
 out:
-       path_put(&nd->path);
+       path_put(&path);
        return error;
 }
 
-static struct file *path_openat(int dfd, struct filename *pathname,
-               struct nameidata *nd, const struct open_flags *op, int flags)
+static struct file *path_openat(struct nameidata *nd,
+                       const struct open_flags *op, unsigned flags)
 {
+       const char *s;
        struct file *file;
-       struct path path;
        int opened = 0;
        int error;
 
@@ -3225,37 +3283,26 @@ static struct file *path_openat(int dfd, struct filename *pathname,
        file->f_flags = op->open_flag;
 
        if (unlikely(file->f_flags & __O_TMPFILE)) {
-               error = do_tmpfile(dfd, pathname, nd, flags, op, file, &opened);
-               goto out;
+               error = do_tmpfile(nd, flags, op, file, &opened);
+               goto out2;
        }
 
-       error = path_init(dfd, pathname, flags, nd);
-       if (unlikely(error))
-               goto out;
-
-       error = do_last(nd, &path, file, op, &opened, pathname);
-       while (unlikely(error > 0)) { /* trailing symlink */
-               struct path link = path;
-               void *cookie;
-               if (!(nd->flags & LOOKUP_FOLLOW)) {
-                       path_put_conditional(&path, nd);
-                       path_put(&nd->path);
-                       error = -ELOOP;
-                       break;
-               }
-               error = may_follow_link(&link, nd);
-               if (unlikely(error))
-                       break;
-               nd->flags |= LOOKUP_PARENT;
+       s = path_init(nd, flags);
+       if (IS_ERR(s)) {
+               put_filp(file);
+               return ERR_CAST(s);
+       }
+       while (!(error = link_path_walk(s, nd)) &&
+               (error = do_last(nd, file, op, &opened)) > 0) {
                nd->flags &= ~(LOOKUP_OPEN|LOOKUP_CREATE|LOOKUP_EXCL);
-               error = follow_link(&link, nd, &cookie);
-               if (unlikely(error))
+               s = trailing_symlink(nd);
+               if (IS_ERR(s)) {
+                       error = PTR_ERR(s);
                        break;
-               error = do_last(nd, &path, file, op, &opened, pathname);
-               put_link(nd, &link, cookie);
+               }
        }
-out:
-       path_cleanup(nd);
+       terminate_walk(nd);
+out2:
        if (!(opened & FILE_OPENED)) {
                BUG_ON(!error);
                put_filp(file);
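
The new path_openat() body replaces the old explicit follow_link loop with a single condition: link_path_walk() consumes the current string, do_last() returns a positive value when it parked on a trailing symlink, and trailing_symlink() hands back the next string to walk. A user-space model of just that loop shape; walk() and last_step() are hypothetical stand-ins that fold do_last() and trailing_symlink() together:

#include <stdio.h>

/* walk() consumes a path string, last_step() returns >0 when the
 * final component is a symlink and hands back the link body to
 * walk next. Hypothetical names, not kernel code. */
static int walk(const char *s) { return 0; }

static int last_step(const char **next)
{
        static const char *links[] = { "b", "c", NULL };
        static int depth;
        if (links[depth]) {             /* trailing symlink found */
                *next = links[depth++];
                return 1;
        }
        return 0;                       /* opened the real file */
}

int main(void)
{
        const char *s = "a";
        int err;

        while (!(err = walk(s)) && (err = last_step(&s)) > 0)
                printf("following trailing symlink -> %s\n", s);
        return err;
}
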
@@ -3279,11 +3326,13 @@ struct file *do_filp_open(int dfd, struct filename *pathname,
        int flags = op->lookup_flags;
        struct file *filp;
 
-       filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_RCU);
+       set_nameidata(&nd, dfd, pathname);
+       filp = path_openat(&nd, op, flags | LOOKUP_RCU);
        if (unlikely(filp == ERR_PTR(-ECHILD)))
-               filp = path_openat(dfd, pathname, &nd, op, flags);
+               filp = path_openat(&nd, op, flags);
        if (unlikely(filp == ERR_PTR(-ESTALE)))
-               filp = path_openat(dfd, pathname, &nd, op, flags | LOOKUP_REVAL);
+               filp = path_openat(&nd, op, flags | LOOKUP_REVAL);
+       restore_nameidata();
        return filp;
 }
 
@@ -3305,11 +3354,13 @@ struct file *do_file_open_root(struct dentry *dentry, struct vfsmount *mnt,
        if (unlikely(IS_ERR(filename)))
                return ERR_CAST(filename);
 
-       file = path_openat(-1, filename, &nd, op, flags | LOOKUP_RCU);
+       set_nameidata(&nd, -1, filename);
+       file = path_openat(&nd, op, flags | LOOKUP_RCU);
        if (unlikely(file == ERR_PTR(-ECHILD)))
-               file = path_openat(-1, filename, &nd, op, flags);
+               file = path_openat(&nd, op, flags);
        if (unlikely(file == ERR_PTR(-ESTALE)))
-               file = path_openat(-1, filename, &nd, op, flags | LOOKUP_REVAL);
+               file = path_openat(&nd, op, flags | LOOKUP_REVAL);
+       restore_nameidata();
        putname(filename);
        return file;
 }
@@ -3318,7 +3369,8 @@ static struct dentry *filename_create(int dfd, struct filename *name,
                                struct path *path, unsigned int lookup_flags)
 {
        struct dentry *dentry = ERR_PTR(-EEXIST);
-       struct nameidata nd;
+       struct qstr last;
+       int type;
        int err2;
        int error;
        bool is_dir = (lookup_flags & LOOKUP_DIRECTORY);
@@ -3329,26 +3381,25 @@ static struct dentry *filename_create(int dfd, struct filename *name,
         */
        lookup_flags &= LOOKUP_REVAL;
 
-       error = filename_lookup(dfd, name, LOOKUP_PARENT|lookup_flags, &nd);
-       if (error)
-               return ERR_PTR(error);
+       name = filename_parentat(dfd, name, lookup_flags, path, &last, &type);
+       if (IS_ERR(name))
+               return ERR_CAST(name);
 
        /*
         * Yucky last component or no last component at all?
         * (foo/., foo/.., /////)
         */
-       if (nd.last_type != LAST_NORM)
+       if (unlikely(type != LAST_NORM))
                goto out;
-       nd.flags &= ~LOOKUP_PARENT;
-       nd.flags |= LOOKUP_CREATE | LOOKUP_EXCL;
 
        /* don't fail immediately if it's r/o, at least try to report other errors */
-       err2 = mnt_want_write(nd.path.mnt);
+       err2 = mnt_want_write(path->mnt);
        /*
         * Do the final lookup.
         */
-       mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
-       dentry = lookup_hash(&nd);
+       lookup_flags |= LOOKUP_CREATE | LOOKUP_EXCL;
+       mutex_lock_nested(&path->dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+       dentry = __lookup_hash(&last, path->dentry, lookup_flags);
        if (IS_ERR(dentry))
                goto unlock;
 
@@ -3362,7 +3413,7 @@ static struct dentry *filename_create(int dfd, struct filename *name,
         * all is fine. Let's be bastards - you had / on the end, you've
        * been asking for a (non-existent) directory. -ENOENT for you.
         */
-       if (unlikely(!is_dir && nd.last.name[nd.last.len])) {
+       if (unlikely(!is_dir && last.name[last.len])) {
                error = -ENOENT;
                goto fail;
        }
@@ -3370,31 +3421,26 @@ static struct dentry *filename_create(int dfd, struct filename *name,
                error = err2;
                goto fail;
        }
-       *path = nd.path;
+       putname(name);
        return dentry;
 fail:
        dput(dentry);
        dentry = ERR_PTR(error);
 unlock:
-       mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+       mutex_unlock(&path->dentry->d_inode->i_mutex);
        if (!err2)
-               mnt_drop_write(nd.path.mnt);
+               mnt_drop_write(path->mnt);
 out:
-       path_put(&nd.path);
+       path_put(path);
+       putname(name);
        return dentry;
 }
 
 struct dentry *kern_path_create(int dfd, const char *pathname,
                                struct path *path, unsigned int lookup_flags)
 {
-       struct filename *filename = getname_kernel(pathname);
-       struct dentry *res;
-
-       if (IS_ERR(filename))
-               return ERR_CAST(filename);
-       res = filename_create(dfd, filename, path, lookup_flags);
-       putname(filename);
-       return res;
+       return filename_create(dfd, getname_kernel(pathname),
+                               path, lookup_flags);
 }
 EXPORT_SYMBOL(kern_path_create);
 
@@ -3407,16 +3453,10 @@ void done_path_create(struct path *path, struct dentry *dentry)
 }
 EXPORT_SYMBOL(done_path_create);
 
-struct dentry *user_path_create(int dfd, const char __user *pathname,
+inline struct dentry *user_path_create(int dfd, const char __user *pathname,
                                struct path *path, unsigned int lookup_flags)
 {
-       struct filename *tmp = getname(pathname);
-       struct dentry *res;
-       if (IS_ERR(tmp))
-               return ERR_CAST(tmp);
-       res = filename_create(dfd, tmp, path, lookup_flags);
-       putname(tmp);
-       return res;
+       return filename_create(dfd, getname(pathname), path, lookup_flags);
 }
 EXPORT_SYMBOL(user_path_create);
 
@@ -3637,14 +3677,17 @@ static long do_rmdir(int dfd, const char __user *pathname)
        int error = 0;
        struct filename *name;
        struct dentry *dentry;
-       struct nameidata nd;
+       struct path path;
+       struct qstr last;
+       int type;
        unsigned int lookup_flags = 0;
 retry:
-       name = user_path_parent(dfd, pathname, &nd, lookup_flags);
+       name = user_path_parent(dfd, pathname,
+                               &path, &last, &type, lookup_flags);
        if (IS_ERR(name))
                return PTR_ERR(name);
 
-       switch(nd.last_type) {
+       switch (type) {
        case LAST_DOTDOT:
                error = -ENOTEMPTY;
                goto exit1;
@@ -3656,13 +3699,12 @@ retry:
                goto exit1;
        }
 
-       nd.flags &= ~LOOKUP_PARENT;
-       error = mnt_want_write(nd.path.mnt);
+       error = mnt_want_write(path.mnt);
        if (error)
                goto exit1;
 
-       mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
-       dentry = lookup_hash(&nd);
+       mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+       dentry = __lookup_hash(&last, path.dentry, lookup_flags);
        error = PTR_ERR(dentry);
        if (IS_ERR(dentry))
                goto exit2;
@@ -3670,17 +3712,17 @@ retry:
                error = -ENOENT;
                goto exit3;
        }
-       error = security_path_rmdir(&nd.path, dentry);
+       error = security_path_rmdir(&path, dentry);
        if (error)
                goto exit3;
-       error = vfs_rmdir(nd.path.dentry->d_inode, dentry);
+       error = vfs_rmdir(path.dentry->d_inode, dentry);
 exit3:
        dput(dentry);
 exit2:
-       mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
-       mnt_drop_write(nd.path.mnt);
+       mutex_unlock(&path.dentry->d_inode->i_mutex);
+       mnt_drop_write(path.mnt);
 exit1:
-       path_put(&nd.path);
+       path_put(&path);
        putname(name);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
@@ -3763,43 +3805,45 @@ static long do_unlinkat(int dfd, const char __user *pathname)
        int error;
        struct filename *name;
        struct dentry *dentry;
-       struct nameidata nd;
+       struct path path;
+       struct qstr last;
+       int type;
        struct inode *inode = NULL;
        struct inode *delegated_inode = NULL;
        unsigned int lookup_flags = 0;
 retry:
-       name = user_path_parent(dfd, pathname, &nd, lookup_flags);
+       name = user_path_parent(dfd, pathname,
+                               &path, &last, &type, lookup_flags);
        if (IS_ERR(name))
                return PTR_ERR(name);
 
        error = -EISDIR;
-       if (nd.last_type != LAST_NORM)
+       if (type != LAST_NORM)
                goto exit1;
 
-       nd.flags &= ~LOOKUP_PARENT;
-       error = mnt_want_write(nd.path.mnt);
+       error = mnt_want_write(path.mnt);
        if (error)
                goto exit1;
 retry_deleg:
-       mutex_lock_nested(&nd.path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
-       dentry = lookup_hash(&nd);
+       mutex_lock_nested(&path.dentry->d_inode->i_mutex, I_MUTEX_PARENT);
+       dentry = __lookup_hash(&last, path.dentry, lookup_flags);
        error = PTR_ERR(dentry);
        if (!IS_ERR(dentry)) {
                /* Why not before? Because we want correct error value */
-               if (nd.last.name[nd.last.len])
+               if (last.name[last.len])
                        goto slashes;
                inode = dentry->d_inode;
                if (d_is_negative(dentry))
                        goto slashes;
                ihold(inode);
-               error = security_path_unlink(&nd.path, dentry);
+               error = security_path_unlink(&path, dentry);
                if (error)
                        goto exit2;
-               error = vfs_unlink(nd.path.dentry->d_inode, dentry, &delegated_inode);
+               error = vfs_unlink(path.dentry->d_inode, dentry, &delegated_inode);
 exit2:
                dput(dentry);
        }
-       mutex_unlock(&nd.path.dentry->d_inode->i_mutex);
+       mutex_unlock(&path.dentry->d_inode->i_mutex);
        if (inode)
                iput(inode);    /* truncate the inode here */
        inode = NULL;
@@ -3808,9 +3852,9 @@ exit2:
                if (!error)
                        goto retry_deleg;
        }
-       mnt_drop_write(nd.path.mnt);
+       mnt_drop_write(path.mnt);
 exit1:
-       path_put(&nd.path);
+       path_put(&path);
        putname(name);
        if (retry_estale(error, lookup_flags)) {
                lookup_flags |= LOOKUP_REVAL;
@@ -4240,14 +4284,15 @@ EXPORT_SYMBOL(vfs_rename);
 SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
                int, newdfd, const char __user *, newname, unsigned int, flags)
 {
-       struct dentry *old_dir, *new_dir;
        struct dentry *old_dentry, *new_dentry;
        struct dentry *trap;
-       struct nameidata oldnd, newnd;
+       struct path old_path, new_path;
+       struct qstr old_last, new_last;
+       int old_type, new_type;
        struct inode *delegated_inode = NULL;
        struct filename *from;
        struct filename *to;
-       unsigned int lookup_flags = 0;
+       unsigned int lookup_flags = 0, target_flags = LOOKUP_RENAME_TARGET;
        bool should_retry = false;
        int error;
 
@@ -4261,47 +4306,45 @@ SYSCALL_DEFINE5(renameat2, int, olddfd, const char __user *, oldname,
        if ((flags & RENAME_WHITEOUT) && !capable(CAP_MKNOD))
                return -EPERM;
 
+       if (flags & RENAME_EXCHANGE)
+               target_flags = 0;
+
 retry:
-       from = user_path_parent(olddfd, oldname, &oldnd, lookup_flags);
+       from = user_path_parent(olddfd, oldname,
+                               &old_path, &old_last, &old_type, lookup_flags);
        if (IS_ERR(from)) {
                error = PTR_ERR(from);
                goto exit;
        }
 
-       to = user_path_parent(newdfd, newname, &newnd, lookup_flags);
+       to = user_path_parent(newdfd, newname,
+                               &new_path, &new_last, &new_type, lookup_flags);
        if (IS_ERR(to)) {
                error = PTR_ERR(to);
                goto exit1;
        }
 
        error = -EXDEV;
-       if (oldnd.path.mnt != newnd.path.mnt)
+       if (old_path.mnt != new_path.mnt)
                goto exit2;
 
-       old_dir = oldnd.path.dentry;
        error = -EBUSY;
-       if (oldnd.last_type != LAST_NORM)
+       if (old_type != LAST_NORM)
                goto exit2;
 
-       new_dir = newnd.path.dentry;
        if (flags & RENAME_NOREPLACE)
                error = -EEXIST;
-       if (newnd.last_type != LAST_NORM)
+       if (new_type != LAST_NORM)
                goto exit2;
 
-       error = mnt_want_write(oldnd.path.mnt);
+       error = mnt_want_write(old_path.mnt);
        if (error)
                goto exit2;
 
-       oldnd.flags &= ~LOOKUP_PARENT;
-       newnd.flags &= ~LOOKUP_PARENT;
-       if (!(flags & RENAME_EXCHANGE))
-               newnd.flags |= LOOKUP_RENAME_TARGET;
-
 retry_deleg:
-       trap = lock_rename(new_dir, old_dir);
+       trap = lock_rename(new_path.dentry, old_path.dentry);
 
-       old_dentry = lookup_hash(&oldnd);
+       old_dentry = __lookup_hash(&old_last, old_path.dentry, lookup_flags);
        error = PTR_ERR(old_dentry);
        if (IS_ERR(old_dentry))
                goto exit3;
@@ -4309,7 +4352,7 @@ retry_deleg:
        error = -ENOENT;
        if (d_is_negative(old_dentry))
                goto exit4;
-       new_dentry = lookup_hash(&newnd);
+       new_dentry = __lookup_hash(&new_last, new_path.dentry, lookup_flags | target_flags);
        error = PTR_ERR(new_dentry);
        if (IS_ERR(new_dentry))
                goto exit4;
@@ -4323,16 +4366,16 @@ retry_deleg:
 
                if (!d_is_dir(new_dentry)) {
                        error = -ENOTDIR;
-                       if (newnd.last.name[newnd.last.len])
+                       if (new_last.name[new_last.len])
                                goto exit5;
                }
        }
        /* unless the source is a directory trailing slashes give -ENOTDIR */
        if (!d_is_dir(old_dentry)) {
                error = -ENOTDIR;
-               if (oldnd.last.name[oldnd.last.len])
+               if (old_last.name[old_last.len])
                        goto exit5;
-               if (!(flags & RENAME_EXCHANGE) && newnd.last.name[newnd.last.len])
+               if (!(flags & RENAME_EXCHANGE) && new_last.name[new_last.len])
                        goto exit5;
        }
        /* source should not be ancestor of target */
@@ -4345,32 +4388,32 @@ retry_deleg:
        if (new_dentry == trap)
                goto exit5;
 
-       error = security_path_rename(&oldnd.path, old_dentry,
-                                    &newnd.path, new_dentry, flags);
+       error = security_path_rename(&old_path, old_dentry,
+                                    &new_path, new_dentry, flags);
        if (error)
                goto exit5;
-       error = vfs_rename(old_dir->d_inode, old_dentry,
-                          new_dir->d_inode, new_dentry,
+       error = vfs_rename(old_path.dentry->d_inode, old_dentry,
+                          new_path.dentry->d_inode, new_dentry,
                           &delegated_inode, flags);
 exit5:
        dput(new_dentry);
 exit4:
        dput(old_dentry);
 exit3:
-       unlock_rename(new_dir, old_dir);
+       unlock_rename(new_path.dentry, old_path.dentry);
        if (delegated_inode) {
                error = break_deleg_wait(&delegated_inode);
                if (!error)
                        goto retry_deleg;
        }
-       mnt_drop_write(oldnd.path.mnt);
+       mnt_drop_write(old_path.mnt);
 exit2:
        if (retry_estale(error, lookup_flags))
                should_retry = true;
-       path_put(&newnd.path);
+       path_put(&new_path);
        putname(to);
 exit1:
-       path_put(&oldnd.path);
+       path_put(&old_path);
        putname(from);
        if (should_retry) {
                should_retry = false;
@@ -4429,18 +4472,19 @@ EXPORT_SYMBOL(readlink_copy);
  */
 int generic_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 {
-       struct nameidata nd;
        void *cookie;
+       struct inode *inode = d_inode(dentry);
+       const char *link = inode->i_link;
        int res;
 
-       nd.depth = 0;
-       cookie = dentry->d_inode->i_op->follow_link(dentry, &nd);
-       if (IS_ERR(cookie))
-               return PTR_ERR(cookie);
-
-       res = readlink_copy(buffer, buflen, nd_get_link(&nd));
-       if (dentry->d_inode->i_op->put_link)
-               dentry->d_inode->i_op->put_link(dentry, &nd, cookie);
+       if (!link) {
+               link = inode->i_op->follow_link(dentry, &cookie);
+               if (IS_ERR(link))
+                       return PTR_ERR(link);
+       }
+       res = readlink_copy(buffer, buflen, link);
+       if (inode->i_op->put_link)
+               inode->i_op->put_link(inode, cookie);
        return res;
 }
 EXPORT_SYMBOL(generic_readlink);
@@ -4472,22 +4516,21 @@ int page_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 }
 EXPORT_SYMBOL(page_readlink);
 
-void *page_follow_link_light(struct dentry *dentry, struct nameidata *nd)
+const char *page_follow_link_light(struct dentry *dentry, void **cookie)
 {
        struct page *page = NULL;
-       nd_set_link(nd, page_getlink(dentry, &page));
-       return page;
+       char *res = page_getlink(dentry, &page);
+       if (!IS_ERR(res))
+               *cookie = page;
+       return res;
 }
 EXPORT_SYMBOL(page_follow_link_light);
 
-void page_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+void page_put_link(struct inode *unused, void *cookie)
 {
        struct page *page = cookie;
-
-       if (page) {
-               kunmap(page);
-               page_cache_release(page);
-       }
+       kunmap(page);
+       page_cache_release(page);
 }
 EXPORT_SYMBOL(page_put_link);
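
The conversions above reflect the new symlink API: ->follow_link() now takes a void **cookie and returns the link body (or an ERR_PTR), and ->put_link() receives only the inode and the cookie. A user-space model of a matching implementation pair; the toy_* names are hypothetical, and strdup()/free() stand in for the page map/unmap that page_follow_link_light()/page_put_link() actually do:

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

/* Model of the new calling convention: follow_link() returns the
 * link body and records a cookie; put_link() releases the cookie. */
static const char *toy_follow_link(const char *target, void **cookie)
{
        char *body = strdup(target);    /* stands in for a kmap()ed page */
        if (!body)
                return NULL;            /* the kernel would return ERR_PTR */
        *cookie = body;
        return body;
}

static void toy_put_link(void *cookie)
{
        free(cookie);                   /* stands in for kunmap + page put */
}

int main(void)
{
        void *cookie = NULL;
        const char *link = toy_follow_link("/etc/passwd", &cookie);
        if (link) {
                printf("link body: %s\n", link);
                toy_put_link(cookie);
        }
        return 0;
}
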
 
index 1f4f9dac6e5af8017e41ab497b92e0515e5ed71c..9c1c43d0d4f10112bcf711068873a70e2a057200 100644 (file)
@@ -590,24 +590,35 @@ static void delayed_free_vfsmnt(struct rcu_head *head)
 }
 
 /* call under rcu_read_lock */
-bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+int __legitimize_mnt(struct vfsmount *bastard, unsigned seq)
 {
        struct mount *mnt;
        if (read_seqretry(&mount_lock, seq))
-               return false;
+               return 1;
        if (bastard == NULL)
-               return true;
+               return 0;
        mnt = real_mount(bastard);
        mnt_add_count(mnt, 1);
        if (likely(!read_seqretry(&mount_lock, seq)))
-               return true;
+               return 0;
        if (bastard->mnt_flags & MNT_SYNC_UMOUNT) {
                mnt_add_count(mnt, -1);
-               return false;
+               return 1;
+       }
+       return -1;
+}
+
+/* call under rcu_read_lock */
+bool legitimize_mnt(struct vfsmount *bastard, unsigned seq)
+{
+       int res = __legitimize_mnt(bastard, seq);
+       if (likely(!res))
+               return true;
+       if (unlikely(res < 0)) {
+               rcu_read_unlock();
+               mntput(bastard);
+               rcu_read_lock();
        }
-       rcu_read_unlock();
-       mntput(bastard);
-       rcu_read_lock();
        return false;
 }
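
The split above gives RCU-walk callers a tri-state helper: __legitimize_mnt() returns 0 when the reference was grabbed, 1 when it failed with nothing to undo, and -1 when it failed but left a pinned count that must be dropped via mntput() outside the rcu read lock, exactly as the legitimize_mnt() wrapper does. A user-space model of that convention (try_grab()/grab() are hypothetical stand-ins):

#include <stdbool.h>
#include <stdio.h>

/* Tri-state model: 0 = got the reference, 1 = clean failure,
 * -1 = failure that left a count the caller must drop. */
static int try_grab(int seq_retry, int sync_umount)
{
        if (seq_retry && sync_umount)
                return 1;               /* count already undone */
        if (seq_retry)
                return -1;              /* caller must clean up */
        return 0;
}

static bool grab(int seq_retry, int sync_umount)
{
        int res = try_grab(seq_retry, sync_umount);
        if (!res)
                return true;
        if (res < 0)
                printf("dropping stray reference outside the lock\n");
        return false;
}

int main(void)
{
        printf("%d %d %d\n", grab(0, 0), grab(1, 1), grab(1, 0));
        return 0;
}
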
 
@@ -3179,6 +3190,12 @@ bool fs_fully_visible(struct file_system_type *type)
                if (mnt->mnt.mnt_sb->s_type != type)
                        continue;
 
+               /* This mount is not fully visible if its root directory
+                * is not the root directory of the filesystem.
+                */
+               if (mnt->mnt.mnt_root != mnt->mnt.mnt_sb->s_root)
+                       continue;
+
                /* This mount is not fully visible if there are any child mounts
                 * that cover anything except for empty directories.
                 */
index 45b35b9b1e36a1213a2c2736e4ae551dcd0d8848..55e1e3af23a3d3f2313f977b185eb8c3f8ccbc6d 100644 (file)
@@ -38,6 +38,7 @@
 #include <linux/mm.h>
 #include <linux/delay.h>
 #include <linux/errno.h>
+#include <linux/file.h>
 #include <linux/string.h>
 #include <linux/ratelimit.h>
 #include <linux/printk.h>
@@ -5604,6 +5605,7 @@ static struct nfs4_lockdata *nfs4_alloc_lockdata(struct file_lock *fl,
        p->server = server;
        atomic_inc(&lsp->ls_count);
        p->ctx = get_nfs_open_context(ctx);
+       get_file(fl->fl_file);
        memcpy(&p->fl, fl, sizeof(p->fl));
        return p;
 out_free_seqid:
@@ -5716,6 +5718,7 @@ static void nfs4_lock_release(void *calldata)
                nfs_free_seqid(data->arg.lock_seqid);
        nfs4_put_lock_state(data->lsp);
        put_nfs_open_context(data->ctx);
+       fput(data->fl.fl_file);
        kfree(data);
        dprintk("%s: done!\n", __func__);
 }
index 2d56200655fe600ae73d11c382af5815285fab48..b6de433da5db14ab788ba358ce94b5952d5c601f 100644 (file)
@@ -20,7 +20,6 @@
 #include <linux/stat.h>
 #include <linux/mm.h>
 #include <linux/string.h>
-#include <linux/namei.h>
 
 /* Symlink caching in the page cache is even more simplistic
 * and straightforward than readdir caching.
@@ -43,7 +42,7 @@ error:
        return -EIO;
 }
 
-static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *nfs_follow_link(struct dentry *dentry, void **cookie)
 {
        struct inode *inode = d_inode(dentry);
        struct page *page;
@@ -51,19 +50,13 @@ static void *nfs_follow_link(struct dentry *dentry, struct nameidata *nd)
 
        err = ERR_PTR(nfs_revalidate_mapping(inode, inode->i_mapping));
        if (err)
-               goto read_failed;
+               return err;
        page = read_cache_page(&inode->i_data, 0,
                                (filler_t *)nfs_symlink_filler, inode);
-       if (IS_ERR(page)) {
-               err = page;
-               goto read_failed;
-       }
-       nd_set_link(nd, kmap(page));
-       return page;
-
-read_failed:
-       nd_set_link(nd, err);
-       return NULL;
+       if (IS_ERR(page))
+               return ERR_CAST(page);
+       *cookie = page;
+       return kmap(page);
 }
 
 /*
index d12a4be613a5ced58599f8095822f4659b32f9a4..dfc19f1575a19d00bee1b0aeef6575e4416a9ef9 100644 (file)
@@ -1845,12 +1845,15 @@ int nfs_wb_all(struct inode *inode)
        trace_nfs_writeback_inode_enter(inode);
 
        ret = filemap_write_and_wait(inode->i_mapping);
-       if (!ret) {
-               ret = nfs_commit_inode(inode, FLUSH_SYNC);
-               if (!ret)
-                       pnfs_sync_inode(inode, true);
-       }
+       if (ret)
+               goto out;
+       ret = nfs_commit_inode(inode, FLUSH_SYNC);
+       if (ret < 0)
+               goto out;
+       pnfs_sync_inode(inode, true);
+       ret = 0;
 
+out:
        trace_nfs_writeback_inode_exit(inode, ret);
        return ret;
 }
index 03d647bf195d78bb3d6611553c9ad3e6fa4385a2..cdefaa331a0719e88df91ef7c04c32706ae199a1 100644 (file)
@@ -181,6 +181,17 @@ nfsd4_block_proc_layoutcommit(struct inode *inode,
 }
 
 const struct nfsd4_layout_ops bl_layout_ops = {
+       /*
+        * Pretend that we send notification to the client.  This is a blatant
+        * lie to force recent Linux clients to cache our device IDs.
+        * We rarely ever change the device ID, so the harm of leaking deviceids
+        * for a while isn't too bad.  Unfortunately RFC5661 is a complete mess
+        * in this regard, but I filed errata 4119 for this a while ago, and
+        * hopefully the Linux client will eventually start caching deviceids
+        * without this again.
+        */
+       .notify_types           =
+                       NOTIFY_DEVICEID4_DELETE | NOTIFY_DEVICEID4_CHANGE,
        .proc_getdeviceinfo     = nfsd4_block_proc_getdeviceinfo,
        .encode_getdeviceinfo   = nfsd4_block_encode_getdeviceinfo,
        .proc_layoutget         = nfsd4_block_proc_layoutget,
index 58277859a467d878cf5cd5d947d7361f8b3396cf..5694cfb7a47b73a0d7128ed987cf1aaba6ea8859 100644 (file)
@@ -224,7 +224,7 @@ static int nfs_cb_stat_to_errno(int status)
 }
 
 static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
-                              enum nfsstat4 *status)
+                              int *status)
 {
        __be32 *p;
        u32 op;
@@ -235,7 +235,7 @@ static int decode_cb_op_status(struct xdr_stream *xdr, enum nfs_opnum4 expected,
        op = be32_to_cpup(p++);
        if (unlikely(op != expected))
                goto out_unexpected;
-       *status = be32_to_cpup(p);
+       *status = nfs_cb_stat_to_errno(be32_to_cpup(p));
        return 0;
 out_overflow:
        print_overflow_msg(__func__, xdr);
@@ -446,22 +446,16 @@ out_overflow:
 static int decode_cb_sequence4res(struct xdr_stream *xdr,
                                  struct nfsd4_callback *cb)
 {
-       enum nfsstat4 nfserr;
        int status;
 
        if (cb->cb_minorversion == 0)
                return 0;
 
-       status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &nfserr);
-       if (unlikely(status))
-               goto out;
-       if (unlikely(nfserr != NFS4_OK))
-               goto out_default;
-       status = decode_cb_sequence4resok(xdr, cb);
-out:
-       return status;
-out_default:
-       return nfs_cb_stat_to_errno(nfserr);
+       status = decode_cb_op_status(xdr, OP_CB_SEQUENCE, &cb->cb_status);
+       if (unlikely(status || cb->cb_status))
+               return status;
+
+       return decode_cb_sequence4resok(xdr, cb);
 }
 
 /*
@@ -524,26 +518,19 @@ static int nfs4_xdr_dec_cb_recall(struct rpc_rqst *rqstp,
                                  struct nfsd4_callback *cb)
 {
        struct nfs4_cb_compound_hdr hdr;
-       enum nfsstat4 nfserr;
        int status;
 
        status = decode_cb_compound4res(xdr, &hdr);
        if (unlikely(status))
-               goto out;
+               return status;
 
        if (cb != NULL) {
                status = decode_cb_sequence4res(xdr, cb);
-               if (unlikely(status))
-                       goto out;
+               if (unlikely(status || cb->cb_status))
+                       return status;
        }
 
-       status = decode_cb_op_status(xdr, OP_CB_RECALL, &nfserr);
-       if (unlikely(status))
-               goto out;
-       if (unlikely(nfserr != NFS4_OK))
-               status = nfs_cb_stat_to_errno(nfserr);
-out:
-       return status;
+       return decode_cb_op_status(xdr, OP_CB_RECALL, &cb->cb_status);
 }
 
 #ifdef CONFIG_NFSD_PNFS
@@ -621,24 +608,18 @@ static int nfs4_xdr_dec_cb_layout(struct rpc_rqst *rqstp,
                                  struct nfsd4_callback *cb)
 {
        struct nfs4_cb_compound_hdr hdr;
-       enum nfsstat4 nfserr;
        int status;
 
        status = decode_cb_compound4res(xdr, &hdr);
        if (unlikely(status))
-               goto out;
+               return status;
+
        if (cb) {
                status = decode_cb_sequence4res(xdr, cb);
-               if (unlikely(status))
-                       goto out;
+               if (unlikely(status || cb->cb_status))
+                       return status;
        }
-       status = decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &nfserr);
-       if (unlikely(status))
-               goto out;
-       if (unlikely(nfserr != NFS4_OK))
-               status = nfs_cb_stat_to_errno(nfserr);
-out:
-       return status;
+       return decode_cb_op_status(xdr, OP_CB_LAYOUTRECALL, &cb->cb_status);
 }
 #endif /* CONFIG_NFSD_PNFS */
 
@@ -898,13 +879,6 @@ static void nfsd4_cb_prepare(struct rpc_task *task, void *calldata)
                if (!nfsd41_cb_get_slot(clp, task))
                        return;
        }
-       spin_lock(&clp->cl_lock);
-       if (list_empty(&cb->cb_per_client)) {
-               /* This is the first call, not a restart */
-               cb->cb_done = false;
-               list_add(&cb->cb_per_client, &clp->cl_callbacks);
-       }
-       spin_unlock(&clp->cl_lock);
        rpc_call_start(task);
 }
 
@@ -918,22 +892,33 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
 
        if (clp->cl_minorversion) {
                /* No need for lock, access serialized in nfsd4_cb_prepare */
-               ++clp->cl_cb_session->se_cb_seq_nr;
+               if (!task->tk_status)
+                       ++clp->cl_cb_session->se_cb_seq_nr;
                clear_bit(0, &clp->cl_cb_slot_busy);
                rpc_wake_up_next(&clp->cl_cb_waitq);
                dprintk("%s: freed slot, new seqid=%d\n", __func__,
                        clp->cl_cb_session->se_cb_seq_nr);
        }
 
-       if (clp->cl_cb_client != task->tk_client) {
-               /* We're shutting down or changing cl_cb_client; leave
-                * it to nfsd4_process_cb_update to restart the call if
-                * necessary. */
+       /*
+        * If the backchannel connection was shut down while this
+        * task was queued, we need to resubmit it after setting up
+        * a new backchannel connection.
+        *
+        * Note that if we lost our callback connection permanently
+        * the submission code will error out, so we don't need to
+        * handle that case here.
+        */
+       if (task->tk_flags & RPC_TASK_KILLED) {
+               task->tk_status = 0;
+               cb->cb_need_restart = true;
                return;
        }
 
-       if (cb->cb_done)
-               return;
+       if (cb->cb_status) {
+               WARN_ON_ONCE(task->tk_status);
+               task->tk_status = cb->cb_status;
+       }
 
        switch (cb->cb_ops->done(cb, task)) {
        case 0:
@@ -949,21 +934,17 @@ static void nfsd4_cb_done(struct rpc_task *task, void *calldata)
        default:
                BUG();
        }
-       cb->cb_done = true;
 }
 
 static void nfsd4_cb_release(void *calldata)
 {
        struct nfsd4_callback *cb = calldata;
-       struct nfs4_client *clp = cb->cb_clp;
-
-       if (cb->cb_done) {
-               spin_lock(&clp->cl_lock);
-               list_del(&cb->cb_per_client);
-               spin_unlock(&clp->cl_lock);
 
+       if (cb->cb_need_restart)
+               nfsd4_run_cb(cb);
+       else
                cb->cb_ops->release(cb);
-       }
+
 }
 
 static const struct rpc_call_ops nfsd4_cb_ops = {
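
The cb_done/cb_per_client bookkeeping is gone; instead, nfsd4_cb_done() marks a callback with cb_need_restart when its task was killed by a backchannel teardown, and nfsd4_cb_release() requeues it rather than freeing it. A compact user-space model of that requeue-on-release flow (all names hypothetical):

#include <stdbool.h>
#include <stdio.h>

struct cb { bool need_restart; };

static void run_cb(struct cb *cb);      /* forward: the requeue hook */

static void cb_done(struct cb *cb, bool task_killed)
{
        if (task_killed)
                cb->need_restart = true;   /* resubmit after reconnect */
}

static void cb_release(struct cb *cb)
{
        if (cb->need_restart) {
                cb->need_restart = false;
                run_cb(cb);                /* requeue instead of free */
        } else {
                printf("released\n");
        }
}

static void run_cb(struct cb *cb)
{
        printf("re-running callback\n");
        cb_done(cb, false);
        cb_release(cb);
}

int main(void)
{
        struct cb cb = { 0 };
        cb_done(&cb, true);
        cb_release(&cb);
        return 0;
}
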
@@ -1058,9 +1039,6 @@ static void nfsd4_process_cb_update(struct nfsd4_callback *cb)
                nfsd4_mark_cb_down(clp, err);
                return;
        }
-       /* Yay, the callback channel's back! Restart any callbacks: */
-       list_for_each_entry(cb, &clp->cl_callbacks, cb_per_client)
-               queue_work(callback_wq, &cb->cb_work);
 }
 
 static void
@@ -1071,8 +1049,12 @@ nfsd4_run_cb_work(struct work_struct *work)
        struct nfs4_client *clp = cb->cb_clp;
        struct rpc_clnt *clnt;
 
-       if (cb->cb_ops && cb->cb_ops->prepare)
-               cb->cb_ops->prepare(cb);
+       if (cb->cb_need_restart) {
+               cb->cb_need_restart = false;
+       } else {
+               if (cb->cb_ops && cb->cb_ops->prepare)
+                       cb->cb_ops->prepare(cb);
+       }
 
        if (clp->cl_flags & NFSD4_CLIENT_CB_FLAG_MASK)
                nfsd4_process_cb_update(cb);
@@ -1084,6 +1066,15 @@ nfsd4_run_cb_work(struct work_struct *work)
                        cb->cb_ops->release(cb);
                return;
        }
+
+       /*
+        * Don't send probe messages for 4.1 or later.
+        */
+       if (!cb->cb_ops && clp->cl_minorversion) {
+               clp->cl_cb_state = NFSD4_CB_UP;
+               return;
+       }
+
        cb->cb_msg.rpc_cred = clp->cl_cb_cred;
        rpc_call_async(clnt, &cb->cb_msg, RPC_TASK_SOFT | RPC_TASK_SOFTCONN,
                        cb->cb_ops ? &nfsd4_cb_ops : &nfsd4_cb_probe_ops, cb);
@@ -1098,8 +1089,8 @@ void nfsd4_init_cb(struct nfsd4_callback *cb, struct nfs4_client *clp,
        cb->cb_msg.rpc_resp = cb;
        cb->cb_ops = ops;
        INIT_WORK(&cb->cb_work, nfsd4_run_cb_work);
-       INIT_LIST_HEAD(&cb->cb_per_client);
-       cb->cb_done = true;
+       cb->cb_status = 0;
+       cb->cb_need_restart = false;
 }
 
 void nfsd4_run_cb(struct nfsd4_callback *cb)
index 38f2d7abe3a707061c3f860ce8042649bf5109eb..039f9c8a95e84289c7296e9b78973efbea9d6e5a 100644 (file)
@@ -94,6 +94,7 @@ static struct kmem_cache *lockowner_slab;
 static struct kmem_cache *file_slab;
 static struct kmem_cache *stateid_slab;
 static struct kmem_cache *deleg_slab;
+static struct kmem_cache *odstate_slab;
 
 static void free_session(struct nfsd4_session *);
 
@@ -281,6 +282,7 @@ put_nfs4_file(struct nfs4_file *fi)
        if (atomic_dec_and_lock(&fi->fi_ref, &state_lock)) {
                hlist_del_rcu(&fi->fi_hash);
                spin_unlock(&state_lock);
+               WARN_ON_ONCE(!list_empty(&fi->fi_clnt_odstate));
                WARN_ON_ONCE(!list_empty(&fi->fi_delegations));
                call_rcu(&fi->fi_rcu, nfsd4_free_file_rcu);
        }
@@ -471,6 +473,86 @@ static void nfs4_file_put_access(struct nfs4_file *fp, u32 access)
                __nfs4_file_put_access(fp, O_RDONLY);
 }
 
+/*
+ * Allocate a new open/delegation state counter. This is needed for
+ * pNFS for proper return on close semantics.
+ *
+ * Note that we only allocate it for pNFS-enabled exports, otherwise
+ * all pointers to struct nfs4_clnt_odstate are always NULL.
+ */
+static struct nfs4_clnt_odstate *
+alloc_clnt_odstate(struct nfs4_client *clp)
+{
+       struct nfs4_clnt_odstate *co;
+
+       co = kmem_cache_zalloc(odstate_slab, GFP_KERNEL);
+       if (co) {
+               co->co_client = clp;
+               atomic_set(&co->co_odcount, 1);
+       }
+       return co;
+}
+
+static void
+hash_clnt_odstate_locked(struct nfs4_clnt_odstate *co)
+{
+       struct nfs4_file *fp = co->co_file;
+
+       lockdep_assert_held(&fp->fi_lock);
+       list_add(&co->co_perfile, &fp->fi_clnt_odstate);
+}
+
+static inline void
+get_clnt_odstate(struct nfs4_clnt_odstate *co)
+{
+       if (co)
+               atomic_inc(&co->co_odcount);
+}
+
+static void
+put_clnt_odstate(struct nfs4_clnt_odstate *co)
+{
+       struct nfs4_file *fp;
+
+       if (!co)
+               return;
+
+       fp = co->co_file;
+       if (atomic_dec_and_lock(&co->co_odcount, &fp->fi_lock)) {
+               list_del(&co->co_perfile);
+               spin_unlock(&fp->fi_lock);
+
+               nfsd4_return_all_file_layouts(co->co_client, fp);
+               kmem_cache_free(odstate_slab, co);
+       }
+}
+
+static struct nfs4_clnt_odstate *
+find_or_hash_clnt_odstate(struct nfs4_file *fp, struct nfs4_clnt_odstate *new)
+{
+       struct nfs4_clnt_odstate *co;
+       struct nfs4_client *cl;
+
+       if (!new)
+               return NULL;
+
+       cl = new->co_client;
+
+       spin_lock(&fp->fi_lock);
+       list_for_each_entry(co, &fp->fi_clnt_odstate, co_perfile) {
+               if (co->co_client == cl) {
+                       get_clnt_odstate(co);
+                       goto out;
+               }
+       }
+       co = new;
+       co->co_file = fp;
+       hash_clnt_odstate_locked(new);
+out:
+       spin_unlock(&fp->fi_lock);
+       return co;
+}
+
 struct nfs4_stid *nfs4_alloc_stid(struct nfs4_client *cl,
                                         struct kmem_cache *slab)
 {
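
find_or_hash_clnt_odstate() above follows the usual optimistic-allocation idiom: the caller allocates a candidate outside fi_lock, and under the lock we either take a reference on an existing per-client entry (the caller then frees the unused candidate, as nfsd4_process_open2 does by checking whether op_odstate was consumed) or hash the new one. A single-threaded user-space sketch of the idiom, with hypothetical names and a plain list in place of the locked per-file list:

#include <stdio.h>
#include <stdlib.h>

/* Find-or-insert with a caller-allocated 'new': on a hit, take a
 * reference and leave 'new' for the caller to free; on a miss,
 * link 'new' into the list. */
struct odstate {
        int client;
        int refcount;
        struct odstate *next;
};

static struct odstate *head;

static struct odstate *find_or_hash(struct odstate *new)
{
        struct odstate *co;

        for (co = head; co; co = co->next) {
                if (co->client == new->client) {
                        co->refcount++;         /* reuse existing entry */
                        return co;
                }
        }
        new->next = head;                       /* first open: hash 'new' */
        head = new;
        return new;
}

int main(void)
{
        struct odstate *a = calloc(1, sizeof(*a));
        struct odstate *b = calloc(1, sizeof(*b));

        a->client = b->client = 42;
        a->refcount = b->refcount = 1;
        find_or_hash(a);
        if (find_or_hash(b) != b)
                free(b);                        /* lost the race, discard */
        printf("refcount=%d\n", head->refcount);
        free(a);
        return 0;
}
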
@@ -606,7 +688,8 @@ static void block_delegations(struct knfsd_fh *fh)
 }
 
 static struct nfs4_delegation *
-alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
+alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh,
+                struct nfs4_clnt_odstate *odstate)
 {
        struct nfs4_delegation *dp;
        long n;
@@ -631,6 +714,8 @@ alloc_init_deleg(struct nfs4_client *clp, struct svc_fh *current_fh)
        INIT_LIST_HEAD(&dp->dl_perfile);
        INIT_LIST_HEAD(&dp->dl_perclnt);
        INIT_LIST_HEAD(&dp->dl_recall_lru);
+       dp->dl_clnt_odstate = odstate;
+       get_clnt_odstate(odstate);
        dp->dl_type = NFS4_OPEN_DELEGATE_READ;
        dp->dl_retries = 1;
        nfsd4_init_cb(&dp->dl_recall, dp->dl_stid.sc_client,
@@ -714,6 +799,7 @@ static void destroy_delegation(struct nfs4_delegation *dp)
        spin_lock(&state_lock);
        unhash_delegation_locked(dp);
        spin_unlock(&state_lock);
+       put_clnt_odstate(dp->dl_clnt_odstate);
        nfs4_put_deleg_lease(dp->dl_stid.sc_file);
        nfs4_put_stid(&dp->dl_stid);
 }
@@ -724,6 +810,7 @@ static void revoke_delegation(struct nfs4_delegation *dp)
 
        WARN_ON(!list_empty(&dp->dl_recall_lru));
 
+       put_clnt_odstate(dp->dl_clnt_odstate);
        nfs4_put_deleg_lease(dp->dl_stid.sc_file);
 
        if (clp->cl_minorversion == 0)
@@ -933,6 +1020,7 @@ static void nfs4_free_ol_stateid(struct nfs4_stid *stid)
 {
        struct nfs4_ol_stateid *stp = openlockstateid(stid);
 
+       put_clnt_odstate(stp->st_clnt_odstate);
        release_all_access(stp);
        if (stp->st_stateowner)
                nfs4_put_stateowner(stp->st_stateowner);
@@ -1538,7 +1626,6 @@ static struct nfs4_client *alloc_client(struct xdr_netobj name)
        INIT_LIST_HEAD(&clp->cl_openowners);
        INIT_LIST_HEAD(&clp->cl_delegations);
        INIT_LIST_HEAD(&clp->cl_lru);
-       INIT_LIST_HEAD(&clp->cl_callbacks);
        INIT_LIST_HEAD(&clp->cl_revoked);
 #ifdef CONFIG_NFSD_PNFS
        INIT_LIST_HEAD(&clp->cl_lo_states);
@@ -1634,6 +1721,7 @@ __destroy_client(struct nfs4_client *clp)
        while (!list_empty(&reaplist)) {
                dp = list_entry(reaplist.next, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
+               put_clnt_odstate(dp->dl_clnt_odstate);
                nfs4_put_deleg_lease(dp->dl_stid.sc_file);
                nfs4_put_stid(&dp->dl_stid);
        }
@@ -3057,6 +3145,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
        spin_lock_init(&fp->fi_lock);
        INIT_LIST_HEAD(&fp->fi_stateids);
        INIT_LIST_HEAD(&fp->fi_delegations);
+       INIT_LIST_HEAD(&fp->fi_clnt_odstate);
        fh_copy_shallow(&fp->fi_fhandle, fh);
        fp->fi_deleg_file = NULL;
        fp->fi_had_conflict = false;
@@ -3073,6 +3162,7 @@ static void nfsd4_init_file(struct knfsd_fh *fh, unsigned int hashval,
 void
 nfsd4_free_slabs(void)
 {
+       kmem_cache_destroy(odstate_slab);
        kmem_cache_destroy(openowner_slab);
        kmem_cache_destroy(lockowner_slab);
        kmem_cache_destroy(file_slab);
@@ -3103,8 +3193,14 @@ nfsd4_init_slabs(void)
                        sizeof(struct nfs4_delegation), 0, 0, NULL);
        if (deleg_slab == NULL)
                goto out_free_stateid_slab;
+       odstate_slab = kmem_cache_create("nfsd4_odstate",
+                       sizeof(struct nfs4_clnt_odstate), 0, 0, NULL);
+       if (odstate_slab == NULL)
+               goto out_free_deleg_slab;
        return 0;
 
+out_free_deleg_slab:
+       kmem_cache_destroy(deleg_slab);
 out_free_stateid_slab:
        kmem_cache_destroy(stateid_slab);
 out_free_file_slab:
@@ -3581,6 +3677,14 @@ alloc_stateid:
        open->op_stp = nfs4_alloc_open_stateid(clp);
        if (!open->op_stp)
                return nfserr_jukebox;
+
+       if (nfsd4_has_session(cstate) &&
+           (cstate->current_fh.fh_export->ex_flags & NFSEXP_PNFS)) {
+               open->op_odstate = alloc_clnt_odstate(clp);
+               if (!open->op_odstate)
+                       return nfserr_jukebox;
+       }
+
        return nfs_ok;
 }
 
@@ -3869,7 +3973,7 @@ out_fput:
 
 static struct nfs4_delegation *
 nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
-                   struct nfs4_file *fp)
+                   struct nfs4_file *fp, struct nfs4_clnt_odstate *odstate)
 {
        int status;
        struct nfs4_delegation *dp;
@@ -3877,7 +3981,7 @@ nfs4_set_delegation(struct nfs4_client *clp, struct svc_fh *fh,
        if (fp->fi_had_conflict)
                return ERR_PTR(-EAGAIN);
 
-       dp = alloc_init_deleg(clp, fh);
+       dp = alloc_init_deleg(clp, fh, odstate);
        if (!dp)
                return ERR_PTR(-ENOMEM);
 
@@ -3903,6 +4007,7 @@ out_unlock:
        spin_unlock(&state_lock);
 out:
        if (status) {
+               put_clnt_odstate(dp->dl_clnt_odstate);
                nfs4_put_stid(&dp->dl_stid);
                return ERR_PTR(status);
        }
@@ -3980,7 +4085,7 @@ nfs4_open_delegation(struct svc_fh *fh, struct nfsd4_open *open,
                default:
                        goto out_no_deleg;
        }
-       dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file);
+       dp = nfs4_set_delegation(clp, fh, stp->st_stid.sc_file, stp->st_clnt_odstate);
        if (IS_ERR(dp))
                goto out_no_deleg;
 
@@ -4069,6 +4174,11 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
                        release_open_stateid(stp);
                        goto out;
                }
+
+               stp->st_clnt_odstate = find_or_hash_clnt_odstate(fp,
+                                                       open->op_odstate);
+               if (stp->st_clnt_odstate == open->op_odstate)
+                       open->op_odstate = NULL;
        }
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&open->op_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
@@ -4129,6 +4239,8 @@ void nfsd4_cleanup_open_state(struct nfsd4_compound_state *cstate,
                kmem_cache_free(file_slab, open->op_file);
        if (open->op_stp)
                nfs4_put_stid(&open->op_stp->st_stid);
+       if (open->op_odstate)
+               kmem_cache_free(odstate_slab, open->op_odstate);
 }
 
 __be32
@@ -4385,10 +4497,17 @@ static __be32 check_stateid_generation(stateid_t *in, stateid_t *ref, bool has_s
        return nfserr_old_stateid;
 }
 
+static __be32 nfsd4_check_openowner_confirmed(struct nfs4_ol_stateid *ols)
+{
+       if (ols->st_stateowner->so_is_open_owner &&
+           !(openowner(ols->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
+               return nfserr_bad_stateid;
+       return nfs_ok;
+}
+
 static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
 {
        struct nfs4_stid *s;
-       struct nfs4_ol_stateid *ols;
        __be32 status = nfserr_bad_stateid;
 
        if (ZERO_STATEID(stateid) || ONE_STATEID(stateid))
@@ -4418,13 +4537,7 @@ static __be32 nfsd4_validate_stateid(struct nfs4_client *cl, stateid_t *stateid)
                break;
        case NFS4_OPEN_STID:
        case NFS4_LOCK_STID:
-               ols = openlockstateid(s);
-               if (ols->st_stateowner->so_is_open_owner
-                               && !(openowner(ols->st_stateowner)->oo_flags
-                                               & NFS4_OO_CONFIRMED))
-                       status = nfserr_bad_stateid;
-               else
-                       status = nfs_ok;
+               status = nfsd4_check_openowner_confirmed(openlockstateid(s));
                break;
        default:
                printk("unknown stateid type %x\n", s->sc_type);
@@ -4516,8 +4629,8 @@ nfs4_preprocess_stateid_op(struct net *net, struct nfsd4_compound_state *cstate,
                status = nfs4_check_fh(current_fh, stp);
                if (status)
                        goto out;
-               if (stp->st_stateowner->so_is_open_owner
-                   && !(openowner(stp->st_stateowner)->oo_flags & NFS4_OO_CONFIRMED))
+               status = nfsd4_check_openowner_confirmed(stp);
+               if (status)
                        goto out;
                status = nfs4_check_openmode(stp, flags);
                if (status)
@@ -4852,9 +4965,6 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
        update_stateid(&stp->st_stid.sc_stateid);
        memcpy(&close->cl_stateid, &stp->st_stid.sc_stateid, sizeof(stateid_t));
 
-       nfsd4_return_all_file_layouts(stp->st_stateowner->so_client,
-                                     stp->st_stid.sc_file);
-
        nfsd4_close_open_stateid(stp);
 
        /* put reference from nfs4_preprocess_seqid_op */
@@ -6488,6 +6598,7 @@ nfs4_state_shutdown_net(struct net *net)
        list_for_each_safe(pos, next, &reaplist) {
                dp = list_entry (pos, struct nfs4_delegation, dl_recall_lru);
                list_del_init(&dp->dl_recall_lru);
+               put_clnt_odstate(dp->dl_clnt_odstate);
                nfs4_put_deleg_lease(dp->dl_stid.sc_file);
                nfs4_put_stid(&dp->dl_stid);
        }
index 4f3bfeb1176662ce5c7eb6f59f9440c95c8cf68e..dbc4f85a500825a2c80b21a9a92b52ff1631b8fb 100644 (file)
@@ -63,12 +63,12 @@ typedef struct {
 
 struct nfsd4_callback {
        struct nfs4_client *cb_clp;
-       struct list_head cb_per_client;
        u32 cb_minorversion;
        struct rpc_message cb_msg;
        struct nfsd4_callback_ops *cb_ops;
        struct work_struct cb_work;
-       bool cb_done;
+       int cb_status;
+       bool cb_need_restart;
 };
 
 struct nfsd4_callback_ops {
@@ -126,6 +126,7 @@ struct nfs4_delegation {
        struct list_head        dl_perfile;
        struct list_head        dl_perclnt;
        struct list_head        dl_recall_lru;  /* delegation recalled */
+       struct nfs4_clnt_odstate *dl_clnt_odstate;
        u32                     dl_type;
        time_t                  dl_time;
 /* For recall: */
@@ -332,7 +333,6 @@ struct nfs4_client {
        int                     cl_cb_state;
        struct nfsd4_callback   cl_cb_null;
        struct nfsd4_session    *cl_cb_session;
-       struct list_head        cl_callbacks; /* list of in-progress callbacks */
 
        /* for all client information that callback code might need: */
        spinlock_t              cl_lock;
@@ -464,6 +464,17 @@ static inline struct nfs4_lockowner * lockowner(struct nfs4_stateowner *so)
        return container_of(so, struct nfs4_lockowner, lo_owner);
 }
 
+/*
+ * Per-client state tracking the number of opens and outstanding delegations
+ * on a file from a particular client. 'od' stands for 'open & delegation'.
+ */
+struct nfs4_clnt_odstate {
+       struct nfs4_client      *co_client;
+       struct nfs4_file        *co_file;
+       struct list_head        co_perfile;
+       atomic_t                co_odcount;
+};
+
 /*
  * nfs4_file: a file opened by some number of (open) nfs4_stateowners.
  *
@@ -485,6 +496,7 @@ struct nfs4_file {
                struct list_head        fi_delegations;
                struct rcu_head         fi_rcu;
        };
+       struct list_head        fi_clnt_odstate;
        /* One each for O_RDONLY, O_WRONLY, O_RDWR: */
        struct file *           fi_fds[3];
        /*
@@ -526,6 +538,7 @@ struct nfs4_ol_stateid {
        struct list_head              st_perstateowner;
        struct list_head              st_locks;
        struct nfs4_stateowner      * st_stateowner;
+       struct nfs4_clnt_odstate    * st_clnt_odstate;
        unsigned char                 st_access_bmap;
        unsigned char                 st_deny_bmap;
        struct nfs4_ol_stateid         * st_openstp;
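
The put_clnt_odstate() calls added to the teardown paths above drop a reference taken when the odstate was hashed on its file. A minimal sketch of that refcounting discipline; the demo_ helpers are illustrative, not the fs/nfsd/nfs4state.c implementation, which additionally pins co_file and takes the file's fi_lock around the unhash:

#include <linux/atomic.h>
#include <linux/list.h>
#include <linux/slab.h>

/* Illustrative sketch only: the real helpers serialize the list removal
 * with the owning file's fi_lock and free through the new odstate_slab. */
static struct nfs4_clnt_odstate *
demo_get_clnt_odstate(struct nfs4_clnt_odstate *co)
{
	if (co)
		atomic_inc(&co->co_odcount);
	return co;
}

static void demo_put_clnt_odstate(struct nfs4_clnt_odstate *co,
				  struct kmem_cache *slab)
{
	if (co && atomic_dec_and_test(&co->co_odcount)) {
		list_del(&co->co_perfile);	/* locking elided in this sketch */
		kmem_cache_free(slab, co);
	}
}
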
index f982ae84f0cd2303df5275086a3a05ce519bbd86..2f8c092be2b3344901f07f2f16e0ee239034ccdf 100644 (file)
@@ -247,6 +247,7 @@ struct nfsd4_open {
        struct nfs4_openowner *op_openowner; /* used during processing */
        struct nfs4_file *op_file;          /* used during processing */
        struct nfs4_ol_stateid *op_stp;     /* used during processing */
+       struct nfs4_clnt_odstate *op_odstate; /* used during processing */
        struct nfs4_acl *op_acl;
        struct xdr_netobj op_label;
 };
index 0f35b80d17fe019cdae356ecaba8359ac9f9a9a2..443abecf01b7d45cfb19be0ee63032b156ae718b 100644 (file)
@@ -35,7 +35,7 @@
  * ntfs_lookup - find the inode represented by a dentry in a directory inode
  * @dir_ino:   directory inode in which to look for the inode
  * @dent:      dentry representing the inode to look for
- * @nd:                lookup nameidata
+ * @flags:     lookup flags
  *
  * In short, ntfs_lookup() looks for the inode represented by the dentry @dent
  * in the directory inode @dir_ino and if found attaches the inode to the
index 082234581d05b2b2190601f3b8a5f545f7380140..83f4e76511c2bf7804c922f268ba4319a5cfb799 100644 (file)
@@ -159,7 +159,7 @@ int omfs_allocate_range(struct super_block *sb,
        goto out;
 
 found:
-       *return_block = i * bits_per_entry + bit;
+       *return_block = (u64) i * bits_per_entry + bit;
        *return_size = run;
        ret = set_run(sb, i, bits_per_entry, bit, run, 1);
 
index 138321b0c6c2b95a8efcef1a5b3183f3126acbb9..3d935c81789aaab13e33e52fab88b408fbccd6f2 100644 (file)
@@ -306,7 +306,8 @@ static const struct super_operations omfs_sops = {
  */
 static int omfs_get_imap(struct super_block *sb)
 {
-       unsigned int bitmap_size, count, array_size;
+       unsigned int bitmap_size, array_size;
+       int count;
        struct omfs_sb_info *sbi = OMFS_SB(sb);
        struct buffer_head *bh;
        unsigned long **ptr;
@@ -359,7 +360,7 @@ nomem:
 }
 
 enum {
-       Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask
+       Opt_uid, Opt_gid, Opt_umask, Opt_dmask, Opt_fmask, Opt_err
 };
 
 static const match_table_t tokens = {
@@ -368,6 +369,7 @@ static const match_table_t tokens = {
        {Opt_umask, "umask=%o"},
        {Opt_dmask, "dmask=%o"},
        {Opt_fmask, "fmask=%o"},
+       {Opt_err, NULL},
 };
 
 static int parse_options(char *options, struct omfs_sb_info *sbi)
@@ -548,8 +550,10 @@ static int omfs_fill_super(struct super_block *sb, void *data, int silent)
        }
 
        sb->s_root = d_make_root(root);
-       if (!sb->s_root)
+       if (!sb->s_root) {
+               ret = -ENOMEM;
                goto out_brelse_bh2;
+       }
        printk(KERN_DEBUG "omfs: Mounted volume %s\n", omfs_rb->r_name);
 
        ret = 0;
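
The new {Opt_err, NULL} entry is the sentinel match_token() scans for; without it, an unrecognized mount option walks past the end of the table. A hedged sketch of the lookup pattern (the Demo_ names are illustrative, not the omfs code):

#include <linux/parser.h>

enum { Demo_uid, Demo_err };

static const match_table_t demo_tokens = {
	{Demo_uid, "uid=%u"},
	{Demo_err, NULL},		/* required terminator */
};

static int demo_parse_one(char *p)
{
	substring_t args[MAX_OPT_ARGS];
	int option;

	switch (match_token(p, demo_tokens, args)) {
	case Demo_uid:
		if (match_int(&args[0], &option))
			return -EINVAL;
		return option;
	default:
		return -EINVAL;		/* hit the Demo_err sentinel */
	}
}
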
index 98e5a52dc68c9503136b8ff6a89b44dacd5478bf..e0250bdcc44005db6510ac516a53923282db5d12 100644 (file)
--- a/fs/open.c
+++ b/fs/open.c
@@ -367,7 +367,7 @@ retry:
        if (res)
                goto out;
 
-       inode = path.dentry->d_inode;
+       inode = d_backing_inode(path.dentry);
 
        if ((mode & MAY_EXEC) && S_ISREG(inode->i_mode)) {
                /*
index 24f640441bd90977a079aac782768025c68f3712..84d693d374284b580208fec3b8eb3c57bdd4195c 100644 (file)
@@ -299,6 +299,9 @@ int ovl_copy_up_one(struct dentry *parent, struct dentry *dentry,
        struct cred *override_cred;
        char *link = NULL;
 
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
        ovl_path_upper(parent, &parentpath);
        upperdir = parentpath.dentry;
 
index d139405d2bfad7cfd94c735913ecebf221def5b5..692ceda3bc21f6976b65f3e2d5aa4b7ef2e9c5e8 100644 (file)
@@ -222,6 +222,9 @@ static struct dentry *ovl_clear_empty(struct dentry *dentry,
        struct kstat stat;
        int err;
 
+       if (WARN_ON(!workdir))
+               return ERR_PTR(-EROFS);
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -322,6 +325,9 @@ static int ovl_create_over_whiteout(struct dentry *dentry, struct inode *inode,
        struct dentry *newdentry;
        int err;
 
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
        err = ovl_lock_rename_workdir(workdir, upperdir);
        if (err)
                goto out;
@@ -506,11 +512,28 @@ static int ovl_remove_and_whiteout(struct dentry *dentry, bool is_dir)
        struct dentry *opaquedir = NULL;
        int err;
 
-       if (is_dir && OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
-               opaquedir = ovl_check_empty_and_clear(dentry);
-               err = PTR_ERR(opaquedir);
-               if (IS_ERR(opaquedir))
-                       goto out;
+       if (WARN_ON(!workdir))
+               return -EROFS;
+
+       if (is_dir) {
+               if (OVL_TYPE_MERGE_OR_LOWER(ovl_path_type(dentry))) {
+                       opaquedir = ovl_check_empty_and_clear(dentry);
+                       err = PTR_ERR(opaquedir);
+                       if (IS_ERR(opaquedir))
+                               goto out;
+               } else {
+                       LIST_HEAD(list);
+
+                       /*
+                        * When removing an empty opaque directory, it makes
+                        * no sense to replace it with an exact replica of
+                        * itself.  But emptiness still needs to be checked.
+                        */
+                       err = ovl_check_empty_dir(dentry, &list);
+                       ovl_cache_free(&list);
+                       if (err)
+                               goto out;
+               }
        }
 
        err = ovl_lock_rename_workdir(workdir, upperdir);
index 04f1248846877d019625c861b474702177b38ae5..308379b2d0b2cb82b6ad505755484a19b4042fa0 100644 (file)
@@ -140,11 +140,12 @@ struct ovl_link_data {
        void *cookie;
 };
 
-static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *ovl_follow_link(struct dentry *dentry, void **cookie)
 {
-       void *ret;
        struct dentry *realdentry;
        struct inode *realinode;
+       struct ovl_link_data *data = NULL;
+       const char *ret;
 
        realdentry = ovl_dentry_real(dentry);
        realinode = realdentry->d_inode;
@@ -152,28 +153,28 @@ static void *ovl_follow_link(struct dentry *dentry, struct nameidata *nd)
        if (WARN_ON(!realinode->i_op->follow_link))
                return ERR_PTR(-EPERM);
 
-       ret = realinode->i_op->follow_link(realdentry, nd);
-       if (IS_ERR(ret))
-               return ret;
-
        if (realinode->i_op->put_link) {
-               struct ovl_link_data *data;
-
                data = kmalloc(sizeof(struct ovl_link_data), GFP_KERNEL);
-               if (!data) {
-                       realinode->i_op->put_link(realdentry, nd, ret);
+               if (!data)
                        return ERR_PTR(-ENOMEM);
-               }
                data->realdentry = realdentry;
-               data->cookie = ret;
+       }
 
-               return data;
-       } else {
-               return NULL;
+       ret = realinode->i_op->follow_link(realdentry, cookie);
+       if (IS_ERR_OR_NULL(ret)) {
+               kfree(data);
+               return ret;
        }
+
+       if (data)
+               data->cookie = *cookie;
+
+       *cookie = data;
+
+       return ret;
 }
 
-static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
+static void ovl_put_link(struct inode *unused, void *c)
 {
        struct inode *realinode;
        struct ovl_link_data *data = c;
@@ -182,7 +183,7 @@ static void ovl_put_link(struct dentry *dentry, struct nameidata *nd, void *c)
                return;
 
        realinode = data->realdentry->d_inode;
-       realinode->i_op->put_link(data->realdentry, nd, data->cookie);
+       realinode->i_op->put_link(realinode, data->cookie);
        kfree(data);
 }
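
This is one instance of the tree-wide 4.2 change of ->follow_link from taking a nameidata to returning the link body directly, with an opaque *cookie handed to ->put_link for cleanup. A minimal sketch of a filesystem on the new convention, assuming a hypothetical foo_inode that keeps its target string in memory for the inode's lifetime (so no cookie or ->put_link is needed):

#include <linux/fs.h>

struct foo_inode {
	struct inode	vfs_inode;
	char		*link_target;	/* NUL-terminated, lives with the inode */
};

static inline struct foo_inode *FOO_I(struct inode *inode)
{
	return container_of(inode, struct foo_inode, vfs_inode);
}

static const char *foo_follow_link(struct dentry *dentry, void **cookie)
{
	/* Nothing allocated per traversal, so *cookie is left untouched. */
	return FOO_I(d_inode(dentry))->link_target;
}

static const struct inode_operations foo_symlink_iops = {
	.readlink	= generic_readlink,
	.follow_link	= foo_follow_link,
};
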
 
index 5f0d1993e6e3952bda9352d231e8fce7dee838e8..bf8537c7f455207830046a50d67d394f86d37f4a 100644 (file)
@@ -529,7 +529,7 @@ static int ovl_remount(struct super_block *sb, int *flags, char *data)
 {
        struct ovl_fs *ufs = sb->s_fs_info;
 
-       if (!(*flags & MS_RDONLY) && !ufs->upper_mnt)
+       if (!(*flags & MS_RDONLY) && (!ufs->upper_mnt || !ufs->workdir))
                return -EROFS;
 
        return 0;
@@ -925,9 +925,10 @@ static int ovl_fill_super(struct super_block *sb, void *data, int silent)
                ufs->workdir = ovl_workdir_create(ufs->upper_mnt, workpath.dentry);
                err = PTR_ERR(ufs->workdir);
                if (IS_ERR(ufs->workdir)) {
-                       pr_err("overlayfs: failed to create directory %s/%s\n",
-                              ufs->config.workdir, OVL_WORKDIR_NAME);
-                       goto out_put_upper_mnt;
+                       pr_warn("overlayfs: failed to create directory %s/%s (errno: %i); mounting read-only\n",
+                               ufs->config.workdir, OVL_WORKDIR_NAME, -err);
+                       sb->s_flags |= MS_RDONLY;
+                       ufs->workdir = NULL;
                }
        }
 
@@ -997,7 +998,6 @@ out_put_lower_mnt:
        kfree(ufs->lower_mnt);
 out_put_workdir:
        dput(ufs->workdir);
-out_put_upper_mnt:
        mntput(ufs->upper_mnt);
 out_put_lowerpath:
        for (i = 0; i < numlower; i++)
index 093ca14f570154f5de1cc6db10c7995e1b7cba02..286a422f440e9ed8a6c4b9c54ad7b7c85d28d6f2 100644 (file)
@@ -1380,7 +1380,7 @@ static int proc_exe_link(struct dentry *dentry, struct path *exe_path)
                return -ENOENT;
 }
 
-static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *proc_pid_follow_link(struct dentry *dentry, void **cookie)
 {
        struct inode *inode = d_inode(dentry);
        struct path path;
@@ -1394,7 +1394,7 @@ static void *proc_pid_follow_link(struct dentry *dentry, struct nameidata *nd)
        if (error)
                goto out;
 
-       nd_jump_link(nd, &path);
+       nd_jump_link(&path);
        return NULL;
 out:
        return ERR_PTR(error);
index 8272aaba1bb06fd4b65416979155f18b590291a4..afe232b9df6e5b6c83779712c8cd068169992a55 100644 (file)
@@ -23,7 +23,6 @@
 #include <linux/slab.h>
 #include <linux/mount.h>
 #include <linux/magic.h>
-#include <linux/namei.h>
 
 #include <asm/uaccess.h>
 
@@ -394,16 +393,16 @@ static const struct file_operations proc_reg_file_ops_no_compat = {
 };
 #endif
 
-static void *proc_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *proc_follow_link(struct dentry *dentry, void **cookie)
 {
        struct proc_dir_entry *pde = PDE(d_inode(dentry));
        if (unlikely(!use_pde(pde)))
                return ERR_PTR(-EINVAL);
-       nd_set_link(nd, pde->data);
-       return pde;
+       *cookie = pde;
+       return pde->data;
 }
 
-static void proc_put_link(struct dentry *dentry, struct nameidata *nd, void *p)
+static void proc_put_link(struct inode *unused, void *p)
 {
        unuse_pde(p);
 }
index e512642dbbdcb3cfe37f97b586770d695e2a755f..f6e8354b8cea20a936f6a4f8ae0335fd7fa36bd4 100644 (file)
@@ -30,7 +30,7 @@ static const struct proc_ns_operations *ns_entries[] = {
        &mntns_operations,
 };
 
-static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *proc_ns_follow_link(struct dentry *dentry, void **cookie)
 {
        struct inode *inode = d_inode(dentry);
        const struct proc_ns_operations *ns_ops = PROC_I(inode)->ns_ops;
@@ -45,7 +45,7 @@ static void *proc_ns_follow_link(struct dentry *dentry, struct nameidata *nd)
        if (ptrace_may_access(task, PTRACE_MODE_READ)) {
                error = ns_get_path(&ns_path, task, ns_ops);
                if (!error)
-                       nd_jump_link(nd, &ns_path);
+                       nd_jump_link(&ns_path);
        }
        put_task_struct(task);
        return error;
index 6195b4a7c3b17f8c7feb09542e7e435b2cd59356..113b8d061fc023858ab152a5033e029d085f27a6 100644 (file)
@@ -1,5 +1,4 @@
 #include <linux/sched.h>
-#include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/pid_namespace.h>
 #include "internal.h"
@@ -19,21 +18,20 @@ static int proc_self_readlink(struct dentry *dentry, char __user *buffer,
        return readlink_copy(buffer, buflen, tmp);
 }
 
-static void *proc_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *proc_self_follow_link(struct dentry *dentry, void **cookie)
 {
        struct pid_namespace *ns = dentry->d_sb->s_fs_info;
        pid_t tgid = task_tgid_nr_ns(current, ns);
-       char *name = ERR_PTR(-ENOENT);
-       if (tgid) {
-               /* 11 for max length of signed int in decimal + NULL term */
-               name = kmalloc(12, GFP_KERNEL);
-               if (!name)
-                       name = ERR_PTR(-ENOMEM);
-               else
-                       sprintf(name, "%d", tgid);
-       }
-       nd_set_link(nd, name);
-       return NULL;
+       char *name;
+
+       if (!tgid)
+               return ERR_PTR(-ENOENT);
+       /* 11 for max length of signed int in decimal + NULL term */
+       name = kmalloc(12, GFP_KERNEL);
+       if (!name)
+               return ERR_PTR(-ENOMEM);
+       sprintf(name, "%d", tgid);
+       return *cookie = name;
 }
 
 static const struct inode_operations proc_self_inode_operations = {
index a8371993b4fb7822b865cd4b2a02bc63d9642fa7..947b0f4fd0a194057334762bafeff3548c276568 100644 (file)
@@ -1,5 +1,4 @@
 #include <linux/sched.h>
-#include <linux/namei.h>
 #include <linux/slab.h>
 #include <linux/pid_namespace.h>
 #include "internal.h"
@@ -20,21 +19,20 @@ static int proc_thread_self_readlink(struct dentry *dentry, char __user *buffer,
        return readlink_copy(buffer, buflen, tmp);
 }
 
-static void *proc_thread_self_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *proc_thread_self_follow_link(struct dentry *dentry, void **cookie)
 {
        struct pid_namespace *ns = dentry->d_sb->s_fs_info;
        pid_t tgid = task_tgid_nr_ns(current, ns);
        pid_t pid = task_pid_nr_ns(current, ns);
-       char *name = ERR_PTR(-ENOENT);
-       if (pid) {
-               name = kmalloc(PROC_NUMBUF + 6 + PROC_NUMBUF, GFP_KERNEL);
-               if (!name)
-                       name = ERR_PTR(-ENOMEM);
-               else
-                       sprintf(name, "%d/task/%d", tgid, pid);
-       }
-       nd_set_link(nd, name);
-       return NULL;
+       char *name;
+
+       if (!pid)
+               return ERR_PTR(-ENOENT);
+       name = kmalloc(PROC_NUMBUF + 6 + PROC_NUMBUF, GFP_KERNEL);
+       if (!name)
+               return ERR_PTR(-ENOMEM);
+       sprintf(name, "%d/task/%d", tgid, pid);
+       return *cookie = name;
 }
 
 static const struct inode_operations proc_thread_self_inode_operations = {
index f684c750e08a549283733c1340420a61733b4b61..015547330e88699dccb37fe1d4509e1dc07394f7 100644 (file)
@@ -189,7 +189,7 @@ static int __pollwake(wait_queue_t *wait, unsigned mode, int sync, void *key)
         * doesn't imply write barrier and the users expect write
         * barrier semantics on wakeup functions.  The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-        * and is paired with set_mb() in poll_schedule_timeout.
+        * and is paired with smp_store_mb() in poll_schedule_timeout.
         */
        smp_wmb();
        pwq->triggered = 1;
@@ -244,7 +244,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
        /*
         * Prepare for the next iteration.
         *
-        * The following set_mb() serves two purposes.  First, it's
+        * The following smp_store_mb() serves two purposes.  First, it's
         * the counterpart rmb of the wmb in pollwake() such that data
         * written before wake up is always visible after wake up.
         * Second, the full barrier guarantees that triggered clearing
@@ -252,7 +252,7 @@ int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
         * this problem doesn't exist for the first iteration as
         * add_wait_queue() has full barrier semantics.
         */
-       set_mb(pwq->triggered, 0);
+       smp_store_mb(pwq->triggered, 0);
 
        return rc;
 }
index 476024bb6546527887517868b122c9305dc32d07..bfe62ae40f40920e6b95fa8ce16cc3130b8b0972 100644 (file)
@@ -1161,7 +1161,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
        long ret, bytes;
        umode_t i_mode;
        size_t len;
-       int i, flags;
+       int i, flags, more;
 
        /*
         * We require the input being a regular file, as we don't want to
@@ -1204,6 +1204,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
         * Don't block on output, we have to drain the direct pipe.
         */
        sd->flags &= ~SPLICE_F_NONBLOCK;
+       more = sd->flags & SPLICE_F_MORE;
 
        while (len) {
                size_t read_len;
@@ -1216,6 +1217,15 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
                read_len = ret;
                sd->total_len = read_len;
 
+               /*
+                * If more data is pending, set SPLICE_F_MORE.
+                * If this is the last chunk and SPLICE_F_MORE was not set
+                * initially, clear it.
+                */
+               if (read_len < len)
+                       sd->flags |= SPLICE_F_MORE;
+               else if (!more)
+                       sd->flags &= ~SPLICE_F_MORE;
                /*
                 * NOTE: nonblocking mode only applies to the input. We
                 * must not do the output in nonblocking mode as then we
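
SPLICE_F_MORE tells the output side that more data will follow, letting e.g. TCP hold back partial frames; the hunk above keeps it set for every chunk except the last unless the caller asked for it. A hedged userspace sketch of the same idea (helper name and chunk size are illustrative):

#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

/* Illustrative: copy len bytes from a file to a socket through a pipe,
 * keeping SPLICE_F_MORE set on every chunk except the last. */
static int demo_splice_more(int fd_in, int fd_out, size_t len)
{
	int pipefd[2];
	ssize_t n = 0;

	if (pipe(pipefd) < 0)
		return -1;
	while (len > 0) {
		size_t chunk = len > 65536 ? 65536 : len;
		unsigned int flags = len > chunk ? SPLICE_F_MORE : 0;

		n = splice(fd_in, NULL, pipefd[1], NULL, chunk, flags);
		if (n <= 0)
			break;
		n = splice(pipefd[0], NULL, fd_out, NULL, n, flags);
		if (n <= 0)
			break;
		len -= n;
	}
	close(pipefd[0]);
	close(pipefd[1]);
	return len == 0 ? 0 : -1;
}
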
index 3591f9d7a48a4a38948d30f7b523103471b85889..7a75e70a4b61b9d9fdc9fdd5d087be105304d62e 100644 (file)
@@ -5,4 +5,4 @@
 obj-$(CONFIG_SYSV_FS) += sysv.o
 
 sysv-objs := ialloc.o balloc.o inode.o itree.o file.o dir.o \
-            namei.o super.o symlink.o
+            namei.o super.o
index 88956309cc86ab6d6d614315651ced6778647347..590ad9206e3f4e761d2c2ad95cfaedfa76b47e16 100644 (file)
@@ -166,8 +166,9 @@ void sysv_set_inode(struct inode *inode, dev_t rdev)
                        inode->i_op = &sysv_symlink_inode_operations;
                        inode->i_mapping->a_ops = &sysv_aops;
                } else {
-                       inode->i_op = &sysv_fast_symlink_inode_operations;
-                       nd_terminate_link(SYSV_I(inode)->i_data, inode->i_size,
+                       inode->i_op = &simple_symlink_inode_operations;
+                       inode->i_link = (char *)SYSV_I(inode)->i_data;
+                       nd_terminate_link(inode->i_link, inode->i_size,
                                sizeof(SYSV_I(inode)->i_data) - 1);
                }
        } else
diff --git a/fs/sysv/symlink.c b/fs/sysv/symlink.c
deleted file mode 100644 (file)
index d3fa0d7..0000000
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- *  linux/fs/sysv/symlink.c
- *
- *  Handling of System V filesystem fast symlinks extensions.
- *  Aug 2001, Christoph Hellwig (hch@infradead.org)
- */
-
-#include "sysv.h"
-#include <linux/namei.h>
-
-static void *sysv_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, (char *)SYSV_I(d_inode(dentry))->i_data);
-       return NULL;
-}
-
-const struct inode_operations sysv_fast_symlink_inode_operations = {
-       .readlink       = generic_readlink,
-       .follow_link    = sysv_follow_link,
-};
index 69d488986cce4923860c6d1f4c7ab325370fc5ce..2c13525131cd8146dd19d6a07086a70afc97b25a 100644 (file)
@@ -161,7 +161,6 @@ extern ino_t sysv_inode_by_name(struct dentry *);
 
 extern const struct inode_operations sysv_file_inode_operations;
 extern const struct inode_operations sysv_dir_inode_operations;
-extern const struct inode_operations sysv_fast_symlink_inode_operations;
 extern const struct file_operations sysv_file_operations;
 extern const struct file_operations sysv_dir_operations;
 extern const struct address_space_operations sysv_aops;
index 27060fc855d42549b2bbe7d3cbe6329858e3fb2c..5c27c66c224af38618ec4f92b453bd8e65d24608 100644 (file)
@@ -889,6 +889,7 @@ static int ubifs_symlink(struct inode *dir, struct dentry *dentry,
 
        memcpy(ui->data, symname, len);
        ((char *)ui->data)[len] = '\0';
+       inode->i_link = ui->data;
        /*
         * The terminating zero byte is not written to the flash media and it
         * is put just to make later in-memory string processing simpler. Thus,
index 35efc103c39c102215cebb0792b6b4f24d49b90d..a3dfe2ae79f28592a0ba01feb6d0b1889345957c 100644 (file)
@@ -51,7 +51,6 @@
 
 #include "ubifs.h"
 #include <linux/mount.h>
-#include <linux/namei.h>
 #include <linux/slab.h>
 
 static int read_block(struct inode *inode, void *addr, unsigned int block,
@@ -1300,14 +1299,6 @@ static void ubifs_invalidatepage(struct page *page, unsigned int offset,
        ClearPageChecked(page);
 }
 
-static void *ubifs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct ubifs_inode *ui = ubifs_inode(d_inode(dentry));
-
-       nd_set_link(nd, ui->data);
-       return NULL;
-}
-
 int ubifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
 {
        struct inode *inode = file->f_mapping->host;
@@ -1570,7 +1561,7 @@ const struct inode_operations ubifs_file_inode_operations = {
 
 const struct inode_operations ubifs_symlink_inode_operations = {
        .readlink    = generic_readlink,
-       .follow_link = ubifs_follow_link,
+       .follow_link = simple_follow_link,
        .setattr     = ubifs_setattr,
        .getattr     = ubifs_getattr,
        .setxattr    = ubifs_setxattr,
index 75e6f04bb795a9605a5d8c7cf6e4607b89a5f7cf..20f5dbd7c6a8b6bee476dc6139b803c1e14d5a47 100644 (file)
@@ -195,6 +195,7 @@ struct inode *ubifs_iget(struct super_block *sb, unsigned long inum)
                }
                memcpy(ui->data, ino->data, ui->data_len);
                ((char *)ui->data)[ui->data_len] = '\0';
+               inode->i_link = ui->data;
                break;
        case S_IFBLK:
        case S_IFCHR:
index be7d42c7d9382bf8072a7e6e9eff7e03b24bab25..99aaf5c9bf4d83f0f5ec6469d6d6cb20827750d0 100644 (file)
@@ -572,9 +572,10 @@ static void ufs_set_inode_ops(struct inode *inode)
                inode->i_fop = &ufs_dir_operations;
                inode->i_mapping->a_ops = &ufs_aops;
        } else if (S_ISLNK(inode->i_mode)) {
-               if (!inode->i_blocks)
+               if (!inode->i_blocks) {
                        inode->i_op = &ufs_fast_symlink_inode_operations;
-               else {
+                       inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
+               } else {
                        inode->i_op = &ufs_symlink_inode_operations;
                        inode->i_mapping->a_ops = &ufs_aops;
                }
index e491a93a7e9af14c4227ee5072fa4a9d0bc17709..f773deb1d2e3fd561b906a0dd2a050d16161a5be 100644 (file)
@@ -144,7 +144,8 @@ static int ufs_symlink (struct inode * dir, struct dentry * dentry,
        } else {
                /* fast symlink */
                inode->i_op = &ufs_fast_symlink_inode_operations;
-               memcpy(UFS_I(inode)->i_u1.i_symlink, symname, l);
+               inode->i_link = (char *)UFS_I(inode)->i_u1.i_symlink;
+               memcpy(inode->i_link, symname, l);
                inode->i_size = l-1;
        }
        mark_inode_dirty(inode);
index 5b537e2fdda385a0da9aa346e93e05e83084e1dd..874480bb43e9d08190707d7d2ed30aee02eec531 100644 (file)
  *  ext2 symlink handling code
  */
 
-#include <linux/fs.h>
-#include <linux/namei.h>
-
 #include "ufs_fs.h"
 #include "ufs.h"
 
-
-static void *ufs_follow_link(struct dentry *dentry, struct nameidata *nd)
-{
-       struct ufs_inode_info *p = UFS_I(d_inode(dentry));
-       nd_set_link(nd, (char*)p->i_u1.i_symlink);
-       return NULL;
-}
-
 const struct inode_operations ufs_fast_symlink_inode_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = ufs_follow_link,
+       .follow_link    = simple_follow_link,
        .setattr        = ufs_setattr,
 };
 
index 04e79d57bca600b4a21cd0a2a936639d9556e6c8..e9d401ce93bb19d822a2ec9b475dae7ad5d279c1 100644 (file)
@@ -574,8 +574,8 @@ xfs_attr_shortform_add(xfs_da_args_t *args, int forkoff)
  * After the last attribute is removed revert to original inode format,
  * making all literal area available to the data fork once more.
  */
-STATIC void
-xfs_attr_fork_reset(
+void
+xfs_attr_fork_remove(
        struct xfs_inode        *ip,
        struct xfs_trans        *tp)
 {
@@ -641,7 +641,7 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
            (mp->m_flags & XFS_MOUNT_ATTR2) &&
            (dp->i_d.di_format != XFS_DINODE_FMT_BTREE) &&
            !(args->op_flags & XFS_DA_OP_ADDNAME)) {
-               xfs_attr_fork_reset(dp, args->trans);
+               xfs_attr_fork_remove(dp, args->trans);
        } else {
                xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
                dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
@@ -905,7 +905,7 @@ xfs_attr3_leaf_to_shortform(
        if (forkoff == -1) {
                ASSERT(dp->i_mount->m_flags & XFS_MOUNT_ATTR2);
                ASSERT(dp->i_d.di_format != XFS_DINODE_FMT_BTREE);
-               xfs_attr_fork_reset(dp, args->trans);
+               xfs_attr_fork_remove(dp, args->trans);
                goto out;
        }
 
index 025c4b820c03a1c642c18d9c53d46e08a0ab2d94..882c8d3388913b3d44aa9105184920feb89709a6 100644 (file)
@@ -53,7 +53,7 @@ int   xfs_attr_shortform_remove(struct xfs_da_args *args);
 int    xfs_attr_shortform_list(struct xfs_attr_list_context *context);
 int    xfs_attr_shortform_allfit(struct xfs_buf *bp, struct xfs_inode *dp);
 int    xfs_attr_shortform_bytesfit(xfs_inode_t *dp, int bytes);
-
+void   xfs_attr_fork_remove(struct xfs_inode *ip, struct xfs_trans *tp);
 
 /*
  * Internal routines when attribute fork size == XFS_LBSIZE(mp).
index aeffeaaac0ec406e543730eb608de1eb3ebc40fb..f1026e86dabc9a00ead716785a3acb5c19ee8e10 100644 (file)
@@ -3224,12 +3224,24 @@ xfs_bmap_extsize_align(
                align_alen += temp;
                align_off -= temp;
        }
+
+       /* Same adjustment for the end of the requested area. */
+       temp = (align_alen % extsz);
+       if (temp)
+               align_alen += extsz - temp;
+
        /*
-        * Same adjustment for the end of the requested area.
+        * For large extent hint sizes, the aligned extent might be larger than
+        * MAXEXTLEN. In that case, reduce the size by an extsz so that it pulls
+        * the length back under MAXEXTLEN. The outer allocation loops handle
+        * short allocation just fine, so it is safe to do this. We only want to
+        * do it when we are forced to, though, because it means more allocation
+        * operations are required.
         */
-       if ((temp = (align_alen % extsz))) {
-               align_alen += extsz - temp;
-       }
+       while (align_alen > MAXEXTLEN)
+               align_alen -= extsz;
+       ASSERT(align_alen <= MAXEXTLEN);
+
        /*
         * If the previous block overlaps with this proposed allocation
         * then move the start forward without adjusting the length.
@@ -3318,7 +3330,9 @@ xfs_bmap_extsize_align(
                        return -EINVAL;
        } else {
                ASSERT(orig_off >= align_off);
-               ASSERT(orig_end <= align_off + align_alen);
+               /* see MAXEXTLEN handling above */
+               ASSERT(orig_end <= align_off + align_alen ||
+                      align_alen + extsz > MAXEXTLEN);
        }
 
 #ifdef DEBUG
@@ -4099,13 +4113,6 @@ xfs_bmapi_reserve_delalloc(
        /* Figure out the extent size, adjust alen */
        extsz = xfs_get_extsz_hint(ip);
        if (extsz) {
-               /*
-                * Make sure we don't exceed a single extent length when we
-                * align the extent by reducing length we are going to
-                * allocate by the maximum amount extent size aligment may
-                * require.
-                */
-               alen = XFS_FILBLKS_MIN(len, MAXEXTLEN - (2 * extsz - 1));
                error = xfs_bmap_extsize_align(mp, got, prev, extsz, rt, eof,
                                               1, 0, &aoff, &alen);
                ASSERT(!error);
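
The replacement logic first rounds the aligned length up to a multiple of the extent size hint, then subtracts whole extsz units until it is back under MAXEXTLEN, relying on the callers tolerating short allocations. The arithmetic in isolation (an illustrative standalone version, not the XFS function; it assumes extsz <= maxextlen so the loop terminates above zero):

static unsigned long long
demo_extsize_align(unsigned long long alen, unsigned int extsz,
		   unsigned long long maxextlen)
{
	unsigned long long temp = alen % extsz;

	if (temp)			/* round up to an extsz multiple */
		alen += extsz - temp;
	while (alen > maxextlen)	/* pull back under the per-extent limit */
		alen -= extsz;
	return alen;
}
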
index 07349a183a110fdf57bdf9a7f1d704452de5cdb7..1c9e75521250ecf606639578ce79696b6ff4a682 100644 (file)
@@ -376,7 +376,7 @@ xfs_ialloc_ag_alloc(
         */
        newlen = args.mp->m_ialloc_inos;
        if (args.mp->m_maxicount &&
-           percpu_counter_read(&args.mp->m_icount) + newlen >
+           percpu_counter_read_positive(&args.mp->m_icount) + newlen >
                                                        args.mp->m_maxicount)
                return -ENOSPC;
        args.minlen = args.maxlen = args.mp->m_ialloc_blks;
@@ -1339,10 +1339,13 @@ xfs_dialloc(
         * If we have already hit the ceiling of inode blocks then clear
         * okalloc so we scan all available agi structures for a free
         * inode.
+        *
+        * Read an approximate value of mp->m_icount via
+        * percpu_counter_read_positive(), trading precision for performance.
         */
        if (mp->m_maxicount &&
-           percpu_counter_read(&mp->m_icount) + mp->m_ialloc_inos >
-                                                       mp->m_maxicount) {
+           percpu_counter_read_positive(&mp->m_icount) + mp->m_ialloc_inos >
+                                                       mp->m_maxicount) {
                noroom = 1;
                okalloc = 0;
        }
index f9c1c64782d39ec36fabf800653772f8c5b24280..3fbf167cfb4cddfcb42a57ca7d613096d5c97fe0 100644 (file)
@@ -380,23 +380,31 @@ xfs_attr3_root_inactive(
        return error;
 }
 
+/*
+ * xfs_attr_inactive kills all traces of an attribute fork on an inode. It
+ * removes both the on-disk and in-memory inode fork. Note that this also has to
+ * handle the condition of inodes without attributes but with an attribute fork
+ * configured, so we can't use xfs_inode_hasattr() here.
+ *
+ * The in-memory attribute fork is removed even on error.
+ */
 int
-xfs_attr_inactive(xfs_inode_t *dp)
+xfs_attr_inactive(
+       struct xfs_inode        *dp)
 {
-       xfs_trans_t *trans;
-       xfs_mount_t *mp;
-       int error;
+       struct xfs_trans        *trans;
+       struct xfs_mount        *mp;
+       int                     cancel_flags = 0;
+       int                     lock_mode = XFS_ILOCK_SHARED;
+       int                     error = 0;
 
        mp = dp->i_mount;
        ASSERT(! XFS_NOT_DQATTACHED(mp, dp));
 
-       xfs_ilock(dp, XFS_ILOCK_SHARED);
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               xfs_iunlock(dp, XFS_ILOCK_SHARED);
-               return 0;
-       }
-       xfs_iunlock(dp, XFS_ILOCK_SHARED);
+       xfs_ilock(dp, lock_mode);
+       if (!XFS_IFORK_Q(dp))
+               goto out_destroy_fork;
+       xfs_iunlock(dp, lock_mode);
 
        /*
         * Start our first transaction of the day.
@@ -408,13 +416,18 @@ xfs_attr_inactive(xfs_inode_t *dp)
         * the inode in every transaction to let it float upward through
         * the log.
         */
+       lock_mode = 0;
        trans = xfs_trans_alloc(mp, XFS_TRANS_ATTRINVAL);
        error = xfs_trans_reserve(trans, &M_RES(mp)->tr_attrinval, 0, 0);
-       if (error) {
-               xfs_trans_cancel(trans, 0);
-               return error;
-       }
-       xfs_ilock(dp, XFS_ILOCK_EXCL);
+       if (error)
+               goto out_cancel;
+
+       lock_mode = XFS_ILOCK_EXCL;
+       cancel_flags = XFS_TRANS_RELEASE_LOG_RES | XFS_TRANS_ABORT;
+       xfs_ilock(dp, lock_mode);
+
+       if (!XFS_IFORK_Q(dp))
+               goto out_cancel;
 
        /*
         * No need to make quota reservations here. We expect to release some
@@ -422,29 +435,31 @@ xfs_attr_inactive(xfs_inode_t *dp)
         */
        xfs_trans_ijoin(trans, dp, 0);
 
-       /*
-        * Decide on what work routines to call based on the inode size.
-        */
-       if (!xfs_inode_hasattr(dp) ||
-           dp->i_d.di_aformat == XFS_DINODE_FMT_LOCAL) {
-               error = 0;
-               goto out;
+       /* invalidate and truncate the attribute fork extents */
+       if (dp->i_d.di_aformat != XFS_DINODE_FMT_LOCAL) {
+               error = xfs_attr3_root_inactive(&trans, dp);
+               if (error)
+                       goto out_cancel;
+
+               error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
+               if (error)
+                       goto out_cancel;
        }
-       error = xfs_attr3_root_inactive(&trans, dp);
-       if (error)
-               goto out;
 
-       error = xfs_itruncate_extents(&trans, dp, XFS_ATTR_FORK, 0);
-       if (error)
-               goto out;
+       /* Reset the attribute fork - this also destroys the in-core fork */
+       xfs_attr_fork_remove(dp, trans);
 
        error = xfs_trans_commit(trans, XFS_TRANS_RELEASE_LOG_RES);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
-
+       xfs_iunlock(dp, lock_mode);
        return error;
 
-out:
-       xfs_trans_cancel(trans, XFS_TRANS_RELEASE_LOG_RES|XFS_TRANS_ABORT);
-       xfs_iunlock(dp, XFS_ILOCK_EXCL);
+out_cancel:
+       xfs_trans_cancel(trans, cancel_flags);
+out_destroy_fork:
+       /* kill the in-core attr fork before we drop the inode lock */
+       if (dp->i_afp)
+               xfs_idestroy_fork(dp, XFS_ATTR_FORK);
+       if (lock_mode)
+               xfs_iunlock(dp, lock_mode);
        return error;
 }
index 8121e75352ee9bddd4726ca685d6d3e855256bdd..3b7591224f4a6698d32371a927e70cb2a391f4a9 100644 (file)
@@ -124,7 +124,7 @@ xfs_iozero(
                status = 0;
        } while (count);
 
-       return (-status);
+       return status;
 }
 
 int
index d6ebc85192b7b3f4fd21e3cbc25ccb5f54501319..539a85fddbc26864004e80f5fb229c6c2de565b8 100644 (file)
@@ -1946,21 +1946,17 @@ xfs_inactive(
        /*
         * If there are attributes associated with the file then blow them away
         * now.  The code calls a routine that recursively deconstructs the
-        * attribute fork.  We need to just commit the current transaction
-        * because we can't use it for xfs_attr_inactive().
+        * attribute fork. It also blows away the in-core attribute fork.
         */
-       if (ip->i_d.di_anextents > 0) {
-               ASSERT(ip->i_d.di_forkoff != 0);
-
+       if (XFS_IFORK_Q(ip)) {
                error = xfs_attr_inactive(ip);
                if (error)
                        return;
        }
 
-       if (ip->i_afp)
-               xfs_idestroy_fork(ip, XFS_ATTR_FORK);
-
+       ASSERT(!ip->i_afp);
        ASSERT(ip->i_d.di_anextents == 0);
+       ASSERT(ip->i_d.di_forkoff == 0);
 
        /*
         * Free the inode.
@@ -2883,7 +2879,13 @@ xfs_rename_alloc_whiteout(
        if (error)
                return error;
 
-       /* Satisfy xfs_bumplink that this is a real tmpfile */
+       /*
+        * Prepare the tmpfile inode as if it were created through the VFS.
+        * Otherwise, the link increment paths will complain about nlink 0->1.
+        * Drop the link count as done by d_tmpfile(), complete the inode setup
+        * and flag it as linkable.
+        */
+       drop_nlink(VFS_I(tmpfile));
        xfs_finish_inode_setup(tmpfile);
        VFS_I(tmpfile)->i_state |= I_LINKABLE;
 
@@ -3151,7 +3153,7 @@ xfs_rename(
         * intermediate state on disk.
         */
        if (wip) {
-               ASSERT(wip->i_d.di_nlink == 0);
+               ASSERT(VFS_I(wip)->i_nlink == 0 && wip->i_d.di_nlink == 0);
                error = xfs_bumplink(tp, wip);
                if (error)
                        goto out_trans_abort;
index f4cd7204e23667724c01a4c4b8efe8c1d48b1cb3..7f51f39f8acc0a2fd407a7be57c1477439213a0f 100644 (file)
@@ -41,7 +41,6 @@
 
 #include <linux/capability.h>
 #include <linux/xattr.h>
-#include <linux/namei.h>
 #include <linux/posix_acl.h>
 #include <linux/security.h>
 #include <linux/fiemap.h>
@@ -414,10 +413,10 @@ xfs_vn_rename(
  * we need to be very careful about how much stack we use.
  * uio is kmalloced for this reason...
  */
-STATIC void *
+STATIC const char *
 xfs_vn_follow_link(
        struct dentry           *dentry,
-       struct nameidata        *nd)
+       void                    **cookie)
 {
        char                    *link;
        int                     error = -ENOMEM;
@@ -430,14 +429,12 @@ xfs_vn_follow_link(
        if (unlikely(error))
                goto out_kfree;
 
-       nd_set_link(nd, link);
-       return NULL;
+       return *cookie = link;
 
  out_kfree:
        kfree(link);
  out_err:
-       nd_set_link(nd, ERR_PTR(error));
-       return NULL;
+       return ERR_PTR(error);
 }
 
 STATIC int
index 2ce7ee3b4ec1fdb9e9344a1ec7ea3a2df5a3b29c..6f23fbdfb365adca1571eadece38b77a619c50ad 100644 (file)
@@ -1084,14 +1084,18 @@ xfs_log_sbcount(xfs_mount_t *mp)
        return xfs_sync_sb(mp, true);
 }
 
+/*
+ * Deltas for the inode count are +/-64, hence we use a large batch size
+ * of 128 so we don't need to take the counter lock on every update.
+ */
+#define XFS_ICOUNT_BATCH       128
 int
 xfs_mod_icount(
        struct xfs_mount        *mp,
        int64_t                 delta)
 {
-       /* deltas are +/-64, hence the large batch size of 128. */
-       __percpu_counter_add(&mp->m_icount, delta, 128);
-       if (percpu_counter_compare(&mp->m_icount, 0) < 0) {
+       __percpu_counter_add(&mp->m_icount, delta, XFS_ICOUNT_BATCH);
+       if (__percpu_counter_compare(&mp->m_icount, 0, XFS_ICOUNT_BATCH) < 0) {
                ASSERT(0);
                percpu_counter_add(&mp->m_icount, -delta);
                return -EINVAL;
@@ -1113,6 +1117,14 @@ xfs_mod_ifree(
        return 0;
 }
 
+/*
+ * Deltas for the block count can vary from 1 to very large, but lock contention
+ * only occurs on frequent small block count updates such as in the delayed
+ * allocation path for buffered writes (page-at-a-time updates). Hence we set
+ * a large batch count (1024) to minimise global counter updates except when
+ * we get near to ENOSPC and we have to be very accurate with our updates.
+ */
+#define XFS_FDBLOCKS_BATCH     1024
 int
 xfs_mod_fdblocks(
        struct xfs_mount        *mp,
@@ -1151,25 +1163,19 @@ xfs_mod_fdblocks(
         * Taking blocks away, need to be more accurate the closer we
         * are to zero.
         *
-        * batch size is set to a maximum of 1024 blocks - if we are
-        * allocating of freeing extents larger than this then we aren't
-        * going to be hammering the counter lock so a lock per update
-        * is not a problem.
-        *
         * If the counter has a value of less than 2 * max batch size,
         * then make everything serialise as we are real close to
         * ENOSPC.
         */
-#define __BATCH        1024
-       if (percpu_counter_compare(&mp->m_fdblocks, 2 * __BATCH) < 0)
+       if (__percpu_counter_compare(&mp->m_fdblocks, 2 * XFS_FDBLOCKS_BATCH,
+                                    XFS_FDBLOCKS_BATCH) < 0)
                batch = 1;
        else
-               batch = __BATCH;
-#undef __BATCH
+               batch = XFS_FDBLOCKS_BATCH;
 
        __percpu_counter_add(&mp->m_fdblocks, delta, batch);
-       if (percpu_counter_compare(&mp->m_fdblocks,
-                                  XFS_ALLOC_SET_ASIDE(mp)) >= 0) {
+       if (__percpu_counter_compare(&mp->m_fdblocks, XFS_ALLOC_SET_ASIDE(mp),
+                                    XFS_FDBLOCKS_BATCH) >= 0) {
                /* we had space! */
                return 0;
        }
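
__percpu_counter_add() lets per-CPU deltas accumulate up to the batch before folding into the global count, so a counter updated with batch B can be off by roughly nr_cpus * B; __percpu_counter_compare() must be told the same B so it knows when to fall back to a precise sum. A hedged sketch of that discipline (the helper is illustrative, not the XFS code):

#include <linux/errno.h>
#include <linux/percpu_counter.h>

#define DEMO_BATCH	128

/* Illustrative: the fast global read may be off by about nr_cpus * B,
 * so the comparison helper takes the same batch and only does a precise
 * (locked) sum when the fast value is within that margin of the limit. */
static int demo_mod_limited(struct percpu_counter *c, s64 delta, s64 limit)
{
	__percpu_counter_add(c, delta, DEMO_BATCH);
	if (__percpu_counter_compare(c, limit, DEMO_BATCH) > 0) {
		percpu_counter_add(c, -delta);	/* undo the overshoot */
		return -ENOSPC;
	}
	return 0;
}
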
index f5c40b0fadc2a50be563304727db2e7ad7fe6699..e6a83d712ef6772ac7265e914837da7806e21441 100644 (file)
@@ -66,8 +66,8 @@
 #define smp_read_barrier_depends()     do { } while (0)
 #endif
 
-#ifndef set_mb
-#define set_mb(var, value)  do { (var) = (value); mb(); } while (0)
+#ifndef smp_store_mb
+#define smp_store_mb(var, value)  do { WRITE_ONCE(var, value); mb(); } while (0)
 #endif
 
 #ifndef smp_mb__before_atomic
index 811fb1e9b06131303d41f07797becb024e61d77f..3766ab34aa45afae86287a7c213c3311c88b4be2 100644 (file)
@@ -86,9 +86,6 @@ unsigned long __xchg(unsigned long x, volatile void *ptr, int size)
 
 /*
  * Atomic compare and exchange.
- *
- * Do not define __HAVE_ARCH_CMPXCHG because we want to use it to check whether
- * a cmpxchg primitive faster than repeated local irq save/restore exists.
  */
 #include <asm-generic/cmpxchg-local.h>
 
index b59b5a52637ec78397262862b06796b5d2fa2329..e56272c919b5a688e1739cdefaa08c121e26f5a8 100644 (file)
@@ -8,8 +8,7 @@
 #ifndef CONFIG_SMP
 /*
 * The following implementation is only for uniprocessor machines.
- * For UP, it's relies on the fact that pagefault_disable() also disables
- * preemption to ensure mutual exclusion.
+ * It relies on preempt_disable() ensuring mutual exclusion.
  *
  */
 
@@ -38,6 +37,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
        if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
                oparg = 1 << oparg;
 
+       preempt_disable();
        pagefault_disable();
 
        ret = -EFAULT;
@@ -72,6 +72,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr)
 
 out_pagefault_enable:
        pagefault_enable();
+       preempt_enable();
 
        if (ret == 0) {
                switch (cmp) {
@@ -106,6 +107,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
 {
        u32 val;
 
+       preempt_disable();
        if (unlikely(get_user(val, uaddr) != 0))
                return -EFAULT;
 
@@ -113,6 +115,7 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
                return -EFAULT;
 
        *uval = val;
+       preempt_enable();
 
        return 0;
 }
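
On UP, preempt_disable() alone makes the get_user()/put_user() sequence atomic. A hedged sketch of the same protection with preemption re-enabled on every exit path, including the -EFAULT returns (illustrative; the asm-generic code as patched returns early on fault instead):

#include <linux/preempt.h>
#include <linux/uaccess.h>

static int
demo_futex_cmpxchg(u32 *uval, u32 __user *uaddr, u32 oldval, u32 newval)
{
	u32 val;
	int ret = -EFAULT;

	preempt_disable();	/* on UP this alone serializes the sequence */
	if (unlikely(get_user(val, uaddr) != 0))
		goto out;
	if (val == oldval && unlikely(put_user(newval, uaddr) != 0))
		goto out;
	*uval = val;
	ret = 0;
out:
	preempt_enable();	/* balanced on every path */
	return ret;
}
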
index 9db042304df37d0c97eea09be466fa403d505603..f56094cfdeff0e0f312045212bcf19e8a99882db 100644 (file)
@@ -769,6 +769,14 @@ static inline void __iomem *ioremap_nocache(phys_addr_t offset, size_t size)
 }
 #endif
 
+#ifndef ioremap_uc
+#define ioremap_uc ioremap_uc
+static inline void __iomem *ioremap_uc(phys_addr_t offset, size_t size)
+{
+       return ioremap_nocache(offset, size);
+}
+#endif
+
 #ifndef ioremap_wc
 #define ioremap_wc ioremap_wc
 static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
@@ -777,8 +785,17 @@ static inline void __iomem *ioremap_wc(phys_addr_t offset, size_t size)
 }
 #endif
 
+#ifndef ioremap_wt
+#define ioremap_wt ioremap_wt
+static inline void __iomem *ioremap_wt(phys_addr_t offset, size_t size)
+{
+       return ioremap_nocache(offset, size);
+}
+#endif
+
 #ifndef iounmap
 #define iounmap iounmap
+
 static inline void iounmap(void __iomem *addr)
 {
 }
index 1b41011643a5ebb694742f7a11ba6c54bba35a16..d8f8622fa044dbaeba223bc373f57dad5dac6070 100644 (file)
@@ -66,6 +66,10 @@ extern void ioport_unmap(void __iomem *);
 #define ioremap_wc ioremap_nocache
 #endif
 
+#ifndef ARCH_HAS_IOREMAP_WT
+#define ioremap_wt ioremap_nocache
+#endif
+
 #ifdef CONFIG_PCI
 /* Destroy a virtual mapping cookie for a PCI BAR (memory or IO) */
 struct pci_dev;
index 39f1d6a2b04d4292f2dbd01b59bc748484824eff..bd910ceaccfa2d5b66da77b1b5becc760c50a615 100644 (file)
@@ -262,6 +262,10 @@ static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
 #define pgprot_writecombine pgprot_noncached
 #endif
 
+#ifndef pgprot_writethrough
+#define pgprot_writethrough pgprot_noncached
+#endif
+
 #ifndef pgprot_device
 #define pgprot_device pgprot_noncached
 #endif
index eb6f9e6c30756f5f39605582cf34c0e48c6a3df1..d0a7a4753db2b3cb516ceed3c331ad330b92e877 100644 (file)
@@ -79,11 +79,8 @@ static __always_inline bool should_resched(void)
 #ifdef CONFIG_PREEMPT
 extern asmlinkage void preempt_schedule(void);
 #define __preempt_schedule() preempt_schedule()
-
-#ifdef CONFIG_CONTEXT_TRACKING
-extern asmlinkage void preempt_schedule_context(void);
-#define __preempt_schedule_context() preempt_schedule_context()
-#endif
+extern asmlinkage void preempt_schedule_notrace(void);
+#define __preempt_schedule_notrace() preempt_schedule_notrace()
 #endif /* CONFIG_PREEMPT */
 
 #endif /* __ASM_PREEMPT_H */
diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
new file mode 100644 (file)
index 0000000..83bfb87
--- /dev/null
@@ -0,0 +1,139 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_H
+#define __ASM_GENERIC_QSPINLOCK_H
+
+#include <asm-generic/qspinlock_types.h>
+
+/**
+ * queued_spin_is_locked - is the spinlock locked?
+ * @lock: Pointer to queued spinlock structure
+ * Return: 1 if it is locked, 0 otherwise
+ */
+static __always_inline int queued_spin_is_locked(struct qspinlock *lock)
+{
+       return atomic_read(&lock->val);
+}
+
+/**
+ * queued_spin_value_unlocked - is the spinlock structure unlocked?
+ * @lock: queued spinlock structure
+ * Return: 1 if it is unlocked, 0 otherwise
+ *
+ * N.B. Whenever there are tasks waiting for the lock, it is considered
+ *      locked wrt the lockref code, to keep the lockref code from stealing
+ *      the lock and changing things underneath it. This also allows some
+ *      optimizations to be applied without conflict with lockref.
+ */
+static __always_inline int queued_spin_value_unlocked(struct qspinlock lock)
+{
+       return !atomic_read(&lock.val);
+}
+
+/**
+ * queued_spin_is_contended - check if the lock is contended
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock contended, 0 otherwise
+ */
+static __always_inline int queued_spin_is_contended(struct qspinlock *lock)
+{
+       return atomic_read(&lock->val) & ~_Q_LOCKED_MASK;
+}
+/**
+ * queued_spin_trylock - try to acquire the queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ * Return: 1 if lock acquired, 0 if failed
+ */
+static __always_inline int queued_spin_trylock(struct qspinlock *lock)
+{
+       if (!atomic_read(&lock->val) &&
+          (atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL) == 0))
+               return 1;
+       return 0;
+}
+
+extern void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val);
+
+/**
+ * queued_spin_lock - acquire a queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_lock(struct qspinlock *lock)
+{
+       u32 val;
+
+       val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+       if (likely(val == 0))
+               return;
+       queued_spin_lock_slowpath(lock, val);
+}
+
+#ifndef queued_spin_unlock
+/**
+ * queued_spin_unlock - release a queued spinlock
+ * @lock : Pointer to queued spinlock structure
+ */
+static __always_inline void queued_spin_unlock(struct qspinlock *lock)
+{
+       /*
+        * smp_mb__before_atomic() in order to guarantee release semantics
+        */
+       smp_mb__before_atomic();
+       atomic_sub(_Q_LOCKED_VAL, &lock->val);
+}
+#endif
+
+/**
+ * queued_spin_unlock_wait - wait until current lock holder releases the lock
+ * @lock : Pointer to queued spinlock structure
+ *
+ * There is a very slight possibility of live-lock if the lockers keep coming
+ * and the waiter is just unfortunate enough to not see any unlock state.
+ */
+static inline void queued_spin_unlock_wait(struct qspinlock *lock)
+{
+       while (atomic_read(&lock->val) & _Q_LOCKED_MASK)
+               cpu_relax();
+}
+
+#ifndef virt_queued_spin_lock
+static __always_inline bool virt_queued_spin_lock(struct qspinlock *lock)
+{
+       return false;
+}
+#endif
+
+/*
+ * Initializer
+ */
+#define        __ARCH_SPIN_LOCK_UNLOCKED       { ATOMIC_INIT(0) }
+
+/*
+ * Remapping spinlock architecture specific functions to the corresponding
+ * queued spinlock functions.
+ */
+#define arch_spin_is_locked(l)         queued_spin_is_locked(l)
+#define arch_spin_is_contended(l)      queued_spin_is_contended(l)
+#define arch_spin_value_unlocked(l)    queued_spin_value_unlocked(l)
+#define arch_spin_lock(l)              queued_spin_lock(l)
+#define arch_spin_trylock(l)           queued_spin_trylock(l)
+#define arch_spin_unlock(l)            queued_spin_unlock(l)
+#define arch_spin_lock_flags(l, f)     queued_spin_lock(l)
+#define arch_spin_unlock_wait(l)       queued_spin_unlock_wait(l)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_H */
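
For context, an architecture adopts this lock by shipping its own asm/qspinlock.h that may override queued_spin_unlock() before including the generic header. A sketch modelled loosely on the x86 wiring from the same series (the architecture name and file path are placeholders):

/* Hypothetical arch/foo/include/asm/qspinlock.h: on a strongly-ordered
 * architecture a plain byte store is enough to release the lock. */
#ifndef _ASM_FOO_QSPINLOCK_H
#define _ASM_FOO_QSPINLOCK_H

#include <asm/barrier.h>
#include <asm-generic/qspinlock_types.h>

#define queued_spin_unlock queued_spin_unlock
static inline void queued_spin_unlock(struct qspinlock *lock)
{
	smp_store_release((u8 *)&lock->val, 0);	/* clear the locked byte */
}

#include <asm-generic/qspinlock.h>

#endif /* _ASM_FOO_QSPINLOCK_H */
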
diff --git a/include/asm-generic/qspinlock_types.h b/include/asm-generic/qspinlock_types.h
new file mode 100644 (file)
index 0000000..85f888e
--- /dev/null
@@ -0,0 +1,79 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ */
+#ifndef __ASM_GENERIC_QSPINLOCK_TYPES_H
+#define __ASM_GENERIC_QSPINLOCK_TYPES_H
+
+/*
+ * Including atomic.h with PARAVIRT on will cause compilation errors because
+ * of recursive header file inclusion via paravirt_types.h. So don't include
+ * it if PARAVIRT is on.
+ */
+#ifndef CONFIG_PARAVIRT
+#include <linux/types.h>
+#include <linux/atomic.h>
+#endif
+
+typedef struct qspinlock {
+       atomic_t        val;
+} arch_spinlock_t;
+
+/*
+ * Bitfields in the atomic value:
+ *
+ * When NR_CPUS < 16K
+ *  0- 7: locked byte
+ *     8: pending
+ *  9-15: not used
+ * 16-17: tail index
+ * 18-31: tail cpu (+1)
+ *
+ * When NR_CPUS >= 16K
+ *  0- 7: locked byte
+ *     8: pending
+ *  9-10: tail index
+ * 11-31: tail cpu (+1)
+ */
+#define        _Q_SET_MASK(type)       (((1U << _Q_ ## type ## _BITS) - 1)\
+                                     << _Q_ ## type ## _OFFSET)
+#define _Q_LOCKED_OFFSET       0
+#define _Q_LOCKED_BITS         8
+#define _Q_LOCKED_MASK         _Q_SET_MASK(LOCKED)
+
+#define _Q_PENDING_OFFSET      (_Q_LOCKED_OFFSET + _Q_LOCKED_BITS)
+#if CONFIG_NR_CPUS < (1U << 14)
+#define _Q_PENDING_BITS                8
+#else
+#define _Q_PENDING_BITS                1
+#endif
+#define _Q_PENDING_MASK                _Q_SET_MASK(PENDING)
+
+#define _Q_TAIL_IDX_OFFSET     (_Q_PENDING_OFFSET + _Q_PENDING_BITS)
+#define _Q_TAIL_IDX_BITS       2
+#define _Q_TAIL_IDX_MASK       _Q_SET_MASK(TAIL_IDX)
+
+#define _Q_TAIL_CPU_OFFSET     (_Q_TAIL_IDX_OFFSET + _Q_TAIL_IDX_BITS)
+#define _Q_TAIL_CPU_BITS       (32 - _Q_TAIL_CPU_OFFSET)
+#define _Q_TAIL_CPU_MASK       _Q_SET_MASK(TAIL_CPU)
+
+#define _Q_TAIL_OFFSET         _Q_TAIL_IDX_OFFSET
+#define _Q_TAIL_MASK           (_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
+#define _Q_LOCKED_VAL          (1U << _Q_LOCKED_OFFSET)
+#define _Q_PENDING_VAL         (1U << _Q_PENDING_OFFSET)
+
+#endif /* __ASM_GENERIC_QSPINLOCK_TYPES_H */
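
As a quick check of the layout documented above, a minimal sketch, assuming the
NR_CPUS < 16K configuration (where the pending field occupies the full byte
8-15 even though only bit 8 carries meaning) and BUILD_BUG_ON() from
<linux/bug.h>, of what the generated masks evaluate to:

/* Sketch: expected mask values for NR_CPUS < 16K (illustrative only). */
static inline void qspinlock_layout_check(void)
{
	BUILD_BUG_ON(_Q_LOCKED_MASK   != 0x000000ffU);	/* bits  0- 7 */
	BUILD_BUG_ON(_Q_PENDING_MASK  != 0x0000ff00U);	/* bits  8-15 */
	BUILD_BUG_ON(_Q_TAIL_IDX_MASK != 0x00030000U);	/* bits 16-17 */
	BUILD_BUG_ON(_Q_TAIL_CPU_MASK != 0xfffc0000U);	/* bits 18-31 */
}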
index 2dd405c9be78d474fe4c1c11a70651b6741ab396..45c39a37f9249562761dc9615ffecf12ec194846 100644 (file)
        {0x1002, 0x6658, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x665c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x665d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
+       {0x1002, 0x665f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_BONAIRE|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6660, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6663, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
        {0x1002, 0x6664, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_HAINAN|RADEON_IS_MOBILITY|RADEON_NEW_MEMMAP}, \
index a899402a5a0e6325c0b3dce2f03c7dca06846238..52f3b7da4f2d4dbb181aa1fe5c8d73e4df8f1814 100644 (file)
@@ -43,8 +43,8 @@ struct alarm {
 
 void alarm_init(struct alarm *alarm, enum alarmtimer_type type,
                enum alarmtimer_restart (*function)(struct alarm *, ktime_t));
-int alarm_start(struct alarm *alarm, ktime_t start);
-int alarm_start_relative(struct alarm *alarm, ktime_t start);
+void alarm_start(struct alarm *alarm, ktime_t start);
+void alarm_start_relative(struct alarm *alarm, ktime_t start);
 void alarm_restart(struct alarm *alarm);
 int alarm_try_to_cancel(struct alarm *alarm);
 int alarm_cancel(struct alarm *alarm);
index aff923ae8c4b963272563759b9ac52ad55778bd0..d87d8eced06407c59c6d231f9e707bdcc398ce52 100644 (file)
@@ -116,7 +116,6 @@ __printf(3, 4)
 int bdi_register(struct backing_dev_info *bdi, struct device *parent,
                const char *fmt, ...);
 int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
-void bdi_unregister(struct backing_dev_info *bdi);
 int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
 void bdi_start_writeback(struct backing_dev_info *bdi, long nr_pages,
                        enum wb_reason reason);
index a1b25e35ea5f9fc2978b7f62917c6b4e39c3dc75..b7299febc4b4adfee00cb8b05d6fbf6558f01547 100644 (file)
@@ -220,7 +220,7 @@ enum rq_flag_bits {
 
 /* This mask is used for both bio and request merge checking */
 #define REQ_NOMERGE_FLAGS \
-       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
+       (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA | REQ_FLUSH_SEQ)
 
 #define REQ_RAHEAD             (1ULL << __REQ_RAHEAD)
 #define REQ_THROTTLED          (1ULL << __REQ_THROTTLED)
index 7f9a516f24dec57182f51cff3580db3904208a84..5d93a6645e88676a7d90a1ac55b5d5d6792da667 100644 (file)
@@ -821,8 +821,6 @@ extern int scsi_cmd_ioctl(struct request_queue *, struct gendisk *, fmode_t,
 extern int sg_scsi_ioctl(struct request_queue *, struct gendisk *, fmode_t,
                         struct scsi_ioctl_command __user *);
 
-extern void blk_queue_bio(struct request_queue *q, struct bio *bio);
-
 /*
  * A queue has just exited congestion.  Note this in the global counter of
  * congested queues, and wake up anyone who was waiting for requests to be
index 86c12c93e3cf6ce9c1db4085b9ee986d2c1e5b9a..8fdcb783197d723a60dc548114af08cd6de9ca88 100644 (file)
@@ -2,7 +2,6 @@
 #define _LINUX_BH_H
 
 #include <linux/preempt.h>
-#include <linux/preempt_mask.h>
 
 #ifdef CONFIG_TRACE_IRQFLAGS
 extern void __local_bh_disable_ip(unsigned long ip, unsigned int cnt);
index ae2982c0f7a60ed93339e767feaf1fc89aa02134..656da2a12ffee319f67cb744945f028599e0603c 100644 (file)
@@ -17,7 +17,7 @@
 #define PHY_ID_BCM7250                 0xae025280
 #define PHY_ID_BCM7364                 0xae025260
 #define PHY_ID_BCM7366                 0x600d8490
-#define PHY_ID_BCM7425                 0x03625e60
+#define PHY_ID_BCM7425                 0x600d86b0
 #define PHY_ID_BCM7429                 0x600d8730
 #define PHY_ID_BCM7439                 0x600d8480
 #define PHY_ID_BCM7439_2               0xae025080
index 96c280b2c263476c053bdd0c514aa16df3d04212..597a1e836f223762da499dc3e1ff3689610a9deb 100644 (file)
@@ -37,12 +37,15 @@ enum clock_event_mode {
  *             reached from DETACHED or SHUTDOWN.
  * ONESHOT:    Device is programmed to generate event only once. Can be reached
  *             from DETACHED or SHUTDOWN.
+ * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
+ *                 stopped.
  */
 enum clock_event_state {
        CLOCK_EVT_STATE_DETACHED,
        CLOCK_EVT_STATE_SHUTDOWN,
        CLOCK_EVT_STATE_PERIODIC,
        CLOCK_EVT_STATE_ONESHOT,
+       CLOCK_EVT_STATE_ONESHOT_STOPPED,
 };
 
 /*
@@ -84,12 +87,13 @@ enum clock_event_state {
  * @mult:              nanosecond to cycles multiplier
  * @shift:             nanoseconds to cycles divisor (power of two)
  * @mode:              operating mode, relevant only to ->set_mode(), OBSOLETE
- * @state:             current state of the device, assigned by the core code
+ * @state_use_accessors: current state of the device, assigned by the core code
  * @features:          features
  * @retries:           number of forced programming retries
  * @set_mode:          legacy set mode function, only for modes <= CLOCK_EVT_MODE_RESUME.
  * @set_state_periodic:        switch state to periodic, if !set_mode
  * @set_state_oneshot: switch state to oneshot, if !set_mode
+ * @set_state_oneshot_stopped: switch state to oneshot_stopped, if !set_mode
  * @set_state_shutdown:        switch state to shutdown, if !set_mode
  * @tick_resume:       resume clkevt device, if !set_mode
  * @broadcast:         function to broadcast events
@@ -113,7 +117,7 @@ struct clock_event_device {
        u32                     mult;
        u32                     shift;
        enum clock_event_mode   mode;
-       enum clock_event_state  state;
+       enum clock_event_state  state_use_accessors;
        unsigned int            features;
        unsigned long           retries;
 
@@ -121,11 +125,12 @@ struct clock_event_device {
         * State transition callback(s): Only one of the two groups should be
         * defined:
         * - set_mode(), only for modes <= CLOCK_EVT_MODE_RESUME.
-        * - set_state_{shutdown|periodic|oneshot}(), tick_resume().
+        * - set_state_{shutdown|periodic|oneshot|oneshot_stopped}(), tick_resume().
         */
        void                    (*set_mode)(enum clock_event_mode mode, struct clock_event_device *);
        int                     (*set_state_periodic)(struct clock_event_device *);
        int                     (*set_state_oneshot)(struct clock_event_device *);
+       int                     (*set_state_oneshot_stopped)(struct clock_event_device *);
        int                     (*set_state_shutdown)(struct clock_event_device *);
        int                     (*tick_resume)(struct clock_event_device *);
 
@@ -144,6 +149,32 @@ struct clock_event_device {
        struct module           *owner;
 } ____cacheline_aligned;
 
+/* Helpers to verify state of a clockevent device */
+static inline bool clockevent_state_detached(struct clock_event_device *dev)
+{
+       return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
+}
+
+static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
+{
+       return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
+}
+
+static inline bool clockevent_state_periodic(struct clock_event_device *dev)
+{
+       return dev->state_use_accessors == CLOCK_EVT_STATE_PERIODIC;
+}
+
+static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
+{
+       return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT;
+}
+
+static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
+{
+       return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
 /*
  * Calculate a multiplication factor for scaled math, which is used to convert
  * nanoseconds based values to clock ticks:
index d27d0152271f9e8b487a48a9f2d74f51fe9a58a5..278dd279a7a8035e8be073a9664ea88f7357984a 100644 (file)
@@ -181,7 +181,6 @@ static inline s64 clocksource_cyc2ns(cycle_t cycles, u32 mult, u32 shift)
 
 extern int clocksource_unregister(struct clocksource*);
 extern void clocksource_touch_watchdog(void);
-extern struct clocksource* clocksource_get_next(void);
 extern void clocksource_change_rating(struct clocksource *cs, int rating);
 extern void clocksource_suspend(void);
 extern void clocksource_resume(void);
index 867722591be2c7e026e1b97c241e65e27e3b9d1b..05be2352fef889663fad482f57c4d8b9d5e18df4 100644 (file)
@@ -250,7 +250,23 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
        ({ union { typeof(x) __val; char __c[1]; } __u; __read_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
 
 #define WRITE_ONCE(x, val) \
-       ({ typeof(x) __val = (val); __write_once_size(&(x), &__val, sizeof(__val)); __val; })
+       ({ union { typeof(x) __val; char __c[1]; } __u = { .__val = (val) }; __write_once_size(&(x), __u.__c, sizeof(x)); __u.__val; })
+
+/**
+ * READ_ONCE_CTRL - Read a value heading a control dependency
+ * @x: The value to be read, heading the control dependency
+ *
+ * Control dependencies are tricky.  See Documentation/memory-barriers.txt
+ * for important information on how to use them.  Note that in many cases,
+ * use of smp_load_acquire() will be much simpler.  Control dependencies
+ * should be avoided except on the hottest of hotpaths.
+ */
+#define READ_ONCE_CTRL(x) \
+({ \
+       typeof(x) __val = READ_ONCE(x); \
+       smp_read_barrier_depends(); /* Enforce control dependency. */ \
+       __val; \
+})
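
A minimal usage sketch (the flag and counter names are hypothetical): the
READ_ONCE_CTRL() load heads the control dependency, so the dependent store in
the taken branch is ordered after it:

/* Sketch: consumer side of a flag -> store handoff (names illustrative). */
static int data_ready, data_consumed;

static int try_consume(void)
{
	if (READ_ONCE_CTRL(data_ready)) {
		/* this store is ordered after the flag load above */
		WRITE_ONCE(data_consumed, 1);
		return 1;
	}
	return 0;
}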
 
 #endif /* __KERNEL__ */
 
@@ -450,7 +466,7 @@ static __always_inline void __write_once_size(volatile void *p, void *res, int s
  * with an explicit memory barrier or atomic instruction that provides the
  * required ordering.
  *
- * If possible use READ_ONCE/ASSIGN_ONCE instead.
+ * If possible use READ_ONCE()/WRITE_ONCE() instead.
  */
 #define __ACCESS_ONCE(x) ({ \
         __maybe_unused typeof(x) __var = (__force typeof(x)) 0; \
index 27e285b92b5f748b8ffe9a8e599c8850f0346007..59915ea5373ca798dca185070e11af88cc7745d9 100644 (file)
@@ -151,10 +151,8 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
        return 1;
 }
 
-static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+static inline unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-       set_bit(0, cpumask_bits(dstp));
-
        return 0;
 }
 
@@ -208,7 +206,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
+unsigned int cpumask_local_spread(unsigned int i, int node);
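
A hedged usage sketch of the renamed helper, e.g. picking the CPU for the i-th
queue of a device on a given NUMA node (function and parameter names are
illustrative):

/* Sketch: i-th CPU, preferring CPUs local to @node. */
static int pick_queue_cpu(unsigned int i, int node)
{
	return cpumask_local_spread(i, node);
}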
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
index cb25af46105406908e5a825ca3812f0ff63d3194..420311bcee38c291cf75894ebfe4c2d1141da1a7 100644 (file)
@@ -45,7 +45,6 @@ extern struct dentry *arch_debugfs_dir;
 
 /* declared over in file.c */
 extern const struct file_operations debugfs_file_operations;
-extern const struct inode_operations debugfs_link_operations;
 
 struct dentry *debugfs_create_file(const char *name, umode_t mode,
                                   struct dentry *parent, void *data,
index 30624954dec5a9250c142d2c57d000b991a91710..e9bc9292bd3a5e8ff8ba6603564ee25a8911c7e4 100644 (file)
@@ -185,33 +185,85 @@ static inline int dmar_device_remove(void *handle)
 
 struct irte {
        union {
+               /* Shared between remapped and posted mode */
                struct {
-                       __u64   present         : 1,
-                               fpd             : 1,
-                               dst_mode        : 1,
-                               redir_hint      : 1,
-                               trigger_mode    : 1,
-                               dlvry_mode      : 3,
-                               avail           : 4,
-                               __reserved_1    : 4,
-                               vector          : 8,
-                               __reserved_2    : 8,
-                               dest_id         : 32;
+                       __u64   present         : 1,  /*  0      */
+                               fpd             : 1,  /*  1      */
+                               __res0          : 6,  /*  2 -  6 */
+                               avail           : 4,  /*  8 - 11 */
+                               __res1          : 3,  /* 12 - 14 */
+                               pst             : 1,  /* 15      */
+                               vector          : 8,  /* 16 - 23 */
+                               __res2          : 40; /* 24 - 63 */
+               };
+
+               /* Remapped mode */
+               struct {
+                       __u64   r_present       : 1,  /*  0      */
+                               r_fpd           : 1,  /*  1      */
+                               dst_mode        : 1,  /*  2      */
+                               redir_hint      : 1,  /*  3      */
+                               trigger_mode    : 1,  /*  4      */
+                               dlvry_mode      : 3,  /*  5 -  7 */
+                               r_avail         : 4,  /*  8 - 11 */
+                               r_res0          : 4,  /* 12 - 15 */
+                               r_vector        : 8,  /* 16 - 23 */
+                               r_res1          : 8,  /* 24 - 31 */
+                               dest_id         : 32; /* 32 - 63 */
+               };
+
+               /* Posted mode */
+               struct {
+                       __u64   p_present       : 1,  /*  0      */
+                               p_fpd           : 1,  /*  1      */
+                               p_res0          : 6,  /*  2 -  7 */
+                               p_avail         : 4,  /*  8 - 11 */
+                               p_res1          : 2,  /* 12 - 13 */
+                               p_urgent        : 1,  /* 14      */
+                               p_pst           : 1,  /* 15      */
+                               p_vector        : 8,  /* 16 - 23 */
+                               p_res2          : 14, /* 24 - 37 */
+                               pda_l           : 26; /* 38 - 63 */
                };
                __u64 low;
        };
 
        union {
+               /* Shared between remapped and posted mode */
                struct {
-                       __u64   sid             : 16,
-                               sq              : 2,
-                               svt             : 2,
-                               __reserved_3    : 44;
+                       __u64   sid             : 16,  /* 64 - 79  */
+                               sq              : 2,   /* 80 - 81  */
+                               svt             : 2,   /* 82 - 83  */
+                               __res3          : 44;  /* 84 - 127 */
+               };
+
+               /* Posted mode */
+               struct {
+                       __u64   p_sid           : 16,  /* 64 - 79  */
+                               p_sq            : 2,   /* 80 - 81  */
+                               p_svt           : 2,   /* 82 - 83  */
+                               p_res3          : 12,  /* 84 - 95  */
+                               pda_h           : 32;  /* 96 - 127 */
                };
                __u64 high;
        };
 };
 
+static inline void dmar_copy_shared_irte(struct irte *dst, struct irte *src)
+{
+       dst->present    = src->present;
+       dst->fpd        = src->fpd;
+       dst->avail      = src->avail;
+       dst->pst        = src->pst;
+       dst->vector     = src->vector;
+       dst->sid        = src->sid;
+       dst->sq         = src->sq;
+       dst->svt        = src->svt;
+}
+
+#define PDA_LOW_BIT    26
+#define PDA_HIGH_BIT   32
+
 enum {
        IRQ_REMAP_XAPIC_MODE,
        IRQ_REMAP_X2APIC_MODE,
@@ -227,6 +279,7 @@ extern void dmar_msi_read(int irq, struct msi_msg *msg);
 extern void dmar_msi_write(int irq, struct msi_msg *msg);
 extern int dmar_set_interrupt(struct intel_iommu *iommu);
 extern irqreturn_t dmar_fault(int irq, void *dev_id);
-extern int arch_setup_dmar_msi(unsigned int irq);
+extern int dmar_alloc_hwirq(int id, int node, void *arg);
+extern void dmar_free_hwirq(int irq);
 
 #endif /* __DMAR_H__ */
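
For illustration only, assuming the VT-d posted-interrupt convention that
pda_l carries bits 31:6 and pda_h carries bits 63:32 of the posted-interrupt
descriptor address, the split named by PDA_LOW_BIT/PDA_HIGH_BIT round-trips
like this:

/* Sketch (assumed field semantics): rebuild the descriptor address. */
static inline u64 irte_pda_to_addr(const struct irte *irte)
{
	return ((u64)irte->pda_h << 32) | ((u64)irte->pda_l << 6);
}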
index af5be0368dec26c934565e634c0dc803958ed2cc..2092965afca3994606ee8a255a97929a38df8095 100644 (file)
@@ -583,6 +583,9 @@ void efi_native_runtime_setup(void);
 #define EFI_FILE_INFO_ID \
     EFI_GUID(  0x9576e92, 0x6d3f, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
 
+#define EFI_SYSTEM_RESOURCE_TABLE_GUID \
+    EFI_GUID(  0xb122a263, 0x3661, 0x4f68, 0x99, 0x29, 0x78, 0xf8, 0xb0, 0xd6, 0x21, 0x80 )
+
 #define EFI_FILE_SYSTEM_GUID \
     EFI_GUID(  0x964e5b22, 0x6459, 0x11d2, 0x8e, 0x39, 0x00, 0xa0, 0xc9, 0x69, 0x72, 0x3b )
 
@@ -823,6 +826,7 @@ extern struct efi {
        unsigned long fw_vendor;        /* fw_vendor */
        unsigned long runtime;          /* runtime table */
        unsigned long config_table;     /* config tables */
+       unsigned long esrt;             /* ESRT table */
        efi_get_time_t *get_time;
        efi_set_time_t *set_time;
        efi_get_wakeup_time_t *get_wakeup_time;
@@ -875,6 +879,11 @@ static inline efi_status_t efi_query_variable_store(u32 attributes, unsigned lon
 #endif
 extern void __iomem *efi_lookup_mapped_addr(u64 phys_addr);
 extern int efi_config_init(efi_config_table_type_t *arch_tables);
+#ifdef CONFIG_EFI_ESRT
+extern void __init efi_esrt_init(void);
+#else
+static inline void efi_esrt_init(void) { }
+#endif
 extern int efi_config_parse_tables(void *config_tables, int count, int sz,
                                   efi_config_table_type_t *arch_tables);
 extern u64 efi_get_iobase (void);
@@ -882,12 +891,15 @@ extern u32 efi_mem_type (unsigned long phys_addr);
 extern u64 efi_mem_attributes (unsigned long phys_addr);
 extern u64 efi_mem_attribute (unsigned long phys_addr, unsigned long size);
 extern int __init efi_uart_console_only (void);
+extern u64 efi_mem_desc_end(efi_memory_desc_t *md);
+extern int efi_mem_desc_lookup(u64 phys_addr, efi_memory_desc_t *out_md);
 extern void efi_initialize_iomem_resources(struct resource *code_resource,
                struct resource *data_resource, struct resource *bss_resource);
 extern void efi_get_time(struct timespec *now);
 extern void efi_reserve_boot_services(void);
 extern int efi_get_fdt_params(struct efi_fdt_params *params, int verbose);
 extern struct efi_memory_map memmap;
+extern struct kobject *efi_kobj;
 
 extern int efi_reboot_quirk_mode;
 extern bool efi_poweroff_required(void);
index 35ec87e490b1a41ff0bc3ba20b06ac9d958f972a..b577e801b4af17ddd3288e28c209b644415cc63c 100644 (file)
@@ -38,7 +38,6 @@ struct backing_dev_info;
 struct export_operations;
 struct hd_geometry;
 struct iovec;
-struct nameidata;
 struct kiocb;
 struct kobject;
 struct pipe_inode_info;
@@ -656,6 +655,7 @@ struct inode {
                struct pipe_inode_info  *i_pipe;
                struct block_device     *i_bdev;
                struct cdev             *i_cdev;
+               char                    *i_link;
        };
 
        __u32                   i_generation;
@@ -1607,12 +1607,12 @@ struct file_operations {
 
 struct inode_operations {
        struct dentry * (*lookup) (struct inode *,struct dentry *, unsigned int);
-       void * (*follow_link) (struct dentry *, struct nameidata *);
+       const char * (*follow_link) (struct dentry *, void **);
        int (*permission) (struct inode *, int);
        struct posix_acl * (*get_acl)(struct inode *, int);
 
        int (*readlink) (struct dentry *, char __user *,int);
-       void (*put_link) (struct dentry *, struct nameidata *, void *);
+       void (*put_link) (struct inode *, void *);
 
        int (*create) (struct inode *,struct dentry *, umode_t, bool);
        int (*link) (struct dentry *,struct inode *,struct dentry *);
@@ -1879,6 +1879,7 @@ enum file_time_flags {
        S_VERSION = 8,
 };
 
+extern bool atime_needs_update(const struct path *, struct inode *);
 extern void touch_atime(const struct path *);
 static inline void file_accessed(struct file *file)
 {
@@ -2704,13 +2705,14 @@ extern const struct file_operations generic_ro_fops;
 
 extern int readlink_copy(char __user *, int, const char *);
 extern int page_readlink(struct dentry *, char __user *, int);
-extern void *page_follow_link_light(struct dentry *, struct nameidata *);
-extern void page_put_link(struct dentry *, struct nameidata *, void *);
+extern const char *page_follow_link_light(struct dentry *, void **);
+extern void page_put_link(struct inode *, void *);
 extern int __page_symlink(struct inode *inode, const char *symname, int len,
                int nofs);
 extern int page_symlink(struct inode *inode, const char *symname, int len);
 extern const struct inode_operations page_symlink_inode_operations;
-extern void kfree_put_link(struct dentry *, struct nameidata *, void *);
+extern void kfree_put_link(struct inode *, void *);
+extern void free_page_put_link(struct inode *, void *);
 extern int generic_readlink(struct dentry *, char __user *, int);
 extern void generic_fillattr(struct inode *, struct kstat *);
 int vfs_getattr_nosec(struct path *path, struct kstat *stat);
@@ -2721,6 +2723,8 @@ void __inode_sub_bytes(struct inode *inode, loff_t bytes);
 void inode_sub_bytes(struct inode *inode, loff_t bytes);
 loff_t inode_get_bytes(struct inode *inode);
 void inode_set_bytes(struct inode *inode, loff_t bytes);
+const char *simple_follow_link(struct dentry *, void **);
+extern const struct inode_operations simple_symlink_inode_operations;
 
 extern int iterate_dir(struct file *, struct dir_context *);
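
A minimal sketch of the reworked symlink convention above (filesystem and
helper names are illustrative): ->follow_link() now returns the link body
directly and may stash state in *cookie for ->put_link(); here the body lives
in the new ->i_link member, so there is nothing to release:

/* Sketch: trivial ->follow_link for a link body cached in ->i_link. */
static const char *demo_follow_link(struct dentry *dentry, void **cookie)
{
	return d_inode(dentry)->i_link;	/* no cookie needed, no put step */
}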
 
index 46e83c2156c667c785fd869a8626e5df44f9c282..f9ecf63d47f1d27e116ff7a6e7959dce0a165837 100644 (file)
@@ -46,7 +46,7 @@ const char *ftrace_print_hex_seq(struct trace_seq *p,
                                 const unsigned char *buf, int len);
 
 const char *ftrace_print_array_seq(struct trace_seq *p,
-                                  const void *buf, int buf_len,
+                                  const void *buf, int count,
                                   size_t el_size);
 
 struct trace_iterator;
index 97a9373e61e80d048ee6259eb4962bd831cac822..15928f0647e44187eb00ae7e128f4047519b093b 100644 (file)
@@ -30,6 +30,7 @@ struct vm_area_struct;
 #define ___GFP_HARDWALL                0x20000u
 #define ___GFP_THISNODE                0x40000u
 #define ___GFP_RECLAIMABLE     0x80000u
+#define ___GFP_NOACCOUNT       0x100000u
 #define ___GFP_NOTRACK         0x200000u
 #define ___GFP_NO_KSWAPD       0x400000u
 #define ___GFP_OTHER_NODE      0x800000u
@@ -87,6 +88,7 @@ struct vm_area_struct;
 #define __GFP_HARDWALL   ((__force gfp_t)___GFP_HARDWALL) /* Enforce hardwall cpuset memory allocs */
 #define __GFP_THISNODE ((__force gfp_t)___GFP_THISNODE)/* No fallback, no policies */
 #define __GFP_RECLAIMABLE ((__force gfp_t)___GFP_RECLAIMABLE) /* Page is reclaimable */
+#define __GFP_NOACCOUNT        ((__force gfp_t)___GFP_NOACCOUNT) /* Don't account to kmemcg */
 #define __GFP_NOTRACK  ((__force gfp_t)___GFP_NOTRACK)  /* Don't track with kmemcheck */
 
 #define __GFP_NO_KSWAPD        ((__force gfp_t)___GFP_NO_KSWAPD)
index f4af03404b9789805e06638303a8873aa14fe5a8..dfd59d6bc6f0f74ef89b0512a25180fd0077e2d3 100644 (file)
@@ -1,7 +1,7 @@
 #ifndef LINUX_HARDIRQ_H
 #define LINUX_HARDIRQ_H
 
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
index 0408421d885f9433c61149e2769bdd3c08192522..0042bf330b99ffa6edd77677529753bdd00b79d4 100644 (file)
@@ -74,7 +74,7 @@ struct sensor_hub_pending {
  * @usage:             Usage id for this hub device instance.
  * @start_collection_index: Starting index for a phy type collection
  * @end_collection_index: Last index for a phy type collection
- * @mutex:             synchronizing mutex.
+ * @mutex_ptr:         synchronizing mutex pointer.
  * @pending:           Holds information of pending sync read request.
  */
 struct hid_sensor_hub_device {
@@ -84,7 +84,7 @@ struct hid_sensor_hub_device {
        u32 usage;
        int start_collection_index;
        int end_collection_index;
-       struct mutex mutex;
+       struct mutex *mutex_ptr;
        struct sensor_hub_pending pending;
 };
 
index 9286a46b7d69b539f027bcc890b3be976d20f228..6aefcd0031a6bd013cf322d1021812fcf84250c2 100644 (file)
@@ -65,6 +65,7 @@ static inline void kunmap(struct page *page)
 
 static inline void *kmap_atomic(struct page *page)
 {
+       preempt_disable();
        pagefault_disable();
        return page_address(page);
 }
@@ -73,6 +74,7 @@ static inline void *kmap_atomic(struct page *page)
 static inline void __kunmap_atomic(void *addr)
 {
        pagefault_enable();
+       preempt_enable();
 }
 
 #define kmap_atomic_pfn(pfn)   kmap_atomic(pfn_to_page(pfn))
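
With the explicit preempt_disable()/preempt_enable() pairing added above, the
usual calling pattern is unchanged; a short sketch:

/* Sketch: an atomic kmap section now also runs preemption-disabled. */
static void fill_page(struct page *page, int c)
{
	void *addr = kmap_atomic(page);	/* pagefaults + preemption off */

	memset(addr, c, PAGE_SIZE);
	kunmap_atomic(addr);		/* both re-enabled on unmap */
}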
index 05f6df1fdf5bbfc70880f188c40e61264f764cc7..76dd4f0da5ca9907572bea19f4b0d0dbe8ad06eb 100644 (file)
@@ -53,34 +53,25 @@ enum hrtimer_restart {
  *
  * 0x00                inactive
  * 0x01                enqueued into rbtree
- * 0x02                callback function running
- * 0x04                timer is migrated to another cpu
  *
- * Special cases:
- * 0x03                callback function running and enqueued
- *             (was requeued on another CPU)
- * 0x05                timer was migrated on CPU hotunplug
+ * The callback state is not part of the timer->state because clearing it would
+ * mean touching the timer after the callback returns, which would make it
+ * impossible to free the timer from the callback function.
  *
- * The "callback function running and enqueued" status is only possible on
- * SMP. It happens for example when a posix timer expired and the callback
+ * Therefore we track the callback state in:
+ *
+ *     timer->base->cpu_base->running == timer
+ *
+ * On SMP it is possible to have a "callback function running and enqueued"
+ * status. It happens for example when a posix timer expired and the callback
  * queued a signal. Between dropping the lock which protects the posix timer
  * and reacquiring the base lock of the hrtimer, another CPU can deliver the
- * signal and rearm the timer. We have to preserve the callback running state,
- * as otherwise the timer could be removed before the softirq code finishes the
- * the handling of the timer.
- *
- * The HRTIMER_STATE_ENQUEUED bit is always or'ed to the current state
- * to preserve the HRTIMER_STATE_CALLBACK in the above scenario. This
- * also affects HRTIMER_STATE_MIGRATE where the preservation is not
- * necessary. HRTIMER_STATE_MIGRATE is cleared after the timer is
- * enqueued on the new cpu.
+ * signal and rearm the timer.
  *
  * All state transitions are protected by cpu_base->lock.
  */
 #define HRTIMER_STATE_INACTIVE 0x00
 #define HRTIMER_STATE_ENQUEUED 0x01
-#define HRTIMER_STATE_CALLBACK 0x02
-#define HRTIMER_STATE_MIGRATE  0x04
 
 /**
  * struct hrtimer - the basic hrtimer structure
@@ -130,6 +121,12 @@ struct hrtimer_sleeper {
        struct task_struct *task;
 };
 
+#ifdef CONFIG_64BIT
+# define HRTIMER_CLOCK_BASE_ALIGN      64
+#else
+# define HRTIMER_CLOCK_BASE_ALIGN      32
+#endif
+
 /**
  * struct hrtimer_clock_base - the timer base for a specific clock
  * @cpu_base:          per cpu clock base
@@ -137,9 +134,7 @@ struct hrtimer_sleeper {
  *                     timer to a base on another cpu.
  * @clockid:           clock id for per_cpu support
  * @active:            red black tree root node for the active timers
- * @resolution:                the resolution of the clock, in nanoseconds
  * @get_time:          function to retrieve the current time of the clock
- * @softirq_time:      the time when running the hrtimer queue in the softirq
  * @offset:            offset of this clock to the monotonic base
  */
 struct hrtimer_clock_base {
@@ -147,11 +142,9 @@ struct hrtimer_clock_base {
        int                     index;
        clockid_t               clockid;
        struct timerqueue_head  active;
-       ktime_t                 resolution;
        ktime_t                 (*get_time)(void);
-       ktime_t                 softirq_time;
        ktime_t                 offset;
-};
+} __attribute__((__aligned__(HRTIMER_CLOCK_BASE_ALIGN)));
 
 enum  hrtimer_base_type {
        HRTIMER_BASE_MONOTONIC,
@@ -165,11 +158,16 @@ enum  hrtimer_base_type {
  * struct hrtimer_cpu_base - the per cpu clock bases
  * @lock:              lock protecting the base and associated clock bases
  *                     and timers
+ * @seq:               seqcount around __run_hrtimer
+ * @running:           pointer to the currently running hrtimer
  * @cpu:               cpu number
  * @active_bases:      Bitfield to mark bases with active timers
- * @clock_was_set:     Indicates that clock was set from irq context.
+ * @clock_was_set_seq: Sequence counter of clock was set events
+ * @migration_enabled: The migration of hrtimers to other cpus is enabled
+ * @nohz_active:       The nohz functionality is enabled
  * @expires_next:      absolute time of the next event which was scheduled
  *                     via clock_set_next_event()
+ * @next_timer:                Pointer to the first expiring timer
  * @in_hrtirq:         hrtimer_interrupt() is currently executing
  * @hres_active:       State of high resolution mode
  * @hang_detected:     The last hrtimer interrupt detected a hang
@@ -178,27 +176,38 @@ enum  hrtimer_base_type {
  * @nr_hangs:          Total number of hrtimer interrupt hangs
  * @max_hang_time:     Maximum time spent in hrtimer_interrupt
  * @clock_base:                array of clock bases for this cpu
+ *
+ * Note: next_timer is just an optimization for __remove_hrtimer().
+ *      Do not dereference the pointer because it is not reliable on
+ *      cross cpu removals.
  */
 struct hrtimer_cpu_base {
        raw_spinlock_t                  lock;
+       seqcount_t                      seq;
+       struct hrtimer                  *running;
        unsigned int                    cpu;
        unsigned int                    active_bases;
-       unsigned int                    clock_was_set;
+       unsigned int                    clock_was_set_seq;
+       bool                            migration_enabled;
+       bool                            nohz_active;
 #ifdef CONFIG_HIGH_RES_TIMERS
+       unsigned int                    in_hrtirq       : 1,
+                                       hres_active     : 1,
+                                       hang_detected   : 1;
        ktime_t                         expires_next;
-       int                             in_hrtirq;
-       int                             hres_active;
-       int                             hang_detected;
-       unsigned long                   nr_events;
-       unsigned long                   nr_retries;
-       unsigned long                   nr_hangs;
-       ktime_t                         max_hang_time;
+       struct hrtimer                  *next_timer;
+       unsigned int                    nr_events;
+       unsigned int                    nr_retries;
+       unsigned int                    nr_hangs;
+       unsigned int                    max_hang_time;
 #endif
        struct hrtimer_clock_base       clock_base[HRTIMER_MAX_CLOCK_BASES];
-};
+} ____cacheline_aligned;
 
 static inline void hrtimer_set_expires(struct hrtimer *timer, ktime_t time)
 {
+       BUILD_BUG_ON(sizeof(struct hrtimer_clock_base) > HRTIMER_CLOCK_BASE_ALIGN);
+
        timer->node.expires = time;
        timer->_softexpires = time;
 }
@@ -262,19 +271,16 @@ static inline ktime_t hrtimer_expires_remaining(const struct hrtimer *timer)
        return ktime_sub(timer->node.expires, timer->base->get_time());
 }
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-struct clock_event_device;
-
-extern void hrtimer_interrupt(struct clock_event_device *dev);
-
-/*
- * In high resolution mode the time reference must be read accurate
- */
 static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
 {
        return timer->base->get_time();
 }
 
+#ifdef CONFIG_HIGH_RES_TIMERS
+struct clock_event_device;
+
+extern void hrtimer_interrupt(struct clock_event_device *dev);
+
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
        return timer->base->cpu_base->hres_active;
@@ -295,21 +301,16 @@ extern void hrtimer_peek_ahead_timers(void);
 
 extern void clock_was_set_delayed(void);
 
+extern unsigned int hrtimer_resolution;
+
 #else
 
 # define MONOTONIC_RES_NSEC    LOW_RES_NSEC
 # define KTIME_MONOTONIC_RES   KTIME_LOW_RES
 
-static inline void hrtimer_peek_ahead_timers(void) { }
+#define hrtimer_resolution     (unsigned int)LOW_RES_NSEC
 
-/*
- * In non high resolution mode the time reference is taken from
- * the base softirq time variable.
- */
-static inline ktime_t hrtimer_cb_get_time(struct hrtimer *timer)
-{
-       return timer->base->softirq_time;
-}
+static inline void hrtimer_peek_ahead_timers(void) { }
 
 static inline int hrtimer_is_hres_active(struct hrtimer *timer)
 {
@@ -353,49 +354,47 @@ static inline void destroy_hrtimer_on_stack(struct hrtimer *timer) { }
 #endif
 
 /* Basic timer operations: */
-extern int hrtimer_start(struct hrtimer *timer, ktime_t tim,
-                        const enum hrtimer_mode mode);
-extern int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+extern void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                        unsigned long range_ns, const enum hrtimer_mode mode);
-extern int
-__hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-                        unsigned long delta_ns,
-                        const enum hrtimer_mode mode, int wakeup);
+
+/**
+ * hrtimer_start - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
+ *             relative (HRTIMER_MODE_REL)
+ */
+static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
+                                const enum hrtimer_mode mode)
+{
+       hrtimer_start_range_ns(timer, tim, 0, mode);
+}
 
 extern int hrtimer_cancel(struct hrtimer *timer);
 extern int hrtimer_try_to_cancel(struct hrtimer *timer);
 
-static inline int hrtimer_start_expires(struct hrtimer *timer,
-                                               enum hrtimer_mode mode)
+static inline void hrtimer_start_expires(struct hrtimer *timer,
+                                        enum hrtimer_mode mode)
 {
        unsigned long delta;
        ktime_t soft, hard;
        soft = hrtimer_get_softexpires(timer);
        hard = hrtimer_get_expires(timer);
        delta = ktime_to_ns(ktime_sub(hard, soft));
-       return hrtimer_start_range_ns(timer, soft, delta, mode);
+       hrtimer_start_range_ns(timer, soft, delta, mode);
 }
 
-static inline int hrtimer_restart(struct hrtimer *timer)
+static inline void hrtimer_restart(struct hrtimer *timer)
 {
-       return hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
+       hrtimer_start_expires(timer, HRTIMER_MODE_ABS);
 }
 
 /* Query timers: */
 extern ktime_t hrtimer_get_remaining(const struct hrtimer *timer);
-extern int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp);
 
-extern ktime_t hrtimer_get_next_event(void);
+extern u64 hrtimer_get_next_event(void);
 
-/*
- * A timer is active, when it is enqueued into the rbtree or the
- * callback function is running or it's in the state of being migrated
- * to another cpu.
- */
-static inline int hrtimer_active(const struct hrtimer *timer)
-{
-       return timer->state != HRTIMER_STATE_INACTIVE;
-}
+extern bool hrtimer_active(const struct hrtimer *timer);
 
 /*
  * Helper function to check, whether the timer is on one of the queues
@@ -411,14 +410,29 @@ static inline int hrtimer_is_queued(struct hrtimer *timer)
  */
 static inline int hrtimer_callback_running(struct hrtimer *timer)
 {
-       return timer->state & HRTIMER_STATE_CALLBACK;
+       return timer->base->cpu_base->running == timer;
 }
 
 /* Forward a hrtimer so it expires after now: */
 extern u64
 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval);
 
-/* Forward a hrtimer so it expires after the hrtimer's current now */
+/**
+ * hrtimer_forward_now - forward the timer expiry so it expires after now
+ * @timer:     hrtimer to forward
+ * @interval:  the interval to forward
+ *
+ * Forward the timer expiry so it will expire after the current time
+ * of the hrtimer clock base. Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts @timer must neither be enqueued nor
+ * running the callback and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
+ */
 static inline u64 hrtimer_forward_now(struct hrtimer *timer,
                                      ktime_t interval)
 {
@@ -443,7 +457,6 @@ extern int schedule_hrtimeout(ktime_t *expires, const enum hrtimer_mode mode);
 
 /* Soft interrupt function to run the hrtimer queues: */
 extern void hrtimer_run_queues(void);
-extern void hrtimer_run_pending(void);
 
 /* Bootup initialization: */
 extern void __init hrtimers_init(void);
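
Since hrtimer_start() is now void and hrtimer_forward_now() only advances the
expiry without requeueing, a self-rearming periodic callback under the new API
looks like this (the 100ms period is illustrative):

/* Sketch: periodic callback; returning HRTIMER_RESTART requeues it. */
static enum hrtimer_restart demo_tick(struct hrtimer *timer)
{
	hrtimer_forward_now(timer, ms_to_ktime(100));
	return HRTIMER_RESTART;
}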
index 70a1dbbf209350836f97743363fadfe4f6f95b36..d4a527e58434df73800eed55fbbc6750f5f2d5ee 100644 (file)
@@ -1,24 +1,38 @@
 #ifndef LINUX_HTIRQ_H
 #define LINUX_HTIRQ_H
 
+struct pci_dev;
+struct irq_data;
+
 struct ht_irq_msg {
        u32     address_lo;     /* low 32 bits of the ht irq message */
        u32     address_hi;     /* high 32 bits of the ht irq message */
 };
 
+typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
+                              struct ht_irq_msg *msg);
+
+struct ht_irq_cfg {
+       struct pci_dev *dev;
+       /* Update callback used to cope with buggy hardware */
+       ht_irq_update_t *update;
+       unsigned pos;
+       unsigned idx;
+       struct ht_irq_msg msg;
+};
+
 /* Helper functions.. */
 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg);
-struct irq_data;
 void mask_ht_irq(struct irq_data *data);
 void unmask_ht_irq(struct irq_data *data);
 
 /* The arch hook for getting things started */
-int arch_setup_ht_irq(unsigned int irq, struct pci_dev *dev);
+int arch_setup_ht_irq(int idx, int pos, struct pci_dev *dev,
+                     ht_irq_update_t *update);
+void arch_teardown_ht_irq(unsigned int irq);
 
 /* For drivers of buggy hardware */
-typedef void (ht_irq_update_t)(struct pci_dev *dev, int irq,
-                              struct ht_irq_msg *msg);
 int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update);
 
 #endif /* LINUX_HTIRQ_H */
index 696d22312b3199ed2f515240111a669a35d48934..bb9b075f0eb022e8b35fa64916af56245243cc5b 100644 (file)
@@ -50,9 +50,8 @@ extern struct fs_struct init_fs;
        .cpu_timers     = INIT_CPU_TIMERS(sig.cpu_timers),              \
        .rlim           = INIT_RLIMITS,                                 \
        .cputimer       = {                                             \
-               .cputime = INIT_CPUTIME,                                \
-               .running = 0,                                           \
-               .lock = __RAW_SPIN_LOCK_UNLOCKED(sig.cputimer.lock),    \
+               .cputime_atomic = INIT_CPUTIME_ATOMIC,                  \
+               .running        = 0,                                    \
        },                                                              \
        .cred_guard_mutex =                                             \
                 __MUTEX_INITIALIZER(sig.cred_guard_mutex),             \
index 796ef9645827f000cb76ce4cd8637dc6cfa4db7a..3665cb331ca1c6f58276cc1ea14cbe77ce19d3b1 100644 (file)
@@ -87,6 +87,7 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
 /*
  * Decoding Capability Register
  */
+#define cap_pi_support(c)      (((c) >> 59) & 1)
 #define cap_read_drain(c)      (((c) >> 55) & 1)
 #define cap_write_drain(c)     (((c) >> 54) & 1)
 #define cap_max_amask_val(c)   (((c) >> 48) & 0x3f)
@@ -115,13 +116,14 @@ static inline void dmar_writeq(void __iomem *addr, u64 val)
  * Extended Capability Register
  */
 
+#define ecap_pasid(e)          ((e >> 40) & 0x1)
 #define ecap_pss(e)            ((e >> 35) & 0x1f)
 #define ecap_eafs(e)           ((e >> 34) & 0x1)
 #define ecap_nwfs(e)           ((e >> 33) & 0x1)
 #define ecap_srs(e)            ((e >> 31) & 0x1)
 #define ecap_ers(e)            ((e >> 30) & 0x1)
 #define ecap_prs(e)            ((e >> 29) & 0x1)
-#define ecap_pasid(e)          ((e >> 28) & 0x1)
+/* PASID support used to be on bit 28 */
 #define ecap_dis(e)            ((e >> 27) & 0x1)
 #define ecap_nest(e)           ((e >> 26) & 0x1)
 #define ecap_mts(e)            ((e >> 25) & 0x1)
@@ -298,6 +300,8 @@ struct q_inval {
 
 #define INTR_REMAP_TABLE_ENTRIES       65536
 
+struct irq_domain;
+
 struct ir_table {
        struct irte *base;
        unsigned long *bitmap;
@@ -347,6 +351,8 @@ struct intel_iommu {
 
 #ifdef CONFIG_IRQ_REMAP
        struct ir_table *ir_table;      /* Interrupt remapping info */
+       struct irq_domain *ir_domain;
+       struct irq_domain *ir_msi_domain;
 #endif
        struct device   *iommu_dev; /* IOMMU-sysfs device */
        int             node;
index 950ae45018260224c9138b043f9071ce4d1f9acc..be7e75c945e97b07d5f248ded5c1e0d9240756ad 100644 (file)
@@ -413,7 +413,8 @@ enum
        BLOCK_IOPOLL_SOFTIRQ,
        TASKLET_SOFTIRQ,
        SCHED_SOFTIRQ,
-       HRTIMER_SOFTIRQ,
+       HRTIMER_SOFTIRQ, /* Unused, but kept as tools rely on the
+                           numbering. Sigh! */
        RCU_SOFTIRQ,    /* Preferably, RCU should always be the last softirq */
 
        NR_SOFTIRQS
@@ -592,10 +593,10 @@ tasklet_hrtimer_init(struct tasklet_hrtimer *ttimer,
                     clockid_t which_clock, enum hrtimer_mode mode);
 
 static inline
-int tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
-                         const enum hrtimer_mode mode)
+void tasklet_hrtimer_start(struct tasklet_hrtimer *ttimer, ktime_t time,
+                          const enum hrtimer_mode mode)
 {
-       return hrtimer_start(&ttimer->timer, time, mode);
+       hrtimer_start(&ttimer->timer, time, mode);
 }
 
 static inline
index 657fab4efab351070a9aaf7a625759159a786580..c27dde7215b5b291394747d35e1f4a19d9f1ac8e 100644 (file)
@@ -141,6 +141,7 @@ static inline void __iomem *
 io_mapping_map_atomic_wc(struct io_mapping *mapping,
                         unsigned long offset)
 {
+       preempt_disable();
        pagefault_disable();
        return ((char __force __iomem *) mapping) + offset;
 }
@@ -149,6 +150,7 @@ static inline void
 io_mapping_unmap_atomic(void __iomem *vaddr)
 {
        pagefault_enable();
+       preempt_enable();
 }
 
 /* Non-atomic map/unmap */
index 986f2bffea1edc95513361a76fa55df5a3e06c06..fb5a99800e77faf6481363fb1fe43bb156d8b213 100644 (file)
@@ -19,6 +19,7 @@
 #define _LINUX_IO_H
 
 #include <linux/types.h>
+#include <linux/init.h>
 #include <asm/io.h>
 #include <asm/page.h>
 
@@ -111,6 +112,13 @@ static inline void arch_phys_wc_del(int handle)
 }
 
 #define arch_phys_wc_add arch_phys_wc_add
+#ifndef arch_phys_wc_index
+static inline int arch_phys_wc_index(int handle)
+{
+       return -1;
+}
+#define arch_phys_wc_index arch_phys_wc_index
+#endif
 #endif
 
 #endif /* _LINUX_IO_H */
index 62c6901cab550d7f57039c5b7052fc08bbe0964d..48cb7d1aa58f00d183566119ea50789f12e6d90d 100644 (file)
@@ -327,6 +327,7 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d)
  * @irq_write_msi_msg: optional to write message content for MSI
  * @irq_get_irqchip_state:     return the internal state of an interrupt
  * @irq_set_irqchip_state:     set the internal state of a interrupt
+ * @irq_set_vcpu_affinity:     optional to target a vCPU in a virtual machine
  * @flags:             chip specific flags
  */
 struct irq_chip {
@@ -369,6 +370,8 @@ struct irq_chip {
        int             (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state);
        int             (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state);
 
+       int             (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info);
+
        unsigned long   flags;
 };
 
@@ -422,6 +425,7 @@ extern void irq_cpu_online(void);
 extern void irq_cpu_offline(void);
 extern int irq_set_affinity_locked(struct irq_data *data,
                                   const struct cpumask *cpumask, bool force);
+extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info);
 
 #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ)
 void irq_move_irq(struct irq_data *data);
@@ -467,6 +471,8 @@ extern int irq_chip_set_affinity_parent(struct irq_data *data,
                                        const struct cpumask *dest,
                                        bool force);
 extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on);
+extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data,
+                                            void *vcpu_info);
 #endif
 
 /* Handling of unhandled and spurious interrupts: */
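
The vCPU affinity hook is deliberately opaque: the core just forwards the
cookie down to the irqchip. A hedged sketch (based on the posted-interrupt
usage, where passing NULL is assumed to revert to plain remapped delivery):

/* Sketch: hand arch-defined vCPU data to the chip; NULL to revert. */
static int demo_set_vcpu(unsigned int irq, void *arch_vcpu_info)
{
	return irq_set_vcpu_affinity(irq, arch_vcpu_info);
}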
index 36ec4ae746345aaa9b79698546c3568976e0b2c3..9de976b4f9a79c8ef8bb43e7a150a2533e74f39a 100644 (file)
@@ -95,8 +95,6 @@
 
 struct device_node;
 
-extern struct irq_chip gic_arch_extn;
-
 void gic_set_irqchip_flags(unsigned long flags);
 void gic_init_bases(unsigned int, int, void __iomem *, void __iomem *,
                    u32 offset, struct device_node *);
index dd1109fb241e42263e5851ddbc325f469c42a87c..a113a8dc7438bb577ce4112bdd7e193bb7c4381e 100644 (file)
@@ -93,6 +93,15 @@ struct irq_desc {
 extern struct irq_desc irq_desc[NR_IRQS];
 #endif
 
+static inline struct irq_desc *irq_data_to_desc(struct irq_data *data)
+{
+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY
+       return irq_to_desc(data->irq);
+#else
+       return container_of(data, struct irq_desc, irq_data);
+#endif
+}
+
 static inline struct irq_data *irq_desc_get_irq_data(struct irq_desc *desc)
 {
        return &desc->irq_data;
index c367cbdf73ab1a5b83f1af48c848be21b466167d..535fd3bb1ba889d77afc3d7ad25c093810b5bfd6 100644 (file)
@@ -7,6 +7,7 @@
 #include <linux/time.h>
 #include <linux/timex.h>
 #include <asm/param.h>                 /* for HZ */
+#include <generated/timeconst.h>
 
 /*
  * The following defines establish the engineering parameters of the PLL
@@ -288,8 +289,133 @@ static inline u64 jiffies_to_nsecs(const unsigned long j)
        return (u64)jiffies_to_usecs(j) * NSEC_PER_USEC;
 }
 
-extern unsigned long msecs_to_jiffies(const unsigned int m);
-extern unsigned long usecs_to_jiffies(const unsigned int u);
+extern unsigned long __msecs_to_jiffies(const unsigned int m);
+#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
+/*
+ * HZ is equal to or smaller than 1000, and 1000 is a nice round
+ * multiple of HZ, divide with the factor between them, but round
+ * upwards:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
+}
+#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
+/*
+ * HZ is larger than 1000, and HZ is a nice round multiple of 1000 -
+ * simply multiply with the factor between them.
+ *
+ * But first make sure the multiplication result cannot overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+       return m * (HZ / MSEC_PER_SEC);
+}
+#else
+/*
+ * Generic case - multiply, round and divide. But first check that if
+ * we are doing a net multiplication, that we wouldn't overflow:
+ */
+static inline unsigned long _msecs_to_jiffies(const unsigned int m)
+{
+       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
+               return MAX_JIFFY_OFFSET;
+
+       return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32) >> MSEC_TO_HZ_SHR32;
+}
+#endif
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * conversion is done as follows:
+ *
+ * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
+ *
+ * - 'too large' values [that would result in larger than
+ *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ *   the input value by a factor or dividing it with a factor and
+ *   handling any 32-bit overflows.
+ *   for the details see __msecs_to_jiffies()
+ *
+ * msecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _msecs_to_jiffies() are called both
+ * directly here and from __msecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long msecs_to_jiffies(const unsigned int m)
+{
+       if (__builtin_constant_p(m)) {
+               if ((int)m < 0)
+                       return MAX_JIFFY_OFFSET;
+               return _msecs_to_jiffies(m);
+       } else {
+               return __msecs_to_jiffies(m);
+       }
+}
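
A worked example of the constant-folding path: assuming HZ = 250 (so
MSEC_PER_SEC / HZ == 4), the first branch applies and msecs_to_jiffies(10)
folds to (10 + 4 - 1) / 4 == 3 jiffies, i.e. 2.5 ticks rounded up:

/* Sketch: with HZ == 250 this folds to the constant 3 at compile time. */
static inline unsigned long demo_timeout(void)
{
	return msecs_to_jiffies(10);	/* (10 + 3) / 4 == 3 */
}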
+
+extern unsigned long __usecs_to_jiffies(const unsigned int u);
+#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
+}
+#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+       return u * (HZ / USEC_PER_SEC);
+}
+#else
+static inline unsigned long _usecs_to_jiffies(const unsigned int u)
+{
+       return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
+               >> USEC_TO_HZ_SHR32;
+}
+#endif
+
+/**
+ * usecs_to_jiffies: - convert microseconds to jiffies
+ * @u: time in microseconds
+ *
+ * conversion is done as follows:
+ *
+ * - 'too large' values [that would result in larger than
+ *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
+ *
+ * - all other values are converted to jiffies by either multiplying
+ *   the input value by a factor or dividing it with a factor and
+ *   handling any 32-bit overflows as for msecs_to_jiffies.
+ *
+ * usecs_to_jiffies() checks for the passed in value being a constant
+ * via __builtin_constant_p() allowing gcc to eliminate most of the
+ * code, __usecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * the HZ range specific helpers _usecs_to_jiffies() are called both
+ * directly here and from __usecs_to_jiffies() in the case where
+ * constant folding is not possible.
+ */
+static inline unsigned long usecs_to_jiffies(const unsigned int u)
+{
+       if (__builtin_constant_p(u)) {
+               if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
+                       return MAX_JIFFY_OFFSET;
+               return _usecs_to_jiffies(u);
+       } else {
+               return __usecs_to_jiffies(u);
+       }
+}
+
 extern unsigned long timespec_to_jiffies(const struct timespec *value);
 extern void jiffies_to_timespec(const unsigned long jiffies,
                                struct timespec *value);
index 3a5b48e52a9ee3ce035a7a36a9cf35a2cc807bcb..060dd7b61c6d411bd1a8b9c48fa9f4010450e81d 100644 (file)
@@ -244,7 +244,8 @@ static inline u32 reciprocal_scale(u32 val, u32 ep_ro)
 
 #if defined(CONFIG_MMU) && \
        (defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP))
-void might_fault(void);
+#define might_fault() __might_fault(__FILE__, __LINE__)
+void __might_fault(const char *file, int line);
 #else
 static inline void might_fault(void) { }
 #endif
index 5fc3d1083071ca24a96a6da038324fafae997667..2b6a204bd8d40cfb74db80e19bec216795366e33 100644 (file)
@@ -166,19 +166,34 @@ static inline bool ktime_before(const ktime_t cmp1, const ktime_t cmp2)
 }
 
 #if BITS_PER_LONG < 64
-extern u64 __ktime_divns(const ktime_t kt, s64 div);
-static inline u64 ktime_divns(const ktime_t kt, s64 div)
+extern s64 __ktime_divns(const ktime_t kt, s64 div);
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
 {
+       /*
+        * Negative divisors could cause an infinite loop,
+        * so bug out here.
+        */
+       BUG_ON(div < 0);
        if (__builtin_constant_p(div) && !(div >> 32)) {
-               u64 ns = kt.tv64;
-               do_div(ns, div);
-               return ns;
+               s64 ns = kt.tv64;
+               u64 tmp = ns < 0 ? -ns : ns;
+
+               do_div(tmp, div);
+               return ns < 0 ? -tmp : tmp;
        } else {
                return __ktime_divns(kt, div);
        }
 }
 #else /* BITS_PER_LONG < 64 */
-# define ktime_divns(kt, div)          (u64)((kt).tv64 / (div))
+static inline s64 ktime_divns(const ktime_t kt, s64 div)
+{
+       /*
+        * The 32-bit implementation cannot handle negative divisors,
+        * so catch them on 64-bit builds as well.
+        */
+       WARN_ON(div < 0);
+       return kt.tv64 / div;
+}
 #endif
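
A worked example of the sign handling (which matters because do_div() is
unsigned): for kt = -2500 ns and div = 1000, tmp becomes 2500, do_div() yields
2, and the re-negation gives -2, truncation toward zero, matching the plain
division on the 64-bit path:

/* Sketch: negative dividends truncate toward zero on both paths. */
static s64 demo_divns(void)
{
	ktime_t kt = ns_to_ktime(-2500);

	return ktime_divns(kt, 1000);	/* == -2 */
}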
 
 static inline s64 ktime_to_us(const ktime_t kt)
index 0081f000e34b30f0ba0f6562274bb33a9ae0e74c..c92ebd100d9b653e25ed9accc4d1ac4569bea784 100644 (file)
@@ -52,10 +52,15 @@ struct lglock {
        static struct lglock name = { .lock = &name ## _lock }
 
 void lg_lock_init(struct lglock *lg, char *name);
+
 void lg_local_lock(struct lglock *lg);
 void lg_local_unlock(struct lglock *lg);
 void lg_local_lock_cpu(struct lglock *lg, int cpu);
 void lg_local_unlock_cpu(struct lglock *lg, int cpu);
+
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2);
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2);
+
 void lg_global_lock(struct lglock *lg);
 void lg_global_unlock(struct lglock *lg);
 
index 8dad4a307bb8c4b086e8f6f62211d91d8c05a53a..28aeae46f355fdfdc96f28dd5a091dacbc85e9b8 100644 (file)
@@ -205,6 +205,7 @@ enum {
        ATA_LFLAG_SW_ACTIVITY   = (1 << 7), /* keep activity stats */
        ATA_LFLAG_NO_LPM        = (1 << 8), /* disable LPM on this link */
        ATA_LFLAG_RST_ONCE      = (1 << 9), /* limit recovery to one reset */
+       ATA_LFLAG_CHANGED       = (1 << 10), /* LPM state changed on this link */
 
        /* struct ata_port flags */
        ATA_FLAG_SLAVE_POSS     = (1 << 0), /* host supports slave dev */
@@ -309,6 +310,12 @@ enum {
         */
        ATA_TMOUT_PMP_SRST_WAIT = 5000,
 
+       /* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
+        * be a spurious PHY event, so ignore the first PHY event that
+        * occurs within 10s after the policy change.
+        */
+       ATA_TMOUT_SPURIOUS_PHY  = 10000,
+
        /* ATA bus states */
        BUS_UNKNOWN             = 0,
        BUS_DMA                 = 1,
@@ -788,6 +795,8 @@ struct ata_link {
        struct ata_eh_context   eh_context;
 
        struct ata_device       device[ATA_MAX_DEVICES];
+
+       unsigned long           last_lpm_change; /* when last LPM change happened */
 };
 #define ATA_LINK_CLEAR_BEGIN           offsetof(struct ata_link, active_tag)
 #define ATA_LINK_CLEAR_END             offsetof(struct ata_link, device[0])
@@ -1201,6 +1210,7 @@ extern struct ata_device *ata_dev_pair(struct ata_device *adev);
 extern int ata_do_set_mode(struct ata_link *link, struct ata_device **r_failed_dev);
 extern void ata_scsi_port_error_handler(struct Scsi_Host *host, struct ata_port *ap);
 extern void ata_scsi_cmd_error_handler(struct Scsi_Host *host, struct ata_port *ap, struct list_head *eh_q);
+extern bool sata_lpm_ignore_phy_events(struct ata_link *link);
 
 extern int ata_cable_40wire(struct ata_port *ap);
 extern int ata_cable_80wire(struct ata_port *ap);
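Putting the pieces together, an error-handling path that sees a PHY event would consult the new helper before escalating; a sketch, assuming the surrounding EH plumbing exists:

    static void example_phy_event(struct ata_link *link)
    {
            /* The first PHY event within ATA_TMOUT_SPURIOUS_PHY (10s)
             * of an LPM policy change is likely spurious. */
            if (sata_lpm_ignore_phy_events(link))
                    return;

            ata_link_abort(link);   /* proceed with normal recovery */
    }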
index 066ba4157541a94b0bfadf4b9fb2e245fc75ab8e..2722111591a398a8ad37e9001e1453d43a15cb1d 100644 (file)
@@ -130,8 +130,8 @@ enum bounce_type {
 };
 
 struct lock_class_stats {
-       unsigned long                   contention_point[4];
-       unsigned long                   contending_point[4];
+       unsigned long                   contention_point[LOCKSTAT_POINTS];
+       unsigned long                   contending_point[LOCKSTAT_POINTS];
        struct lock_time                read_waittime;
        struct lock_time                write_waittime;
        struct lock_time                read_holdtime;
index 72dff5fb0d0ceaf0386d80f1cba83c7bb2c7632f..6c8918114804fda89d00ed3e6b1482539f2dd4ee 100644 (file)
@@ -463,6 +463,8 @@ memcg_kmem_newpage_charge(gfp_t gfp, struct mem_cgroup **memcg, int order)
        if (!memcg_kmem_enabled())
                return true;
 
+       if (gfp & __GFP_NOACCOUNT)
+               return true;
        /*
         * __GFP_NOFAIL allocations will move on even if charging is not
         * possible. Therefore we don't even try, and have this allocation
@@ -522,6 +524,8 @@ memcg_kmem_get_cache(struct kmem_cache *cachep, gfp_t gfp)
 {
        if (!memcg_kmem_enabled())
                return cachep;
+       if (gfp & __GFP_NOACCOUNT)
+               return cachep;
        if (gfp & __GFP_NOFAIL)
                return cachep;
        if (in_interrupt() || (!current->mm) || (current->flags & PF_KTHREAD))
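The net effect is that a caller can exempt a single allocation from kmemcg accounting; as a one-line illustration (call site invented):

    ptr = kmalloc(size, GFP_KERNEL | __GFP_NOACCOUNT);

With the two early returns above, neither the page charge nor the per-memcg cache lookup is attempted for such allocations.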
index c8990779f0c33b99e552ca9406621cde03f49443..d8c6334cd15005c16162f57959e20b2e09f99d03 100644 (file)
@@ -1,16 +1,15 @@
 #ifndef _LINUX_NAMEI_H
 #define _LINUX_NAMEI_H
 
-#include <linux/dcache.h>
-#include <linux/errno.h>
-#include <linux/linkage.h>
+#include <linux/kernel.h>
 #include <linux/path.h>
-
-struct vfsmount;
-struct nameidata;
+#include <linux/fcntl.h>
+#include <linux/errno.h>
 
 enum { MAX_NESTED_LINKS = 8 };
 
+#define MAXSYMLINKS 40
+
 /*
  * Type of the last component on LOOKUP_PARENT
  */
@@ -45,13 +44,29 @@ enum {LAST_NORM, LAST_ROOT, LAST_DOT, LAST_DOTDOT, LAST_BIND};
 #define LOOKUP_ROOT            0x2000
 #define LOOKUP_EMPTY           0x4000
 
-extern int user_path_at(int, const char __user *, unsigned, struct path *);
 extern int user_path_at_empty(int, const char __user *, unsigned, struct path *, int *empty);
 
-#define user_path(name, path) user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW, path)
-#define user_lpath(name, path) user_path_at(AT_FDCWD, name, 0, path)
-#define user_path_dir(name, path) \
-       user_path_at(AT_FDCWD, name, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path)
+static inline int user_path_at(int dfd, const char __user *name, unsigned flags,
+                struct path *path)
+{
+       return user_path_at_empty(dfd, name, flags, path, NULL);
+}
+
+static inline int user_path(const char __user *name, struct path *path)
+{
+       return user_path_at_empty(AT_FDCWD, name, LOOKUP_FOLLOW, path, NULL);
+}
+
+static inline int user_lpath(const char __user *name, struct path *path)
+{
+       return user_path_at_empty(AT_FDCWD, name, 0, path, NULL);
+}
+
+static inline int user_path_dir(const char __user *name, struct path *path)
+{
+       return user_path_at_empty(AT_FDCWD, name,
+                                 LOOKUP_FOLLOW | LOOKUP_DIRECTORY, path, NULL);
+}
 
 extern int kern_path(const char *, unsigned, struct path *);
 
@@ -70,9 +85,7 @@ extern int follow_up(struct path *);
 extern struct dentry *lock_rename(struct dentry *, struct dentry *);
 extern void unlock_rename(struct dentry *, struct dentry *);
 
-extern void nd_jump_link(struct nameidata *nd, struct path *path);
-extern void nd_set_link(struct nameidata *nd, char *path);
-extern char *nd_get_link(struct nameidata *nd);
+extern void nd_jump_link(struct path *path);
 
 static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
 {
index 1899c74a712791ba33f7e17f2d139da21da70c42..05b9a694e21312ad26beec7dfa0f32f719cc8c87 100644 (file)
@@ -25,7 +25,6 @@
 #ifndef _LINUX_NETDEVICE_H
 #define _LINUX_NETDEVICE_H
 
-#include <linux/pm_qos.h>
 #include <linux/timer.h>
 #include <linux/bug.h>
 #include <linux/delay.h>
@@ -1499,8 +1498,6 @@ enum netdev_priv_flags {
  *
  *     @qdisc_tx_busylock:     XXX: need comments on this one
  *
- *     @pm_qos_req:    Power Management QoS object
- *
  *     FIXME: cleanup struct net_device such that network protocol info
  *     moves out.
  */
index ddeaae6d2083b256b21b930f3eed8182510e26a8..b871ff9d81d7207333fa021e6a95cb6bdbcf34ac 100644 (file)
@@ -121,6 +121,8 @@ extern struct device_node *of_stdout;
 extern raw_spinlock_t devtree_lock;
 
 #ifdef CONFIG_OF
+void of_core_init(void);
+
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
        return fwnode && fwnode->type == FWNODE_OF;
@@ -376,6 +378,10 @@ bool of_console_check(struct device_node *dn, char *name, int index);
 
 #else /* CONFIG_OF */
 
+static inline void of_core_init(void)
+{
+}
+
 static inline bool is_of_node(struct fwnode_handle *fwnode)
 {
        return false;
index 3a6490e81b2856821ca190f438c039136a7fa207..703ea5c30a33f84bd60fcc19415b48c53084f5c7 100644 (file)
@@ -32,4 +32,9 @@ static inline void osq_lock_init(struct optimistic_spin_queue *lock)
 extern bool osq_lock(struct optimistic_spin_queue *lock);
 extern void osq_unlock(struct optimistic_spin_queue *lock);
 
+static inline bool osq_is_locked(struct optimistic_spin_queue *lock)
+{
+       return atomic_read(&lock->tail) != OSQ_UNLOCKED_VAL;
+}
+
 #endif
index 50e50095c8d172777c4ea2857435444385b81ece..84a1094496100906c1b89714f921451a00babb9f 100644 (file)
@@ -41,7 +41,12 @@ void percpu_counter_destroy(struct percpu_counter *fbc);
 void percpu_counter_set(struct percpu_counter *fbc, s64 amount);
 void __percpu_counter_add(struct percpu_counter *fbc, s64 amount, s32 batch);
 s64 __percpu_counter_sum(struct percpu_counter *fbc);
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs);
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+
+static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+{
+       return __percpu_counter_compare(fbc, rhs, percpu_counter_batch);
+}
 
 static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
@@ -116,6 +121,12 @@ static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
                return 0;
 }
 
+static inline int
+__percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
+{
+       return percpu_counter_compare(fbc, rhs);
+}
+
 static inline void
 percpu_counter_add(struct percpu_counter *fbc, s64 amount)
 {
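The batch-taking variant lets a subsystem trade per-CPU drift against comparison precision per call site. A sketch of a hypothetical free-space check (the batch value and helper are invented):

    static int example_reserve_blocks(struct percpu_counter *free, s64 nblocks)
    {
            /* Use a small batch so the comparison is tight near ENOSPC. */
            if (__percpu_counter_compare(free, nblocks, 64) < 0)
                    return -ENOSPC;

            percpu_counter_sub(free, nblocks);
            return 0;
    }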
index 61992cf2e9771699ee06595c8fbb1bd39633018a..1b82d44b0a02d278f980acb0ae2158d2462fb329 100644 (file)
@@ -92,8 +92,6 @@ struct hw_perf_event_extra {
        int             idx;    /* index in shared_regs->regs[] */
 };
 
-struct event_constraint;
-
 /**
  * struct hw_perf_event - performance event hardware details:
  */
@@ -112,8 +110,6 @@ struct hw_perf_event {
 
                        struct hw_perf_event_extra extra_reg;
                        struct hw_perf_event_extra branch_reg;
-
-                       struct event_constraint *constraint;
                };
                struct { /* software */
                        struct hrtimer  hrtimer;
@@ -124,7 +120,7 @@ struct hw_perf_event {
                };
                struct { /* intel_cqm */
                        int                     cqm_state;
-                       int                     cqm_rmid;
+                       u32                     cqm_rmid;
                        struct list_head        cqm_events_entry;
                        struct list_head        cqm_groups_entry;
                        struct list_head        cqm_group_entry;
@@ -566,8 +562,12 @@ struct perf_cpu_context {
        struct perf_event_context       *task_ctx;
        int                             active_oncpu;
        int                             exclusive;
+
+       raw_spinlock_t                  hrtimer_lock;
        struct hrtimer                  hrtimer;
        ktime_t                         hrtimer_interval;
+       unsigned int                    hrtimer_active;
+
        struct pmu                      *unique_pmu;
        struct perf_cgroup              *cgrp;
 };
@@ -734,6 +734,22 @@ extern int perf_event_overflow(struct perf_event *event,
                                 struct perf_sample_data *data,
                                 struct pt_regs *regs);
 
+extern void perf_event_output(struct perf_event *event,
+                               struct perf_sample_data *data,
+                               struct pt_regs *regs);
+
+extern void
+perf_event_header__init_id(struct perf_event_header *header,
+                          struct perf_sample_data *data,
+                          struct perf_event *event);
+extern void
+perf_event__output_id_sample(struct perf_event *event,
+                            struct perf_output_handle *handle,
+                            struct perf_sample_data *sample);
+
+extern void
+perf_log_lost_samples(struct perf_event *event, u64 lost);
+
 static inline bool is_sampling_event(struct perf_event *event)
 {
        return event->attr.sample_period != 0;
@@ -798,11 +814,33 @@ perf_sw_event_sched(u32 event_id, u64 nr, u64 addr)
 
 extern struct static_key_deferred perf_sched_events;
 
+static __always_inline bool
+perf_sw_migrate_enabled(void)
+{
+       if (static_key_false(&perf_swevent_enabled[PERF_COUNT_SW_CPU_MIGRATIONS]))
+               return true;
+       return false;
+}
+
+static inline void perf_event_task_migrate(struct task_struct *task)
+{
+       if (perf_sw_migrate_enabled())
+               task->sched_migrated = 1;
+}
+
 static inline void perf_event_task_sched_in(struct task_struct *prev,
                                            struct task_struct *task)
 {
        if (static_key_false(&perf_sched_events.key))
                __perf_event_task_sched_in(prev, task);
+
+       if (perf_sw_migrate_enabled() && task->sched_migrated) {
+               struct pt_regs *regs = this_cpu_ptr(&__perf_regs[0]);
+
+               perf_fetch_caller_regs(regs);
+               ___perf_sw_event(PERF_COUNT_SW_CPU_MIGRATIONS, 1, regs, 0);
+               task->sched_migrated = 0;
+       }
 }
 
 static inline void perf_event_task_sched_out(struct task_struct *prev,
@@ -925,6 +963,8 @@ perf_aux_output_skip(struct perf_output_handle *handle,
 static inline void *
 perf_get_aux(struct perf_output_handle *handle)                                { return NULL; }
 static inline void
+perf_event_task_migrate(struct task_struct *task)                      { }
+static inline void
 perf_event_task_sched_in(struct task_struct *prev,
                         struct task_struct *task)                      { }
 static inline void
index a947ab8b441ad968f0953e6ce15d03270325e55b..533d9807e543701099cb77dbbb780513b7d48381 100644 (file)
@@ -5,8 +5,6 @@
 #ifndef __LINUX_PLATFORM_DATA_SI5351_H__
 #define __LINUX_PLATFORM_DATA_SI5351_H__
 
-struct clk;
-
 /**
  * enum si5351_pll_src - Si5351 pll clock source
  * @SI5351_PLL_SRC_DEFAULT: default, do not change eeprom config
@@ -107,8 +105,6 @@ struct si5351_clkout_config {
  * @clkout: array of clkout configuration
  */
 struct si5351_platform_data {
-       struct clk *clk_xtal;
-       struct clk *clk_clkin;
        enum si5351_pll_src pll_src[2];
        struct si5351_clkout_config clkout[8];
 };
index de83b4eb164287db363328f87c0f8af216497a91..0f1534acaf60983da1cc84548659ccc1398d79e9 100644 (file)
 #include <linux/list.h>
 
 /*
- * We use the MSB mostly because its available; see <linux/preempt_mask.h> for
- * the other bits -- can't include that header due to inclusion hell.
+ * We put the hardirq and softirq counter into the preemption
+ * counter. The bitmask has the following meaning:
+ *
+ * - bits 0-7 are the preemption count (max preemption depth: 256)
+ * - bits 8-15 are the softirq count (max # of softirqs: 256)
+ *
+ * The hardirq count could in theory be the same as the number of
+ * interrupts in the system, but we run all interrupt handlers with
+ * interrupts disabled, so we cannot have nesting interrupts. Though
+ * there are a few palaeontologic drivers which reenable interrupts in
+ * the handler, so we need more than one bit here.
+ *
+ *         PREEMPT_MASK:       0x000000ff
+ *         SOFTIRQ_MASK:       0x0000ff00
+ *         HARDIRQ_MASK:       0x000f0000
+ *             NMI_MASK:       0x00100000
+ *       PREEMPT_ACTIVE:       0x00200000
+ * PREEMPT_NEED_RESCHED:       0x80000000
  */
+#define PREEMPT_BITS   8
+#define SOFTIRQ_BITS   8
+#define HARDIRQ_BITS   4
+#define NMI_BITS       1
+
+#define PREEMPT_SHIFT  0
+#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
+#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
+#define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)
+
+#define __IRQ_MASK(x)  ((1UL << (x))-1)
+
+#define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
+#define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
+#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
+#define NMI_MASK       (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
+
+#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
+#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
+#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
+#define NMI_OFFSET     (1UL << NMI_SHIFT)
+
+#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
+
+#define PREEMPT_ACTIVE_BITS    1
+#define PREEMPT_ACTIVE_SHIFT   (NMI_SHIFT + NMI_BITS)
+#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
+
+/* We use the MSB mostly because it's available */
 #define PREEMPT_NEED_RESCHED   0x80000000
 
+/* preempt_count() and related functions, depends on PREEMPT_NEED_RESCHED */
 #include <asm/preempt.h>
 
+#define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
+#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
+#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
+                                | NMI_MASK))
+
+/*
+ * Are we doing bottom half or hardware interrupt processing?
+ * Are we in a softirq context? Interrupt context?
+ * in_softirq - Are we currently processing softirq or have bh disabled?
+ * in_serving_softirq - Are we currently processing softirq?
+ */
+#define in_irq()               (hardirq_count())
+#define in_softirq()           (softirq_count())
+#define in_interrupt()         (irq_count())
+#define in_serving_softirq()   (softirq_count() & SOFTIRQ_OFFSET)
+
+/*
+ * Are we in NMI context?
+ */
+#define in_nmi()       (preempt_count() & NMI_MASK)
+
+#if defined(CONFIG_PREEMPT_COUNT)
+# define PREEMPT_DISABLE_OFFSET 1
+#else
+# define PREEMPT_DISABLE_OFFSET 0
+#endif
+
+/*
+ * The preempt_count offset needed for things like:
+ *
+ *  spin_lock_bh()
+ *
+ * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
+ * softirqs, such that unlock sequences of:
+ *
+ *  spin_unlock();
+ *  local_bh_enable();
+ *
+ * Work as expected.
+ */
+#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_DISABLE_OFFSET)
+
+/*
+ * Are we running in atomic context?  WARNING: this macro cannot
+ * always detect atomic context; in particular, it cannot know about
+ * held spinlocks in non-preemptible kernels.  Thus it should not be
+ * used in the general case to determine whether sleeping is possible.
+ * Do not use in_atomic() in driver code.
+ */
+#define in_atomic()    (preempt_count() != 0)
+
+/*
+ * Check whether we were atomic before we did preempt_disable():
+ * (used by the scheduler)
+ */
+#define in_atomic_preempt_off() \
+               ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_DISABLE_OFFSET)
+
 #if defined(CONFIG_DEBUG_PREEMPT) || defined(CONFIG_PREEMPT_TRACER)
 extern void preempt_count_add(int val);
 extern void preempt_count_sub(int val);
@@ -33,6 +137,18 @@ extern void preempt_count_sub(int val);
 #define preempt_count_inc() preempt_count_add(1)
 #define preempt_count_dec() preempt_count_sub(1)
 
+#define preempt_active_enter() \
+do { \
+       preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+       barrier(); \
+} while (0)
+
+#define preempt_active_exit() \
+do { \
+       barrier(); \
+       preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET); \
+} while (0)
+
 #ifdef CONFIG_PREEMPT_COUNT
 
 #define preempt_disable() \
@@ -49,6 +165,8 @@ do { \
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
+#define preemptible()  (preempt_count() == 0 && !irqs_disabled())
+
 #ifdef CONFIG_PREEMPT
 #define preempt_enable() \
 do { \
@@ -57,52 +175,46 @@ do { \
                __preempt_schedule(); \
 } while (0)
 
+#define preempt_enable_notrace() \
+do { \
+       barrier(); \
+       if (unlikely(__preempt_count_dec_and_test())) \
+               __preempt_schedule_notrace(); \
+} while (0)
+
 #define preempt_check_resched() \
 do { \
        if (should_resched()) \
                __preempt_schedule(); \
 } while (0)
 
-#else
+#else /* !CONFIG_PREEMPT */
 #define preempt_enable() \
 do { \
        barrier(); \
        preempt_count_dec(); \
 } while (0)
-#define preempt_check_resched() do { } while (0)
-#endif
-
-#define preempt_disable_notrace() \
-do { \
-       __preempt_count_inc(); \
-       barrier(); \
-} while (0)
 
-#define preempt_enable_no_resched_notrace() \
+#define preempt_enable_notrace() \
 do { \
        barrier(); \
        __preempt_count_dec(); \
 } while (0)
 
-#ifdef CONFIG_PREEMPT
-
-#ifndef CONFIG_CONTEXT_TRACKING
-#define __preempt_schedule_context() __preempt_schedule()
-#endif
+#define preempt_check_resched() do { } while (0)
+#endif /* CONFIG_PREEMPT */
 
-#define preempt_enable_notrace() \
+#define preempt_disable_notrace() \
 do { \
+       __preempt_count_inc(); \
        barrier(); \
-       if (unlikely(__preempt_count_dec_and_test())) \
-               __preempt_schedule_context(); \
 } while (0)
-#else
-#define preempt_enable_notrace() \
+
+#define preempt_enable_no_resched_notrace() \
 do { \
        barrier(); \
        __preempt_count_dec(); \
 } while (0)
-#endif
 
 #else /* !CONFIG_PREEMPT_COUNT */
 
@@ -121,6 +233,7 @@ do { \
 #define preempt_disable_notrace()              barrier()
 #define preempt_enable_no_resched_notrace()    barrier()
 #define preempt_enable_notrace()               barrier()
+#define preemptible()                          0
 
 #endif /* CONFIG_PREEMPT_COUNT */
 
diff --git a/include/linux/preempt_mask.h b/include/linux/preempt_mask.h
deleted file mode 100644 (file)
index dbeec4d..0000000
+++ /dev/null
@@ -1,117 +0,0 @@
-#ifndef LINUX_PREEMPT_MASK_H
-#define LINUX_PREEMPT_MASK_H
-
-#include <linux/preempt.h>
-
-/*
- * We put the hardirq and softirq counter into the preemption
- * counter. The bitmask has the following meaning:
- *
- * - bits 0-7 are the preemption count (max preemption depth: 256)
- * - bits 8-15 are the softirq count (max # of softirqs: 256)
- *
- * The hardirq count could in theory be the same as the number of
- * interrupts in the system, but we run all interrupt handlers with
- * interrupts disabled, so we cannot have nesting interrupts. Though
- * there are a few palaeontologic drivers which reenable interrupts in
- * the handler, so we need more than one bit here.
- *
- * PREEMPT_MASK:       0x000000ff
- * SOFTIRQ_MASK:       0x0000ff00
- * HARDIRQ_MASK:       0x000f0000
- *     NMI_MASK:       0x00100000
- * PREEMPT_ACTIVE:     0x00200000
- */
-#define PREEMPT_BITS   8
-#define SOFTIRQ_BITS   8
-#define HARDIRQ_BITS   4
-#define NMI_BITS       1
-
-#define PREEMPT_SHIFT  0
-#define SOFTIRQ_SHIFT  (PREEMPT_SHIFT + PREEMPT_BITS)
-#define HARDIRQ_SHIFT  (SOFTIRQ_SHIFT + SOFTIRQ_BITS)
-#define NMI_SHIFT      (HARDIRQ_SHIFT + HARDIRQ_BITS)
-
-#define __IRQ_MASK(x)  ((1UL << (x))-1)
-
-#define PREEMPT_MASK   (__IRQ_MASK(PREEMPT_BITS) << PREEMPT_SHIFT)
-#define SOFTIRQ_MASK   (__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
-#define HARDIRQ_MASK   (__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
-#define NMI_MASK       (__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
-
-#define PREEMPT_OFFSET (1UL << PREEMPT_SHIFT)
-#define SOFTIRQ_OFFSET (1UL << SOFTIRQ_SHIFT)
-#define HARDIRQ_OFFSET (1UL << HARDIRQ_SHIFT)
-#define NMI_OFFSET     (1UL << NMI_SHIFT)
-
-#define SOFTIRQ_DISABLE_OFFSET (2 * SOFTIRQ_OFFSET)
-
-#define PREEMPT_ACTIVE_BITS    1
-#define PREEMPT_ACTIVE_SHIFT   (NMI_SHIFT + NMI_BITS)
-#define PREEMPT_ACTIVE (__IRQ_MASK(PREEMPT_ACTIVE_BITS) << PREEMPT_ACTIVE_SHIFT)
-
-#define hardirq_count()        (preempt_count() & HARDIRQ_MASK)
-#define softirq_count()        (preempt_count() & SOFTIRQ_MASK)
-#define irq_count()    (preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
-                                | NMI_MASK))
-
-/*
- * Are we doing bottom half or hardware interrupt processing?
- * Are we in a softirq context? Interrupt context?
- * in_softirq - Are we currently processing softirq or have bh disabled?
- * in_serving_softirq - Are we currently processing softirq?
- */
-#define in_irq()               (hardirq_count())
-#define in_softirq()           (softirq_count())
-#define in_interrupt()         (irq_count())
-#define in_serving_softirq()   (softirq_count() & SOFTIRQ_OFFSET)
-
-/*
- * Are we in NMI context?
- */
-#define in_nmi()       (preempt_count() & NMI_MASK)
-
-#if defined(CONFIG_PREEMPT_COUNT)
-# define PREEMPT_CHECK_OFFSET 1
-#else
-# define PREEMPT_CHECK_OFFSET 0
-#endif
-
-/*
- * The preempt_count offset needed for things like:
- *
- *  spin_lock_bh()
- *
- * Which need to disable both preemption (CONFIG_PREEMPT_COUNT) and
- * softirqs, such that unlock sequences of:
- *
- *  spin_unlock();
- *  local_bh_enable();
- *
- * Work as expected.
- */
-#define SOFTIRQ_LOCK_OFFSET (SOFTIRQ_DISABLE_OFFSET + PREEMPT_CHECK_OFFSET)
-
-/*
- * Are we running in atomic context?  WARNING: this macro cannot
- * always detect atomic context; in particular, it cannot know about
- * held spinlocks in non-preemptible kernels.  Thus it should not be
- * used in the general case to determine whether sleeping is possible.
- * Do not use in_atomic() in driver code.
- */
-#define in_atomic()    ((preempt_count() & ~PREEMPT_ACTIVE) != 0)
-
-/*
- * Check whether we were atomic before we did preempt_disable():
- * (used by the scheduler, *after* releasing the kernel lock)
- */
-#define in_atomic_preempt_off() \
-               ((preempt_count() & ~PREEMPT_ACTIVE) != PREEMPT_CHECK_OFFSET)
-
-#ifdef CONFIG_PREEMPT_COUNT
-# define preemptible() (preempt_count() == 0 && !irqs_disabled())
-#else
-# define preemptible() 0
-#endif
-
-#endif /* LINUX_PREEMPT_MASK_H */
index a18b16f1dc0e44f7f5a3b99ea4f43d64c67b8a0c..17c6b1f84a77d3b3073bc272fffc6a6138b322b5 100644 (file)
@@ -29,8 +29,8 @@
  */
 static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
 {
-       ACCESS_ONCE(list->next) = list;
-       ACCESS_ONCE(list->prev) = list;
+       WRITE_ONCE(list->next, list);
+       WRITE_ONCE(list->prev, list);
 }
 
 /*
@@ -288,7 +288,7 @@ static inline void list_splice_init_rcu(struct list_head *list,
 #define list_first_or_null_rcu(ptr, type, member) \
 ({ \
        struct list_head *__ptr = (ptr); \
-       struct list_head *__next = ACCESS_ONCE(__ptr->next); \
+       struct list_head *__next = READ_ONCE(__ptr->next); \
        likely(__ptr != __next) ? list_entry_rcu(__next, type, member) : NULL; \
 })
 
@@ -549,8 +549,8 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
  */
 #define hlist_for_each_entry_from_rcu(pos, member)                     \
        for (; pos;                                                     \
-            pos = hlist_entry_safe(rcu_dereference((pos)->member.next),\
-                       typeof(*(pos)), member))
+            pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu( \
+                       &(pos)->member)), typeof(*(pos)), member))
 
 #endif /* __KERNEL__ */
 #endif
index 573a5afd5ed884d5bdcfc4af6cf88c3b9d25214d..33a056bb886faeedeb9690faefd3a4adeeedd14b 100644 (file)
@@ -44,6 +44,8 @@
 #include <linux/debugobjects.h>
 #include <linux/bug.h>
 #include <linux/compiler.h>
+#include <linux/ktime.h>
+
 #include <asm/barrier.h>
 
 extern int rcu_expedited; /* for sysctl */
@@ -292,10 +294,6 @@ void rcu_sched_qs(void);
 void rcu_bh_qs(void);
 void rcu_check_callbacks(int user);
 struct notifier_block;
-void rcu_idle_enter(void);
-void rcu_idle_exit(void);
-void rcu_irq_enter(void);
-void rcu_irq_exit(void);
 int rcu_cpu_notify(struct notifier_block *self,
                   unsigned long action, void *hcpu);
 
@@ -364,8 +362,8 @@ extern struct srcu_struct tasks_rcu_exit_srcu;
 #define rcu_note_voluntary_context_switch(t) \
        do { \
                rcu_all_qs(); \
-               if (ACCESS_ONCE((t)->rcu_tasks_holdout)) \
-                       ACCESS_ONCE((t)->rcu_tasks_holdout) = false; \
+               if (READ_ONCE((t)->rcu_tasks_holdout)) \
+                       WRITE_ONCE((t)->rcu_tasks_holdout, false); \
        } while (0)
 #else /* #ifdef CONFIG_TASKS_RCU */
 #define TASKS_RCU(x) do { } while (0)
@@ -609,7 +607,7 @@ static inline void rcu_preempt_sleep_check(void)
 
 #define __rcu_access_pointer(p, space) \
 ({ \
-       typeof(*p) *_________p1 = (typeof(*p) *__force)ACCESS_ONCE(p); \
+       typeof(*p) *_________p1 = (typeof(*p) *__force)READ_ONCE(p); \
        rcu_dereference_sparse(p, space); \
        ((typeof(*p) __force __kernel *)(_________p1)); \
 })
@@ -628,21 +626,6 @@ static inline void rcu_preempt_sleep_check(void)
        ((typeof(*p) __force __kernel *)(p)); \
 })
 
-#define __rcu_access_index(p, space) \
-({ \
-       typeof(p) _________p1 = ACCESS_ONCE(p); \
-       rcu_dereference_sparse(p, space); \
-       (_________p1); \
-})
-#define __rcu_dereference_index_check(p, c) \
-({ \
-       /* Dependency order vs. p above. */ \
-       typeof(p) _________p1 = lockless_dereference(p); \
-       rcu_lockdep_assert(c, \
-                          "suspicious rcu_dereference_index_check() usage"); \
-       (_________p1); \
-})
-
 /**
  * RCU_INITIALIZER() - statically initialize an RCU-protected global variable
  * @v: The value to statically initialize with.
@@ -659,7 +642,7 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define lockless_dereference(p) \
 ({ \
-       typeof(p) _________p1 = ACCESS_ONCE(p); \
+       typeof(p) _________p1 = READ_ONCE(p); \
        smp_read_barrier_depends(); /* Dependency order vs. p above. */ \
        (_________p1); \
 })
@@ -702,7 +685,7 @@ static inline void rcu_preempt_sleep_check(void)
  * @p: The pointer to read
  *
  * Return the value of the specified RCU-protected pointer, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
+ * smp_read_barrier_depends() and keep the READ_ONCE().  This is useful
  * when the value of this pointer is accessed, but the pointer is not
  * dereferenced, for example, when testing an RCU-protected pointer against
  * NULL.  Although rcu_access_pointer() may also be used in cases where
@@ -786,48 +769,13 @@ static inline void rcu_preempt_sleep_check(void)
  */
 #define rcu_dereference_raw_notrace(p) __rcu_dereference_check((p), 1, __rcu)
 
-/**
- * rcu_access_index() - fetch RCU index with no dereferencing
- * @p: The index to read
- *
- * Return the value of the specified RCU-protected index, but omit the
- * smp_read_barrier_depends() and keep the ACCESS_ONCE().  This is useful
- * when the value of this index is accessed, but the index is not
- * dereferenced, for example, when testing an RCU-protected index against
- * -1.  Although rcu_access_index() may also be used in cases where
- * update-side locks prevent the value of the index from changing, you
- * should instead use rcu_dereference_index_protected() for this use case.
- */
-#define rcu_access_index(p) __rcu_access_index((p), __rcu)
-
-/**
- * rcu_dereference_index_check() - rcu_dereference for indices with debug checking
- * @p: The pointer to read, prior to dereferencing
- * @c: The conditions under which the dereference will take place
- *
- * Similar to rcu_dereference_check(), but omits the sparse checking.
- * This allows rcu_dereference_index_check() to be used on integers,
- * which can then be used as array indices.  Attempting to use
- * rcu_dereference_check() on an integer will give compiler warnings
- * because the sparse address-space mechanism relies on dereferencing
- * the RCU-protected pointer.  Dereferencing integers is not something
- * that even gcc will put up with.
- *
- * Note that this function does not implicitly check for RCU read-side
- * critical sections.  If this function gains lots of uses, it might
- * make sense to provide versions for each flavor of RCU, but it does
- * not make sense as of early 2010.
- */
-#define rcu_dereference_index_check(p, c) \
-       __rcu_dereference_index_check((p), (c))
-
 /**
  * rcu_dereference_protected() - fetch RCU pointer when updates prevented
  * @p: The pointer to read, prior to dereferencing
  * @c: The conditions under which the dereference will take place
  *
  * Return the value of the specified RCU-protected pointer, but omit
- * both the smp_read_barrier_depends() and the ACCESS_ONCE().  This
+ * both the smp_read_barrier_depends() and the READ_ONCE().  This
  * is useful in cases where update-side locks prevent the value of the
  * pointer from changing.  Please note that this primitive does -not-
  * prevent the compiler from repeating this reference or combining it
@@ -1153,13 +1101,13 @@ static inline notrace void rcu_read_unlock_sched_notrace(void)
 #define kfree_rcu(ptr, rcu_head)                                       \
        __kfree_rcu(&((ptr)->rcu_head), offsetof(typeof(*(ptr)), rcu_head))
 
-#if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL)
-static inline int rcu_needs_cpu(unsigned long *delta_jiffies)
+#ifdef CONFIG_TINY_RCU
+static inline int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
-       *delta_jiffies = ULONG_MAX;
+       *nextevt = KTIME_MAX;
        return 0;
 }
-#endif /* #if defined(CONFIG_TINY_RCU) || defined(CONFIG_RCU_NOCB_CPU_ALL) */
+#endif /* #ifdef CONFIG_TINY_RCU */
 
 #if defined(CONFIG_RCU_NOCB_CPU_ALL)
 static inline bool rcu_is_nocb_cpu(int cpu) { return true; }
index 937edaeb150deb17759a9c0c715630fd0cc9a729..3df6c1ec4e25503583cb14656474d6727495a530 100644 (file)
@@ -159,6 +159,22 @@ static inline void rcu_cpu_stall_reset(void)
 {
 }
 
+static inline void rcu_idle_enter(void)
+{
+}
+
+static inline void rcu_idle_exit(void)
+{
+}
+
+static inline void rcu_irq_enter(void)
+{
+}
+
+static inline void rcu_irq_exit(void)
+{
+}
+
 static inline void exit_rcu(void)
 {
 }
index d2e583a6aacacf09ee9dc3bf3646b6a3cff3494e..456879143f89f9db45d0f79315f728f50a9f9d0c 100644 (file)
@@ -31,9 +31,7 @@
 #define __LINUX_RCUTREE_H
 
 void rcu_note_context_switch(void);
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies);
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
+int rcu_needs_cpu(u64 basem, u64 *nextevt);
 void rcu_cpu_stall_reset(void);
 
 /*
@@ -93,6 +91,11 @@ void rcu_force_quiescent_state(void);
 void rcu_bh_force_quiescent_state(void);
 void rcu_sched_force_quiescent_state(void);
 
+void rcu_idle_enter(void);
+void rcu_idle_exit(void);
+void rcu_irq_enter(void);
+void rcu_irq_exit(void);
+
 void exit_rcu(void);
 
 void rcu_scheduler_starting(void);
index dbcbcc59aa92e77de6c1c6065b403c0fa7d0dd15..843ceca9a21e5f1327fa5c82fa5f3089c5ebab23 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef _LINUX_RHASHTABLE_H
 #define _LINUX_RHASHTABLE_H
 
+#include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/errno.h>
 #include <linux/jhash.h>
@@ -100,6 +101,7 @@ struct rhashtable;
  * @key_len: Length of key
  * @key_offset: Offset of key in struct to be hashed
  * @head_offset: Offset of rhash_head in struct to be hashed
+ * @insecure_max_entries: Maximum number of entries (may be exceeded)
  * @max_size: Maximum size while expanding
  * @min_size: Minimum size while shrinking
  * @nulls_base: Base value to generate nulls marker
@@ -115,6 +117,7 @@ struct rhashtable_params {
        size_t                  key_len;
        size_t                  key_offset;
        size_t                  head_offset;
+       unsigned int            insecure_max_entries;
        unsigned int            max_size;
        unsigned int            min_size;
        u32                     nulls_base;
@@ -286,6 +289,18 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
                (!ht->p.max_size || tbl->size < ht->p.max_size);
 }
 
+/**
+ * rht_grow_above_max - returns true if table is above maximum
+ * @ht:                hash table
+ * @tbl:       current table
+ */
+static inline bool rht_grow_above_max(const struct rhashtable *ht,
+                                     const struct bucket_table *tbl)
+{
+       return ht->p.insecure_max_entries &&
+              atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+}
+
 /* The bucket lock is selected based on the hash and protects mutations
  * on a group of hash buckets.
  *
@@ -589,6 +604,10 @@ restart:
                goto out;
        }
 
+       err = -E2BIG;
+       if (unlikely(rht_grow_above_max(ht, tbl)))
+               goto out;
+
        if (unlikely(rht_grow_above_100(ht, tbl))) {
 slow_path:
                spin_unlock_bh(lock);
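A user opts into the cap purely through its parameters; a hypothetical setup (struct and sizes invented for illustration):

    struct example_obj {
            u32                     key;
            struct rhash_head       node;
    };

    static const struct rhashtable_params example_params = {
            .head_offset            = offsetof(struct example_obj, node),
            .key_offset             = offsetof(struct example_obj, key),
            .key_len                = sizeof(u32),
            .max_size               = 1 << 16,
            .insecure_max_entries   = 1 << 17,
    };

Once nelems reaches insecure_max_entries, the insertion fast path above fails with -E2BIG instead of queueing ever more growth.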
index 185a750e4ed4541c2a1a2e5c30993af1136c2d3a..6633e83e608ab55fea2c5b5b64ef0058b12a035b 100644 (file)
@@ -25,7 +25,7 @@ struct sched_param {
 #include <linux/errno.h>
 #include <linux/nodemask.h>
 #include <linux/mm_types.h>
-#include <linux/preempt_mask.h>
+#include <linux/preempt.h>
 
 #include <asm/page.h>
 #include <asm/ptrace.h>
@@ -132,6 +132,7 @@ struct fs_struct;
 struct perf_event_context;
 struct blk_plug;
 struct filename;
+struct nameidata;
 
 #define VMACACHE_BITS 2
 #define VMACACHE_SIZE (1U << VMACACHE_BITS)
@@ -173,7 +174,12 @@ extern unsigned long nr_iowait_cpu(int cpu);
 extern void get_iowait_load(unsigned long *nr_waiters, unsigned long *load);
 
 extern void calc_global_load(unsigned long ticks);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void update_cpu_load_nohz(void);
+#else
+static inline void update_cpu_load_nohz(void) { }
+#endif
 
 extern unsigned long get_parent_ip(unsigned long addr);
 
@@ -213,9 +219,10 @@ print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq);
 #define TASK_WAKEKILL          128
 #define TASK_WAKING            256
 #define TASK_PARKED            512
-#define TASK_STATE_MAX         1024
+#define TASK_NOLOAD            1024
+#define TASK_STATE_MAX         2048
 
-#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWP"
+#define TASK_STATE_TO_CHAR_STR "RSDTtXZxKWPN"
 
 extern char ___assert_task_state[1 - 2*!!(
                sizeof(TASK_STATE_TO_CHAR_STR)-1 != ilog2(TASK_STATE_MAX)+1)];
@@ -225,6 +232,8 @@ extern char ___assert_task_state[1 - 2*!!(
 #define TASK_STOPPED           (TASK_WAKEKILL | __TASK_STOPPED)
 #define TASK_TRACED            (TASK_WAKEKILL | __TASK_TRACED)
 
+#define TASK_IDLE              (TASK_UNINTERRUPTIBLE | TASK_NOLOAD)
+
 /* Convenience macros for the sake of wake_up */
 #define TASK_NORMAL            (TASK_INTERRUPTIBLE | TASK_UNINTERRUPTIBLE)
 #define TASK_ALL               (TASK_NORMAL | __TASK_STOPPED | __TASK_TRACED)
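TASK_IDLE gives kernel threads an uninterruptible sleep that does not inflate the load average. A minimal sketch of a kthread main loop using it (standard kthread helpers assumed):

    while (!kthread_should_stop()) {
            set_current_state(TASK_IDLE);
            if (!kthread_should_stop())
                    schedule();     /* sleeps in D state, but NOLOAD */
    }
    __set_current_state(TASK_RUNNING);

Together with the task_contributes_to_load() change in the next hunk, such sleeps no longer count toward loadavg.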
@@ -240,7 +249,8 @@ extern char ___assert_task_state[1 - 2*!!(
                        ((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 #define task_contributes_to_load(task) \
                                ((task->state & TASK_UNINTERRUPTIBLE) != 0 && \
-                                (task->flags & PF_FROZEN) == 0)
+                                (task->flags & PF_FROZEN) == 0 && \
+                                (task->state & TASK_NOLOAD) == 0)
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
@@ -252,7 +262,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_task_state(tsk, state_value)                       \
        do {                                                    \
                (tsk)->task_state_change = _THIS_IP_;           \
-               set_mb((tsk)->state, (state_value));            \
+               smp_store_mb((tsk)->state, (state_value));              \
        } while (0)
 
 /*
@@ -274,7 +284,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define set_current_state(state_value)                         \
        do {                                                    \
                current->task_state_change = _THIS_IP_;         \
-               set_mb(current->state, (state_value));          \
+               smp_store_mb(current->state, (state_value));            \
        } while (0)
 
 #else
@@ -282,7 +292,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define __set_task_state(tsk, state_value)             \
        do { (tsk)->state = (state_value); } while (0)
 #define set_task_state(tsk, state_value)               \
-       set_mb((tsk)->state, (state_value))
+       smp_store_mb((tsk)->state, (state_value))
 
 /*
  * set_current_state() includes a barrier so that the write of current->state
@@ -298,7 +308,7 @@ extern char ___assert_task_state[1 - 2*!!(
 #define __set_current_state(state_value)               \
        do { current->state = (state_value); } while (0)
 #define set_current_state(state_value)                 \
-       set_mb(current->state, (state_value))
+       smp_store_mb(current->state, (state_value))
 
 #endif
 
@@ -335,14 +345,10 @@ extern int runqueue_is_locked(int cpu);
 #if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
 extern void nohz_balance_enter_idle(int cpu);
 extern void set_cpu_sd_state_idle(void);
-extern int get_nohz_timer_target(int pinned);
+extern int get_nohz_timer_target(void);
 #else
 static inline void nohz_balance_enter_idle(int cpu) { }
 static inline void set_cpu_sd_state_idle(void) { }
-static inline int get_nohz_timer_target(int pinned)
-{
-       return smp_processor_id();
-}
 #endif
 
 /*
@@ -567,6 +573,23 @@ struct task_cputime {
                .sum_exec_runtime = 0,                          \
        }
 
+/*
+ * This is the atomic variant of task_cputime, which can be used for
+ * storing and updating task_cputime statistics without locking.
+ */
+struct task_cputime_atomic {
+       atomic64_t utime;
+       atomic64_t stime;
+       atomic64_t sum_exec_runtime;
+};
+
+#define INIT_CPUTIME_ATOMIC \
+       (struct task_cputime_atomic) {                          \
+               .utime = ATOMIC64_INIT(0),                      \
+               .stime = ATOMIC64_INIT(0),                      \
+               .sum_exec_runtime = ATOMIC64_INIT(0),           \
+       }
+
 #ifdef CONFIG_PREEMPT_COUNT
 #define PREEMPT_DISABLED       (1 + PREEMPT_ENABLED)
 #else
@@ -584,18 +607,16 @@ struct task_cputime {
 
 /**
  * struct thread_group_cputimer - thread group interval timer counts
- * @cputime:           thread group interval timers.
+ * @cputime_atomic:    atomic thread group interval timers.
  * @running:           non-zero when there are timers running and
  *                     @cputime_atomic receives updates.
- * @lock:              lock for fields in this struct.
  *
  * This structure contains the version of task_cputime, above, that is
  * used for thread group CPU timer calculations.
  */
 struct thread_group_cputimer {
-       struct task_cputime cputime;
+       struct task_cputime_atomic cputime_atomic;
        int running;
-       raw_spinlock_t lock;
 };
 
 #include <linux/rwsem.h>
@@ -899,6 +920,50 @@ enum cpu_idle_type {
 #define SCHED_CAPACITY_SHIFT   10
 #define SCHED_CAPACITY_SCALE   (1L << SCHED_CAPACITY_SHIFT)
 
+/*
+ * Wake-queues are lists of tasks with a pending wakeup, whose
+ * callers have already marked the task as woken internally,
+ * and can thus carry on. A common use case is being able to
+ * do the wakeups once the corresponding user lock has been
+ * released.
+ *
+ * We hold a reference to each task in the list across the wakeup,
+ * thus guaranteeing that the memory is still valid by the time
+ * the actual wakeups are performed in wake_up_q().
+ *
+ * One per task suffices, because there's never a need for a task to be
+ * in two wake queues simultaneously; it is forbidden to abandon a task
+ * in a wake queue (a call to wake_up_q() _must_ follow), so if a task is
+ * already in a wake queue, the wakeup will happen soon and the second
+ * waker can just skip it.
+ *
+ * The WAKE_Q macro declares and initializes the list head.
+ * wake_up_q() does NOT reinitialize the list; it's expected to be
+ * called near the end of a function, where the fact that the queue is
+ * not used again will be easy to see by inspection.
+ *
+ * Note that this can cause spurious wakeups. schedule() callers
+ * must ensure the call is done inside a loop, confirming that the
+ * wakeup condition has in fact occurred.
+ */
+struct wake_q_node {
+       struct wake_q_node *next;
+};
+
+struct wake_q_head {
+       struct wake_q_node *first;
+       struct wake_q_node **lastp;
+};
+
+#define WAKE_Q_TAIL ((struct wake_q_node *) 0x01)
+
+#define WAKE_Q(name)                                   \
+       struct wake_q_head name = { WAKE_Q_TAIL, &name.first }
+
+extern void wake_q_add(struct wake_q_head *head,
+                      struct task_struct *task);
+extern void wake_up_q(struct wake_q_head *head);
+
 /*
  * sched-domains (multiprocessor balancing) declarations:
  */
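A hedged sketch of the intended pattern (the lock and the waiter-picking helper are invented): wakeups are queued while the lock is held and issued after it is dropped, so a woken task does not immediately block on the lock its waker still holds.

    WAKE_Q(wake_q);
    struct task_struct *task;

    spin_lock(&example_lock);
    while ((task = example_pick_waiter()) != NULL)
            wake_q_add(&wake_q, task);
    spin_unlock(&example_lock);

    wake_up_q(&wake_q);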
@@ -1334,8 +1399,6 @@ struct task_struct {
        int rcu_read_lock_nesting;
        union rcu_special rcu_read_unlock_special;
        struct list_head rcu_node_entry;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
-#ifdef CONFIG_PREEMPT_RCU
        struct rcu_node *rcu_blocked_node;
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 #ifdef CONFIG_TASKS_RCU
@@ -1356,9 +1419,6 @@ struct task_struct {
 #endif
 
        struct mm_struct *mm, *active_mm;
-#ifdef CONFIG_COMPAT_BRK
-       unsigned brk_randomized:1;
-#endif
        /* per-thread vma caching */
        u32 vmacache_seqnum;
        struct vm_area_struct *vmacache[VMACACHE_SIZE];
@@ -1369,7 +1429,7 @@ struct task_struct {
        int exit_state;
        int exit_code, exit_signal;
        int pdeath_signal;  /*  The signal sent when the parent dies  */
-       unsigned int jobctl;    /* JOBCTL_*, siglock protected */
+       unsigned long jobctl;   /* JOBCTL_*, siglock protected */
 
        /* Used for emulating ABI behavior of previous Linux versions */
        unsigned int personality;
@@ -1381,10 +1441,14 @@ struct task_struct {
        /* Revert to default priority/policy when forking */
        unsigned sched_reset_on_fork:1;
        unsigned sched_contributes_to_load:1;
+       unsigned sched_migrated:1;
 
 #ifdef CONFIG_MEMCG_KMEM
        unsigned memcg_kmem_skip_account:1;
 #endif
+#ifdef CONFIG_COMPAT_BRK
+       unsigned brk_randomized:1;
+#endif
 
        unsigned long atomic_flags; /* Flags needing atomic access. */
 
@@ -1461,7 +1525,7 @@ struct task_struct {
                                       it with task_lock())
                                     - initialized normally by setup_new_exec */
 /* file system info */
-       int link_count, total_link_count;
+       struct nameidata *nameidata;
 #ifdef CONFIG_SYSVIPC
 /* ipc stuff */
        struct sysv_sem sysvsem;
@@ -1511,6 +1575,8 @@ struct task_struct {
        /* Protection of the PI data structures: */
        raw_spinlock_t pi_lock;
 
+       struct wake_q_node wake_q;
+
 #ifdef CONFIG_RT_MUTEXES
        /* PI waiters blocked on a rt_mutex held by this task */
        struct rb_root pi_waiters;
@@ -1724,6 +1790,7 @@ struct task_struct {
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
        unsigned long   task_state_change;
 #endif
+       int pagefault_disabled;
 };
 
 /* Future-safe accessor for struct task_struct's cpus_allowed. */
@@ -2077,22 +2144,22 @@ TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
 #define JOBCTL_TRAPPING_BIT    21      /* switching to TRACED */
 #define JOBCTL_LISTENING_BIT   22      /* ptracer is listening for events */
 
-#define JOBCTL_STOP_DEQUEUED   (1 << JOBCTL_STOP_DEQUEUED_BIT)
-#define JOBCTL_STOP_PENDING    (1 << JOBCTL_STOP_PENDING_BIT)
-#define JOBCTL_STOP_CONSUME    (1 << JOBCTL_STOP_CONSUME_BIT)
-#define JOBCTL_TRAP_STOP       (1 << JOBCTL_TRAP_STOP_BIT)
-#define JOBCTL_TRAP_NOTIFY     (1 << JOBCTL_TRAP_NOTIFY_BIT)
-#define JOBCTL_TRAPPING                (1 << JOBCTL_TRAPPING_BIT)
-#define JOBCTL_LISTENING       (1 << JOBCTL_LISTENING_BIT)
+#define JOBCTL_STOP_DEQUEUED   (1UL << JOBCTL_STOP_DEQUEUED_BIT)
+#define JOBCTL_STOP_PENDING    (1UL << JOBCTL_STOP_PENDING_BIT)
+#define JOBCTL_STOP_CONSUME    (1UL << JOBCTL_STOP_CONSUME_BIT)
+#define JOBCTL_TRAP_STOP       (1UL << JOBCTL_TRAP_STOP_BIT)
+#define JOBCTL_TRAP_NOTIFY     (1UL << JOBCTL_TRAP_NOTIFY_BIT)
+#define JOBCTL_TRAPPING                (1UL << JOBCTL_TRAPPING_BIT)
+#define JOBCTL_LISTENING       (1UL << JOBCTL_LISTENING_BIT)
 
 #define JOBCTL_TRAP_MASK       (JOBCTL_TRAP_STOP | JOBCTL_TRAP_NOTIFY)
 #define JOBCTL_PENDING_MASK    (JOBCTL_STOP_PENDING | JOBCTL_TRAP_MASK)
 
 extern bool task_set_jobctl_pending(struct task_struct *task,
-                                   unsigned int mask);
+                                   unsigned long mask);
 extern void task_clear_jobctl_trapping(struct task_struct *task);
 extern void task_clear_jobctl_pending(struct task_struct *task,
-                                     unsigned int mask);
+                                     unsigned long mask);
 
 static inline void rcu_copy_process(struct task_struct *p)
 {
@@ -2965,11 +3032,6 @@ static __always_inline bool need_resched(void)
 void thread_group_cputime(struct task_struct *tsk, struct task_cputime *times);
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times);
 
-static inline void thread_group_cputime_init(struct signal_struct *sig)
-{
-       raw_spin_lock_init(&sig->cputimer.lock);
-}
-
 /*
  * Reevaluate whether the task has signals pending delivery.
  * Wake the task if so.
@@ -3083,13 +3145,13 @@ static inline void mm_update_next_owner(struct mm_struct *mm)
 static inline unsigned long task_rlimit(const struct task_struct *tsk,
                unsigned int limit)
 {
-       return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_cur);
+       return READ_ONCE(tsk->signal->rlim[limit].rlim_cur);
 }
 
 static inline unsigned long task_rlimit_max(const struct task_struct *tsk,
                unsigned int limit)
 {
-       return ACCESS_ONCE(tsk->signal->rlim[limit].rlim_max);
+       return READ_ONCE(tsk->signal->rlim[limit].rlim_max);
 }
 
 static inline unsigned long rlimit(unsigned int limit)
index 6341f5be6e2474c0a7e30fdd75e3eae5b4286bf3..a30b172df6e1a760905f83c2136ac35f4611320f 100644 (file)
@@ -18,7 +18,7 @@ static inline int rt_task(struct task_struct *p)
 #ifdef CONFIG_RT_MUTEXES
 extern int rt_mutex_getprio(struct task_struct *p);
 extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_check_prio(struct task_struct *task, int newprio);
+extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
 extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
 extern void rt_mutex_adjust_pi(struct task_struct *p);
 static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
@@ -31,9 +31,10 @@ static inline int rt_mutex_getprio(struct task_struct *p)
        return p->normal_prio;
 }
 
-static inline int rt_mutex_check_prio(struct task_struct *task, int newprio)
+static inline int rt_mutex_get_effective_prio(struct task_struct *task,
+                                             int newprio)
 {
-       return 0;
+       return newprio;
 }
 
 static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
index 596a0e007c62d97e57d040ee45fa3df784403880..c9e4731cf10b8e97956b160c503e447490991931 100644 (file)
@@ -57,24 +57,12 @@ extern unsigned int sysctl_numa_balancing_scan_size;
 extern unsigned int sysctl_sched_migration_cost;
 extern unsigned int sysctl_sched_nr_migrate;
 extern unsigned int sysctl_sched_time_avg;
-extern unsigned int sysctl_timer_migration;
 extern unsigned int sysctl_sched_shares_window;
 
 int sched_proc_update_handler(struct ctl_table *table, int write,
                void __user *buffer, size_t *length,
                loff_t *ppos);
 #endif
-#ifdef CONFIG_SCHED_DEBUG
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-       return sysctl_timer_migration;
-}
-#else
-static inline unsigned int get_sysctl_timer_migration(void)
-{
-       return 1;
-}
-#endif
 
 /*
  *  control realtime throttling:
index 18264ea9e314153488f9726b530993658c4cea25..52febde524794f5b0201ceba920c4c695edcf8e3 100644 (file)
@@ -43,7 +43,6 @@ struct file;
 struct vfsmount;
 struct path;
 struct qstr;
-struct nameidata;
 struct iattr;
 struct fown_struct;
 struct file_operations;
@@ -477,7 +476,8 @@ static inline void security_free_mnt_opts(struct security_mnt_opts *opts)
  * @inode_follow_link:
  *     Check permission to follow a symbolic link when looking up a pathname.
  *     @dentry contains the dentry structure for the link.
- *     @nd contains the nameidata structure for the parent directory.
+ *     @inode contains the inode, which itself is not stable in RCU-walk.
+ *     @rcu indicates whether we are in RCU-walk mode.
  *     Return 0 if permission is granted.
  * @inode_permission:
  *     Check permission before accessing an inode.  This hook is called by the
@@ -1553,7 +1553,8 @@ struct security_operations {
        int (*inode_rename) (struct inode *old_dir, struct dentry *old_dentry,
                             struct inode *new_dir, struct dentry *new_dentry);
        int (*inode_readlink) (struct dentry *dentry);
-       int (*inode_follow_link) (struct dentry *dentry, struct nameidata *nd);
+       int (*inode_follow_link) (struct dentry *dentry, struct inode *inode,
+                                 bool rcu);
        int (*inode_permission) (struct inode *inode, int mask);
        int (*inode_setattr)    (struct dentry *dentry, struct iattr *attr);
        int (*inode_getattr) (const struct path *path);
@@ -1839,7 +1840,8 @@ int security_inode_rename(struct inode *old_dir, struct dentry *old_dentry,
                          struct inode *new_dir, struct dentry *new_dentry,
                          unsigned int flags);
 int security_inode_readlink(struct dentry *dentry);
-int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd);
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+                              bool rcu);
 int security_inode_permission(struct inode *inode, int mask);
 int security_inode_setattr(struct dentry *dentry, struct iattr *attr);
 int security_inode_getattr(const struct path *path);
@@ -2242,7 +2244,8 @@ static inline int security_inode_readlink(struct dentry *dentry)
 }
 
 static inline int security_inode_follow_link(struct dentry *dentry,
-                                             struct nameidata *nd)
+                                            struct inode *inode,
+                                            bool rcu)
 {
        return 0;
 }
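An LSM implementing the reworked hook might handle RCU-walk like this (a sketch; returning -ECHILD to force a fall-back to ref-walk is the usual convention when a check cannot run locklessly, and example_check_link() is hypothetical):

    static int example_inode_follow_link(struct dentry *dentry,
                                         struct inode *inode, bool rcu)
    {
            if (rcu)
                    return -ECHILD; /* inode unstable; retry in ref-walk */

            return example_check_link(inode);
    }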
index 5f68d0a391cee8506f8e0d94cda72d8bd357b10f..486e685a226a82d5cb841e61fb2ebf1562c5adb7 100644 (file)
@@ -233,6 +233,47 @@ static inline void raw_write_seqcount_end(seqcount_t *s)
        s->sequence++;
 }
 
+/**
+ * raw_write_seqcount_barrier - do a seq write barrier
+ * @s: pointer to seqcount_t
+ *
+ * This can be used to provide an ordering guarantee instead of the
+ * usual consistency guarantee. It is one wmb cheaper, because we can
+ * collapse the two back-to-back wmb()s.
+ *
+ *      seqcount_t seq;
+ *      bool X = true, Y = false;
+ *
+ *      void read(void)
+ *      {
+ *              bool x, y;
+ *
+ *              do {
+ *                      int s = read_seqcount_begin(&seq);
+ *
+ *                      x = X; y = Y;
+ *
+ *              } while (read_seqcount_retry(&seq, s));
+ *
+ *              BUG_ON(!x && !y);
+ *      }
+ *
+ *      void write(void)
+ *      {
+ *              Y = true;
+ *
+ *              raw_write_seqcount_barrier(seq);
+ *
+ *              X = false;
+ *      }
+ */
+static inline void raw_write_seqcount_barrier(seqcount_t *s)
+{
+       s->sequence++;
+       smp_wmb();
+       s->sequence++;
+}
+
 /*
  * raw_write_seqcount_latch - redirect readers to even/odd copy
  * @s: pointer to seqcount_t
@@ -266,13 +307,13 @@ static inline void write_seqcount_end(seqcount_t *s)
 }
 
 /**
- * write_seqcount_barrier - invalidate in-progress read-side seq operations
+ * write_seqcount_invalidate - invalidate in-progress read-side seq operations
  * @s: pointer to seqcount_t
  *
- * After write_seqcount_barrier, no read-side seq operations will complete
+ * After write_seqcount_invalidate, no read-side seq operations will complete
  * successfully and see data older than this.
  */
-static inline void write_seqcount_barrier(seqcount_t *s)
+static inline void write_seqcount_invalidate(seqcount_t *s)
 {
        smp_wmb();
        s->sequence+=2;
index 66e374d62f64347025ed0a0bc7dade0744e16818..f15154a879c711870ba649f867fc6eeb47212f14 100644 (file)
@@ -176,6 +176,7 @@ struct nf_bridge_info {
        struct net_device       *physindev;
        struct net_device       *physoutdev;
        char                    neigh_header[8];
+       __be32                  ipv4_daddr;
 };
 #endif
 
index 3e18379dfa6f349ba48edfc6615232af41ccfb99..0063b24b4f36df594b3587daadfdaf8849192c7d 100644 (file)
@@ -120,7 +120,7 @@ do {                                                                \
 /*
  * Despite its name it doesn't necessarily have to be a full barrier.
  * It should only guarantee that a STORE before the critical section
- * can not be reordered with a LOAD inside this section.
+ * can not be reordered with LOADs and STOREs inside this section.
  * spin_lock() is the one-way barrier, this LOAD can not escape out
  * of the region. So the default implementation simply ensures that
  * a STORE can not move into the critical section, smp_wmb() should
index 0caa3a2d4106eab0137d20ac75518af7964281ba..e8bbf403618f47931e1b32d4ade97b06465fc982 100644 (file)
@@ -145,11 +145,21 @@ struct tcp_sock {
  *     read the code and the spec side by side (and laugh ...)
  *     See RFC793 and RFC1122. The RFC writes these in capitals.
  */
+       u64     bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived
+                                * sum(delta(rcv_nxt)), or how many bytes
+                                * were acked.
+                                */
        u32     rcv_nxt;        /* What we want to receive next         */
        u32     copied_seq;     /* Head of yet unread data              */
        u32     rcv_wup;        /* rcv_nxt on last window update sent   */
        u32     snd_nxt;        /* Next sequence we send                */
 
+       u64     bytes_acked;    /* RFC4898 tcpEStatsAppHCThruOctetsAcked
+                                * sum(delta(snd_una)), or how many bytes
+                                * were acked.
+                                */
+       struct u64_stats_sync syncp; /* protects 64bit vars (cf tcp_get_info()) */
+
        u32     snd_una;        /* First byte we want an ack for        */
        u32     snd_sml;        /* Last byte of the most recently transmitted small packet */
        u32     rcv_tstamp;     /* timestamp of last received ACK (for keepalives) */
index a3831478d9cf8db848a193fc908f7e54f340ffa8..77b5df2acd2adde021954ae8275b9f39d19c7c0f 100644 (file)
@@ -2,6 +2,7 @@
 #define _LINUX_TIME64_H
 
 #include <uapi/linux/time.h>
+#include <linux/math64.h>
 
 typedef __s64 time64_t;
 
@@ -28,6 +29,7 @@ struct timespec64 {
 #define FSEC_PER_SEC   1000000000000000LL
 
 /* Located here for timespec[64]_valid_strict */
+#define TIME64_MAX                     ((s64)~((u64)1 << 63))
 #define KTIME_MAX                      ((s64)~((u64)1 << 63))
 #define KTIME_SEC_MAX                  (KTIME_MAX / NSEC_PER_SEC)
 
index fb86963859c772846dfc531fc9cc8c0825f36ac7..25247220b4b7ddf3f336077b105ab42271475a05 100644 (file)
@@ -49,6 +49,8 @@ struct tk_read_base {
  * @offs_boot:         Offset clock monotonic -> clock boottime
  * @offs_tai:          Offset clock monotonic -> clock tai
  * @tai_offset:                The current UTC to TAI offset in seconds
+ * @clock_was_set_seq: The sequence number of clock was set events
+ * @next_leap_ktime:   CLOCK_MONOTONIC time value of a pending leap-second
  * @raw_time:          Monotonic raw base time in timespec64 format
  * @cycle_interval:    Number of clock cycles in one NTP interval
  * @xtime_interval:    Number of clock shifted nano seconds in one NTP
@@ -60,6 +62,9 @@ struct tk_read_base {
  *                     shifted nano seconds.
  * @ntp_error_shift:   Shift conversion between clock shifted nano seconds and
  *                     ntp shifted nano seconds.
+ * @last_warning:      Warning ratelimiter (DEBUG_TIMEKEEPING)
+ * @underflow_seen:    Underflow warning flag (DEBUG_TIMEKEEPING)
+ * @overflow_seen:     Overflow warning flag (DEBUG_TIMEKEEPING)
  *
  * Note: For timespec(64) based interfaces wall_to_monotonic is what
  * we need to add to xtime (or xtime corrected for sub jiffie times)
@@ -85,6 +90,8 @@ struct timekeeper {
        ktime_t                 offs_boot;
        ktime_t                 offs_tai;
        s32                     tai_offset;
+       unsigned int            clock_was_set_seq;
+       ktime_t                 next_leap_ktime;
        struct timespec64       raw_time;
 
        /* The following members are for timekeeping internal use */
@@ -104,6 +111,18 @@ struct timekeeper {
        s64                     ntp_error;
        u32                     ntp_error_shift;
        u32                     ntp_err_mult;
+#ifdef CONFIG_DEBUG_TIMEKEEPING
+       long                    last_warning;
+       /*
+        * These simple flag variables are managed
+        * without locks, which is racy, but that is
+        * fine: we don't need a precise count of how
+        * many events were seen, only an indication
+        * that a problem was observed.
+        */
+       int                     underflow_seen;
+       int                     overflow_seen;
+#endif
 };
 
 #ifdef CONFIG_GENERIC_TIME_VSYSCALL
index 99176af216af449563e3a190b96edc04ea1a1f9e..3aa72e64865021ef0efd15c0511fee55a520c19e 100644 (file)
@@ -163,6 +163,7 @@ extern ktime_t ktime_get(void);
 extern ktime_t ktime_get_with_offset(enum tk_offsets offs);
 extern ktime_t ktime_mono_to_any(ktime_t tmono, enum tk_offsets offs);
 extern ktime_t ktime_get_raw(void);
+extern u32 ktime_get_resolution_ns(void);
 
 /**
  * ktime_get_real - get the real (wall-) time in ktime_t format
@@ -266,7 +267,6 @@ extern int persistent_clock_is_local;
 
 extern void read_persistent_clock(struct timespec *ts);
 extern void read_persistent_clock64(struct timespec64 *ts);
-extern void read_boot_clock(struct timespec *ts);
 extern void read_boot_clock64(struct timespec64 *ts);
 extern int update_persistent_clock(struct timespec now);
 extern int update_persistent_clock64(struct timespec64 now);
index 8c5a197e1587de4c647ff205b5b591c31a0dbcc6..61aa61dc410cf5035beb63c2873a471be4c50372 100644 (file)
@@ -14,27 +14,23 @@ struct timer_list {
         * All fields that change during normal runtime grouped to the
         * same cacheline
         */
-       struct list_head entry;
-       unsigned long expires;
-       struct tvec_base *base;
-
-       void (*function)(unsigned long);
-       unsigned long data;
-
-       int slack;
+       struct hlist_node       entry;
+       unsigned long           expires;
+       void                    (*function)(unsigned long);
+       unsigned long           data;
+       u32                     flags;
+       int                     slack;
 
 #ifdef CONFIG_TIMER_STATS
-       int start_pid;
-       void *start_site;
-       char start_comm[16];
+       int                     start_pid;
+       void                    *start_site;
+       char                    start_comm[16];
 #endif
 #ifdef CONFIG_LOCKDEP
-       struct lockdep_map lockdep_map;
+       struct lockdep_map      lockdep_map;
 #endif
 };
 
-extern struct tvec_base boot_tvec_bases;
-
 #ifdef CONFIG_LOCKDEP
 /*
  * NB: because we have to copy the lockdep_map, setting the lockdep_map key
@@ -49,9 +45,6 @@ extern struct tvec_base boot_tvec_bases;
 #endif
 
 /*
- * Note that all tvec_bases are at least 4 byte aligned and lower two bits
- * of base in timer_list is guaranteed to be zero. Use them for flags.
- *
  * A deferrable timer will work normally when the system is busy, but
  * will not cause a CPU to come out of idle just to service it; instead,
  * the timer will be serviced when the CPU eventually wakes up with a
@@ -65,17 +58,18 @@ extern struct tvec_base boot_tvec_bases;
  * workqueue locking issues. It's not meant for executing random crap
  * with interrupts disabled. Abuse is monitored!
  */
-#define TIMER_DEFERRABLE               0x1LU
-#define TIMER_IRQSAFE                  0x2LU
-
-#define TIMER_FLAG_MASK                        0x3LU
+#define TIMER_CPUMASK          0x0007FFFF
+#define TIMER_MIGRATING                0x00080000
+#define TIMER_BASEMASK         (TIMER_CPUMASK | TIMER_MIGRATING)
+#define TIMER_DEFERRABLE       0x00100000
+#define TIMER_IRQSAFE          0x00200000
 
 #define __TIMER_INITIALIZER(_function, _expires, _data, _flags) { \
-               .entry = { .prev = TIMER_ENTRY_STATIC },        \
+               .entry = { .next = TIMER_ENTRY_STATIC },        \
                .function = (_function),                        \
                .expires = (_expires),                          \
                .data = (_data),                                \
-               .base = (void *)((unsigned long)&boot_tvec_bases + (_flags)), \
+               .flags = (_flags),                              \
                .slack = -1,                                    \
                __TIMER_LOCKDEP_MAP_INITIALIZER(                \
                        __FILE__ ":" __stringify(__LINE__))     \
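
Editor's note: with the tvec_base pointer gone from the struct, a timer's base CPU and its properties now live together in the u32 flags word laid out above: bits 0-18 hold the CPU number, bit 19 marks a migration in progress, and the deferrable/irqsafe bits sit above that. Hypothetical accessors, purely to illustrate the packing (the real equivalents live in kernel/time/timer.c):

    /* Illustrative only; these names are not from the patch. */
    static inline unsigned int timer_base_cpu(const struct timer_list *t)
    {
            return t->flags & TIMER_CPUMASK;        /* bits 0-18 */
    }

    static inline bool timer_is_deferrable(const struct timer_list *t)
    {
            return t->flags & TIMER_DEFERRABLE;     /* bit 20 */
    }
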
@@ -168,7 +162,7 @@ static inline void init_timer_on_stack_key(struct timer_list *timer,
  */
 static inline int timer_pending(const struct timer_list * timer)
 {
-       return timer->entry.next != NULL;
+       return timer->entry.pprev != NULL;
 }
 
 extern void add_timer_on(struct timer_list *timer, int cpu);
@@ -187,13 +181,6 @@ extern void set_timer_slack(struct timer_list *time, int slack_hz);
  */
 #define NEXT_TIMER_MAX_DELTA   ((1UL << 30) - 1)
 
-/*
- * Return when the next timer-wheel timeout occurs (in absolute jiffies),
- * locks the timer base and does the comparison against the given
- * jiffie.
- */
-extern unsigned long get_next_timer_interrupt(unsigned long now);
-
 /*
  * Timer-statistics info:
  */
@@ -201,13 +188,10 @@ extern unsigned long get_next_timer_interrupt(unsigned long now);
 
 extern int timer_stats_active;
 
-#define TIMER_STATS_FLAG_DEFERRABLE    0x1
-
 extern void init_timer_stats(void);
 
 extern void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-                                    void *timerf, char *comm,
-                                    unsigned int timer_flag);
+                                    void *timerf, char *comm, u32 flags);
 
 extern void __timer_stats_timer_set_start_info(struct timer_list *timer,
                                               void *addr);
@@ -254,6 +238,15 @@ extern void run_local_timers(void);
 struct hrtimer;
 extern enum hrtimer_restart it_real_fn(struct hrtimer *);
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+#include <linux/sysctl.h>
+
+extern unsigned int sysctl_timer_migration;
+int timer_migration_handler(struct ctl_table *table, int write,
+                           void __user *buffer, size_t *lenp,
+                           loff_t *ppos);
+#endif
+
 unsigned long __round_jiffies(unsigned long j, int cpu);
 unsigned long __round_jiffies_relative(unsigned long j, int cpu);
 unsigned long round_jiffies(unsigned long j);
index a520fd70a59f371f40a34f79e883dcf7b32c23f7..7eec17ad7fa195bba39c06e44817ef3f3a1b0402 100644 (file)
@@ -16,10 +16,10 @@ struct timerqueue_head {
 };
 
 
-extern void timerqueue_add(struct timerqueue_head *head,
-                               struct timerqueue_node *node);
-extern void timerqueue_del(struct timerqueue_head *head,
-                               struct timerqueue_node *node);
+extern bool timerqueue_add(struct timerqueue_head *head,
+                          struct timerqueue_node *node);
+extern bool timerqueue_del(struct timerqueue_head *head,
+                          struct timerqueue_node *node);
 extern struct timerqueue_node *timerqueue_iterate_next(
                                                struct timerqueue_node *node);
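
Editor's note: the switch from void to bool lets callers learn whether the operation changed the head of the queue, i.e. the earliest-expiring node, so clock event hardware is reprogrammed only when necessary. A sketch of the intended call pattern, with a hypothetical reprogram helper:

    /* Enqueue, and touch the hardware only if this node became
     * the new earliest expiry.
     */
    if (timerqueue_add(&base->active, &timer->node))
            reprogram_clock_event(base);    /* hypothetical */
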
 
index 909b6e43b6942c2a7372314627e19d1bd0f72c7c..73ddad1e0fa3435ffcd567dbbcfeb01fe296a014 100644 (file)
@@ -191,8 +191,8 @@ static inline int cpu_to_mem(int cpu)
 #ifndef topology_core_id
 #define topology_core_id(cpu)                  ((void)(cpu), 0)
 #endif
-#ifndef topology_thread_cpumask
-#define topology_thread_cpumask(cpu)           cpumask_of(cpu)
+#ifndef topology_sibling_cpumask
+#define topology_sibling_cpumask(cpu)          cpumask_of(cpu)
 #endif
 #ifndef topology_core_cpumask
 #define topology_core_cpumask(cpu)             cpumask_of(cpu)
@@ -201,7 +201,7 @@ static inline int cpu_to_mem(int cpu)
 #ifdef CONFIG_SCHED_SMT
 static inline const struct cpumask *cpu_smt_mask(int cpu)
 {
-       return topology_thread_cpumask(cpu);
+       return topology_sibling_cpumask(cpu);
 }
 #endif
 
index fe5623c9af715fda985da02bcecb1c9451b3789c..d76631f615c224ac29e64198050279aa01e97880 100644 (file)
@@ -339,6 +339,7 @@ struct tty_file_private {
 #define TTY_EXCLUSIVE          3       /* Exclusive open mode */
 #define TTY_DEBUG              4       /* Debugging */
 #define TTY_DO_WRITE_WAKEUP    5       /* Call write_wakeup after queuing new */
+#define TTY_OTHER_DONE         6       /* Closed pty has completed input processing */
 #define TTY_LDISC_OPEN         11      /* Line discipline is open */
 #define TTY_PTY_LOCK           16      /* pty private */
 #define TTY_NO_WRITE_SPLIT     17      /* Preserve write boundaries to driver */
@@ -462,7 +463,6 @@ extern int tty_hung_up_p(struct file *filp);
 extern void do_SAK(struct tty_struct *tty);
 extern void __do_SAK(struct tty_struct *tty);
 extern void no_tty(void);
-extern void tty_flush_to_ldisc(struct tty_struct *tty);
 extern void tty_buffer_free_all(struct tty_port *port);
 extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
 extern void tty_buffer_init(struct tty_port *port);
index ecd3319dac33140a21a7c8fd89a2c95c18ce9e42..ae572c1386073cce6c57807c47dc3d4694af0f03 100644 (file)
@@ -1,21 +1,30 @@
 #ifndef __LINUX_UACCESS_H__
 #define __LINUX_UACCESS_H__
 
-#include <linux/preempt.h>
+#include <linux/sched.h>
 #include <asm/uaccess.h>
 
+static __always_inline void pagefault_disabled_inc(void)
+{
+       current->pagefault_disabled++;
+}
+
+static __always_inline void pagefault_disabled_dec(void)
+{
+       current->pagefault_disabled--;
+       WARN_ON(current->pagefault_disabled < 0);
+}
+
 /*
- * These routines enable/disable the pagefault handler in that
- * it will not take any locks and go straight to the fixup table.
+ * These routines enable/disable the pagefault handler. If disabled, it will
+ * not take any locks and go straight to the fixup table.
  *
- * They have great resemblance to the preempt_disable/enable calls
- * and in fact they are identical; this is because currently there is
- * no other way to make the pagefault handlers do this. So we do
- * disable preemption but we don't necessarily care about that.
+ * User access methods will not sleep when called from a pagefault_disabled()
+ * environment.
  */
 static inline void pagefault_disable(void)
 {
-       preempt_count_inc();
+       pagefault_disabled_inc();
        /*
         * make sure to have issued the store before a pagefault
         * can hit.
@@ -25,18 +34,31 @@ static inline void pagefault_disable(void)
 
 static inline void pagefault_enable(void)
 {
-#ifndef CONFIG_PREEMPT
        /*
         * make sure to issue those last loads/stores before enabling
         * the pagefault handler again.
         */
        barrier();
-       preempt_count_dec();
-#else
-       preempt_enable();
-#endif
+       pagefault_disabled_dec();
 }
 
+/*
+ * Is the pagefault handler disabled? If so, user access methods will not sleep.
+ */
+#define pagefault_disabled() (current->pagefault_disabled != 0)
+
+/*
+ * The pagefault handler is in general disabled by pagefault_disable() or
+ * when in irq context (via in_atomic()).
+ *
+ * This function should only be used by the fault handlers. Other users should
+ * stick to pagefault_disabled().
+ * Please NEVER use preempt_disable() to disable the fault handler. With
+ * !CONFIG_PREEMPT_COUNT, this is like a NOP. So the handler won't be disabled.
+ * in_atomic() will report different values based on !CONFIG_PREEMPT_COUNT.
+ */
+#define faulthandler_disabled() (pagefault_disabled() || in_atomic())
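
Editor's note: because a per-task counter replaces the preempt count here, pagefault_disable() sections can nest and no longer imply disabled preemption. A minimal sketch of the nesting semantics under these definitions:

    static void pagefault_nesting_sketch(void)
    {
            pagefault_disable();
            pagefault_disable();            /* nesting is fine */
            WARN_ON(!pagefault_disabled());
            pagefault_enable();
            WARN_ON(!pagefault_disabled()); /* count still 1 */
            pagefault_enable();
            WARN_ON(pagefault_disabled());  /* back to 0 */
    }
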
+
 #ifndef ARCH_HAS_NOCACHE_UACCESS
 
 static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
index 0ee05da3889946c10dd8fabaeb50e59cc20739c3..03835522dfcb68ab830a9d38768c374a99ec131d 100644 (file)
@@ -109,12 +109,12 @@ static inline bool gid_lte(kgid_t left, kgid_t right)
 
 static inline bool uid_valid(kuid_t uid)
 {
-       return !uid_eq(uid, INVALID_UID);
+       return __kuid_val(uid) != (uid_t) -1;
 }
 
 static inline bool gid_valid(kgid_t gid)
 {
-       return !gid_eq(gid, INVALID_GID);
+       return __kgid_val(gid) != (gid_t) -1;
 }
 
 #ifdef CONFIG_USER_NS
index 2db83349865bb7a27eba8438a9e8a3cd8509abd0..d69ac4ecc88b9c0d6ff4d5f97cf0fa9d89b5fcdc 100644 (file)
@@ -969,7 +969,7 @@ extern int bit_wait_io_timeout(struct wait_bit_key *);
  * on that signal.
  */
 static inline int
-wait_on_bit(void *word, int bit, unsigned mode)
+wait_on_bit(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
        if (!test_bit(bit, word))
@@ -994,7 +994,7 @@ wait_on_bit(void *word, int bit, unsigned mode)
  * on that signal.
  */
 static inline int
-wait_on_bit_io(void *word, int bit, unsigned mode)
+wait_on_bit_io(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
        if (!test_bit(bit, word))
@@ -1020,7 +1020,8 @@ wait_on_bit_io(void *word, int bit, unsigned mode)
  * received a signal and the mode permitted wakeup on that signal.
  */
 static inline int
-wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
+wait_on_bit_timeout(unsigned long *word, int bit, unsigned mode,
+                   unsigned long timeout)
 {
        might_sleep();
        if (!test_bit(bit, word))
@@ -1047,7 +1048,8 @@ wait_on_bit_timeout(void *word, int bit, unsigned mode, unsigned long timeout)
  * on that signal.
  */
 static inline int
-wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_action(unsigned long *word, int bit, wait_bit_action_f *action,
+                  unsigned mode)
 {
        might_sleep();
        if (!test_bit(bit, word))
@@ -1075,7 +1077,7 @@ wait_on_bit_action(void *word, int bit, wait_bit_action_f *action, unsigned mode
  * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock(void *word, int bit, unsigned mode)
+wait_on_bit_lock(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
        if (!test_and_set_bit(bit, word))
@@ -1099,7 +1101,7 @@ wait_on_bit_lock(void *word, int bit, unsigned mode)
  * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock_io(void *word, int bit, unsigned mode)
+wait_on_bit_lock_io(unsigned long *word, int bit, unsigned mode)
 {
        might_sleep();
        if (!test_and_set_bit(bit, word))
@@ -1125,7 +1127,8 @@ wait_on_bit_lock_io(void *word, int bit, unsigned mode)
  * the @mode allows that signal to wake the process.
  */
 static inline int
-wait_on_bit_lock_action(void *word, int bit, wait_bit_action_f *action, unsigned mode)
+wait_on_bit_lock_action(unsigned long *word, int bit, wait_bit_action_f *action,
+                       unsigned mode)
 {
        might_sleep();
        if (!test_and_set_bit(bit, word))
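
Editor's note: the tightened prototypes make the compiler reject anything but a genuine unsigned long word, which test_bit() and friends require anyway. A usage sketch under the new signatures; the bit name and the wake-up helper placement are hypothetical, the wait_on_bit()/wake_up_bit() pairing is the standard one:

    #define MY_BUSY_BIT     0

    static unsigned long my_state;

    static int my_wait_idle(void)
    {
            /* Sleeps until MY_BUSY_BIT of my_state is cleared;
             * returns nonzero if interrupted by a signal.
             */
            return wait_on_bit(&my_state, MY_BUSY_BIT, TASK_INTERRUPTIBLE);
    }

    static void my_set_idle(void)
    {
            clear_bit(MY_BUSY_BIT, &my_state);
            smp_mb__after_atomic();
            wake_up_bit(&my_state, MY_BUSY_BIT);
    }
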
index eeda67652766a9979478de97105322b5ee6a7977..6ea16c84293b0cdcb981df77481a4fa5e509fdde 100644 (file)
@@ -30,11 +30,13 @@ struct wpan_phy_cca;
 struct cfg802154_ops {
        struct net_device * (*add_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
                                                           const char *name,
+                                                          unsigned char name_assign_type,
                                                           int type);
        void    (*del_virtual_intf_deprecated)(struct wpan_phy *wpan_phy,
                                               struct net_device *dev);
        int     (*add_virtual_intf)(struct wpan_phy *wpan_phy,
                                    const char *name,
+                                   unsigned char name_assign_type,
                                    enum nl802154_iftype type,
                                    __le64 extended_addr);
        int     (*del_virtual_intf)(struct wpan_phy *wpan_phy,
index aeee28081245c9215f10badd611f58ba0124fcd0..1e18005f7f65f061f6084ea1823a8d37368a57e4 100644 (file)
@@ -120,11 +120,13 @@ static inline u32 codel_time_to_us(codel_time_t val)
  * struct codel_params - contains codel parameters
  * @target:    target queue size (in time units)
  * @interval:  width of moving time window
+ * @mtu:       device mtu, or minimal queue backlog in bytes.
  * @ecn:       is Explicit Congestion Notification enabled
  */
 struct codel_params {
        codel_time_t    target;
        codel_time_t    interval;
+       u32             mtu;
        bool            ecn;
 };
 
@@ -166,10 +168,12 @@ struct codel_stats {
        u32             ecn_mark;
 };
 
-static void codel_params_init(struct codel_params *params)
+static void codel_params_init(struct codel_params *params,
+                             const struct Qdisc *sch)
 {
        params->interval = MS2TIME(100);
        params->target = MS2TIME(5);
+       params->mtu = psched_mtu(qdisc_dev(sch));
        params->ecn = false;
 }
 
@@ -180,7 +184,7 @@ static void codel_vars_init(struct codel_vars *vars)
 
 static void codel_stats_init(struct codel_stats *stats)
 {
-       stats->maxpacket = 256;
+       stats->maxpacket = 0;
 }
 
 /*
@@ -234,7 +238,7 @@ static bool codel_should_drop(const struct sk_buff *skb,
                stats->maxpacket = qdisc_pkt_len(skb);
 
        if (codel_time_before(vars->ldelay, params->target) ||
-           sch->qstats.backlog <= stats->maxpacket) {
+           sch->qstats.backlog <= params->mtu) {
                /* went below - stay below for at least interval */
                vars->first_above_time = 0;
                return false;
index 48a8158235874b1625c65651b0bd92eedd999fe5..0320bbb7d7b5a1987e7b85f6842cf9fa145d91c5 100644 (file)
@@ -98,7 +98,8 @@ struct inet_connection_sock {
        const struct tcp_congestion_ops *icsk_ca_ops;
        const struct inet_connection_sock_af_ops *icsk_af_ops;
        unsigned int              (*icsk_sync_mss)(struct sock *sk, u32 pmtu);
-       __u8                      icsk_ca_state:7,
+       __u8                      icsk_ca_state:6,
+                                 icsk_ca_setsockopt:1,
                                  icsk_ca_dst_locked:1;
        __u8                      icsk_retransmits;
        __u8                      icsk_pending;
@@ -129,9 +130,10 @@ struct inet_connection_sock {
 
                u32               probe_timestamp;
        } icsk_mtup;
-       u32                       icsk_ca_priv[16];
        u32                       icsk_user_timeout;
-#define ICSK_CA_PRIV_SIZE      (16 * sizeof(u32))
+
+       u64                       icsk_ca_priv[64 / sizeof(u64)];
+#define ICSK_CA_PRIV_SIZE      (8 * sizeof(u64))
 };
 
 #define ICSK_TIME_RETRANS      1       /* Retransmit timer */
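
Editor's note: icsk_ca_priv doubles to 64 bytes and, being declared as an array of u64, is now u64-aligned. Congestion-control modules keep their per-socket state there via inet_csk_ca() and typically assert the size at build time. A sketch with a hypothetical state struct:

    struct my_ca_state {            /* hypothetical CC module state */
            u64     epoch_start_ns;
            u32     cnt;
    };

    static void my_ca_init(struct sock *sk)
    {
            struct my_ca_state *ca = inet_csk_ca(sk);

            BUILD_BUG_ON(sizeof(struct my_ca_state) > ICSK_CA_PRIV_SIZE);
            memset(ca, 0, sizeof(*ca));
    }
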
index b4bef1152c05c52b87dc877e9d4e8b09050842f7..fc57f6b82fc59e4dc6ae72b802856ab80e0af6e9 100644 (file)
@@ -354,7 +354,7 @@ enum ieee80211_rssi_event_data {
 };
 
 /**
- * enum ieee80211_rssi_event - data attached to an %RSSI_EVENT
+ * struct ieee80211_rssi_event - data attached to an %RSSI_EVENT
  * @data: See &enum ieee80211_rssi_event_data
  */
 struct ieee80211_rssi_event {
@@ -388,7 +388,7 @@ enum ieee80211_mlme_event_status {
 };
 
 /**
- * enum ieee80211_mlme_event - data attached to an %MLME_EVENT
+ * struct ieee80211_mlme_event - data attached to an %MLME_EVENT
  * @data: See &enum ieee80211_mlme_event_data
  * @status: See &enum ieee80211_mlme_event_status
  * @reason: the reason code if applicable
@@ -401,9 +401,10 @@ struct ieee80211_mlme_event {
 
 /**
  * struct ieee80211_event - event to be sent to the driver
- * @type The event itself. See &enum ieee80211_event_type.
+ * @type: The event itself. See &enum ieee80211_event_type.
  * @rssi: relevant if &type is %RSSI_EVENT
  * @mlme: relevant if &type is %AUTH_EVENT
+ * @u:    union holding the above two fields
  */
 struct ieee80211_event {
        enum ieee80211_event_type type;
@@ -1666,6 +1667,8 @@ struct ieee80211_tx_control {
  * @sta: station table entry, %NULL for per-vif queue
  * @tid: the TID for this queue (unused for per-vif queue)
  * @ac: the AC for this queue
+ * @drv_priv: data area for driver use, will always be aligned to
+ *     sizeof(void *).
  *
  * The driver can obtain packets from this queue by calling
  * ieee80211_tx_dequeue().
index e18e7fd43f47d996613b0c1e7dfe6bc0e636c476..7df28a4c23f98793626371d1e2334ad91f7ebf87 100644 (file)
@@ -247,19 +247,109 @@ static inline void ieee802154_le64_to_be64(void *be64_dst, const void *le64_src)
        __put_unaligned_memmove64(swab64p(le64_src), be64_dst);
 }
 
-/* Basic interface to register ieee802154 device */
+/**
+ * ieee802154_alloc_hw - Allocate a new hardware device
+ *
+ * This must be called once for each hardware device. The returned pointer
+ * must be used to refer to this device when calling other functions.
+ * mac802154 allocates a private data area for the driver pointed to by
+ * @priv in &struct ieee802154_hw, the size of this area is given as
+ * @priv_data_len.
+ *
+ * @priv_data_len: length of private data
+ * @ops: callbacks for this device
+ *
+ * Return: A pointer to the new hardware device, or %NULL on error.
+ */
 struct ieee802154_hw *
 ieee802154_alloc_hw(size_t priv_data_len, const struct ieee802154_ops *ops);
+
+/**
+ * ieee802154_free_hw - free hardware descriptor
+ *
+ * This function frees everything that was allocated, including the
+ * private data for the driver. You must call ieee802154_unregister_hw()
+ * before calling this function.
+ *
+ * @hw: the hardware to free
+ */
 void ieee802154_free_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_register_hw - Register hardware device
+ *
+ * You must call this function before any other functions in
+ * mac802154. Note that before a hardware can be registered, you
+ * need to fill the contained wpan_phy's information.
+ *
+ * @hw: the device to register as returned by ieee802154_alloc_hw()
+ *
+ * Return: 0 on success. An error code otherwise.
+ */
 int ieee802154_register_hw(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_unregister_hw - Unregister a hardware device
+ *
+ * This function instructs mac802154 to free allocated resources
+ * and unregister netdevices from the networking subsystem.
+ *
+ * @hw: the hardware to unregister
+ */
 void ieee802154_unregister_hw(struct ieee802154_hw *hw);
 
+/**
+ * ieee802154_rx - receive frame
+ *
+ * Use this function to hand received frames to mac802154. The receive
+ * buffer in @skb must start with an IEEE 802.15.4 header. If a paged
+ * @skb is used, the driver should place the ieee802154 header of the
+ * frame in the linear part of the @skb to avoid memory
+ * allocation and/or memcpy by the stack.
+ *
+ * This function may not be called in IRQ context. Calls to this function
+ * for a single hardware must be synchronized against each other.
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ */
 void ieee802154_rx(struct ieee802154_hw *hw, struct sk_buff *skb);
+
+/**
+ * ieee802154_rx_irqsafe - receive frame
+ *
+ * Like ieee802154_rx() but can be called in IRQ context
+ * (internally defers to a tasklet.)
+ *
+ * @hw: the hardware this frame came in on
+ * @skb: the buffer to receive, owned by mac802154 after this call
+ * @lqi: link quality indicator
+ */
 void ieee802154_rx_irqsafe(struct ieee802154_hw *hw, struct sk_buff *skb,
                           u8 lqi);
-
+/**
+ * ieee802154_wake_queue - wake ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_wake_queue.
+ */
 void ieee802154_wake_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_stop_queue - stop ieee802154 queue
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ *
+ * Drivers should use this function instead of netif_stop_queue.
+ */
 void ieee802154_stop_queue(struct ieee802154_hw *hw);
+
+/**
+ * ieee802154_xmit_complete - frame transmission complete
+ *
+ * @hw: pointer as obtained from ieee802154_alloc_hw().
+ * @skb: buffer for transmission
+ * @ifs_handling: indicate interframe space handling
+ */
 void ieee802154_xmit_complete(struct ieee802154_hw *hw, struct sk_buff *skb,
                              bool ifs_handling);
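
Editor's note: taken together, the kerneldoc above pins down a driver lifecycle: allocate, fill in the wpan_phy, register; on teardown, unregister before freeing. A hedged sketch of that flow, where struct my_priv and my_ops are hypothetical driver objects:

    static int my_probe(void)
    {
            struct ieee802154_hw *hw;
            int ret;

            hw = ieee802154_alloc_hw(sizeof(struct my_priv), &my_ops);
            if (!hw)
                    return -ENOMEM;

            /* ... fill hw->phy: channels, capabilities, address ... */

            ret = ieee802154_register_hw(hw);
            if (ret)
                    ieee802154_free_hw(hw);
            return ret;
    }

    static void my_remove(struct ieee802154_hw *hw)
    {
            ieee802154_unregister_hw(hw);
            ieee802154_free_hw(hw);
    }
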
 
index c56a438c3a1eaf89d630edb17dd20802f0e01590..ce13cf20f6253e866f52534b7e7dc10e5bac1a0e 100644 (file)
@@ -574,11 +574,14 @@ static inline void sctp_v6_map_v4(union sctp_addr *addr)
 /* Map v4 address to v4-mapped v6 address */
 static inline void sctp_v4_map_v6(union sctp_addr *addr)
 {
+       __be16 port;
+
+       port = addr->v4.sin_port;
+       addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
+       addr->v6.sin6_port = port;
        addr->v6.sin6_family = AF_INET6;
        addr->v6.sin6_flowinfo = 0;
        addr->v6.sin6_scope_id = 0;
-       addr->v6.sin6_port = addr->v4.sin_port;
-       addr->v6.sin6_addr.s6_addr32[3] = addr->v4.sin_addr.s_addr;
        addr->v6.sin6_addr.s6_addr32[0] = 0;
        addr->v6.sin6_addr.s6_addr32[1] = 0;
        addr->v6.sin6_addr.s6_addr32[2] = htonl(0x0000ffff);
index 051dc5c2802d3296f8b49d4b40911c0c22345262..6d204f3f9df8cafb82d856db08769a7d24dfd79e 100644 (file)
@@ -576,7 +576,7 @@ static inline int tcp_bound_to_half_wnd(struct tcp_sock *tp, int pktsize)
 }
 
 /* tcp.c */
-void tcp_get_info(const struct sock *, struct tcp_info *);
+void tcp_get_info(struct sock *, struct tcp_info *);
 
 /* Read 'sendfile()'-style from a TCP socket */
 typedef int (*sk_read_actor_t)(read_descriptor_t *, struct sk_buff *,
@@ -804,6 +804,8 @@ enum tcp_ca_ack_event_flags {
 /* Requires ECN/ECT set on all packets */
 #define TCP_CONG_NEEDS_ECN     0x2
 
+union tcp_cc_info;
+
 struct tcp_congestion_ops {
        struct list_head        list;
        u32 key;
@@ -829,7 +831,8 @@ struct tcp_congestion_ops {
        /* hook for packet ack accounting (optional) */
        void (*pkts_acked)(struct sock *sk, u32 num_acked, s32 rtt_us);
        /* get info for inet_diag (optional) */
-       int (*get_info)(struct sock *sk, u32 ext, struct sk_buff *skb);
+       size_t (*get_info)(struct sock *sk, u32 ext, int *attr,
+                          union tcp_cc_info *info);
 
        char            name[TCP_CA_NAME_MAX];
        struct module   *owner;
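
Editor's note: the reworked get_info hook no longer writes netlink attributes itself: it fills the caller-supplied union tcp_cc_info (added in the uapi hunk further down), reports which attribute it used through *attr, and returns the number of bytes filled. A sketch modeled on a vegas-style module; the function name is hypothetical, the fields come from the uapi struct:

    static size_t my_get_info(struct sock *sk, u32 ext, int *attr,
                              union tcp_cc_info *info)
    {
            if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
                    memset(&info->vegas, 0, sizeof(info->vegas));
                    info->vegas.tcpv_enabled = 1;
                    *attr = INET_DIAG_VEGASINFO;
                    return sizeof(info->vegas);
            }
            return 0;
    }
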
index ce55906b54a0eccc316216ce88f5b012a87c537e..ac54c27a2bfd39a2f5ec950afe57963c57b04a42 100644 (file)
@@ -160,7 +160,7 @@ static inline int rdma_ip2gid(struct sockaddr *addr, union ib_gid *gid)
 }
 
 /* Important - sockaddr should be a union of sockaddr_in and sockaddr_in6 */
-static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
+static inline void rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
 {
        if (ipv6_addr_v4mapped((struct in6_addr *)gid)) {
                struct sockaddr_in *out_in = (struct sockaddr_in *)out;
@@ -173,7 +173,6 @@ static inline int rdma_gid2ip(struct sockaddr *out, union ib_gid *gid)
                out_in->sin6_family = AF_INET6;
                memcpy(&out_in->sin6_addr.s6_addr, gid->raw, 16);
        }
-       return 0;
 }
 
 static inline void iboe_addr_get_sgid(struct rdma_dev_addr *dev_addr,
index 0e3ff30647d518483853cb9c52104991df966221..39ed2d2fbd51452216586b031a3e25d236099169 100644 (file)
@@ -105,7 +105,8 @@ enum ib_cm_data_size {
        IB_CM_SIDR_REQ_PRIVATE_DATA_SIZE = 216,
        IB_CM_SIDR_REP_PRIVATE_DATA_SIZE = 136,
        IB_CM_SIDR_REP_INFO_LENGTH       = 72,
-       IB_CM_COMPARE_SIZE               = 64
+       /* compare done u32 at a time */
+       IB_CM_COMPARE_SIZE               = (64 / sizeof(u32))
 };
 
 struct ib_cm_id;
@@ -337,8 +338,8 @@ void ib_destroy_cm_id(struct ib_cm_id *cm_id);
 #define IB_SDP_SERVICE_ID_MASK cpu_to_be64(0xFFFFFFFFFFFF0000ULL)
 
 struct ib_cm_compare_data {
-       u8  data[IB_CM_COMPARE_SIZE];
-       u8  mask[IB_CM_COMPARE_SIZE];
+       u32  data[IB_CM_COMPARE_SIZE];
+       u32  mask[IB_CM_COMPARE_SIZE];
 };
 
 /**
index 928b2775e99244e7df3dae25f218386b84d1ddf3..fda31673a5628831434991f0a087f8417a9589a2 100644 (file)
@@ -147,6 +147,16 @@ int iwpm_add_mapping_cb(struct sk_buff *, struct netlink_callback *);
  */
 int iwpm_add_and_query_mapping_cb(struct sk_buff *, struct netlink_callback *);
 
+/**
+ * iwpm_remote_info_cb - Process remote connecting peer address info, which
+ *                       the port mapper has received from the connecting peer
+ *
+ * @cb: Contains the received message (payload and netlink header)
+ *
+ * Stores the IPv4/IPv6 address info in a hash table
+ */
+int iwpm_remote_info_cb(struct sk_buff *, struct netlink_callback *);
+
 /**
  * iwpm_mapping_error_cb - Process port mapper notification for error
  *
@@ -174,6 +184,21 @@ int iwpm_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
  */
 int iwpm_ack_mapping_info_cb(struct sk_buff *, struct netlink_callback *);
 
+/**
+ * iwpm_get_remote_info - Get the remote connecting peer address info
+ *
+ * @mapped_loc_addr: Mapped local address of the listening peer
+ * @mapped_rem_addr: Mapped remote address of the connecting peer
+ * @remote_addr: To store the remote address of the connecting peer
+ * @nl_client: The index of the netlink client
+ *
+ * The remote address info is retrieved and provided to the client in
+ * remote_addr. After that it is removed from the hash table.
+ */
+int iwpm_get_remote_info(struct sockaddr_storage *mapped_loc_addr,
+                       struct sockaddr_storage *mapped_rem_addr,
+                       struct sockaddr_storage *remote_addr, u8 nl_client);
+
 /**
  * iwpm_create_mapinfo - Store local and mapped IPv4/IPv6 address
  *                       info in a hash table
index 53a18b3635e24a458700a21566c56a10d0704c56..df705908480aebbf754900731834162fa8097f75 100644 (file)
@@ -9,6 +9,8 @@
 #include <sound/core.h>
 #include <sound/hdaudio.h>
 
+#define AC_AMP_FAKE_MUTE       0x10    /* fake mute bit set to amp verbs */
+
 int snd_hdac_regmap_init(struct hdac_device *codec);
 void snd_hdac_regmap_exit(struct hdac_device *codec);
 int snd_hdac_regmap_add_vendor_verb(struct hdac_device *codec,
index d61be7297b2c88c867acbacadcacfd65045e6c50..5f122570699339f5f5cf85fb14a90e2ad9fc51b7 100644 (file)
@@ -1,9 +1,7 @@
 #ifndef TARGET_CORE_BACKEND_H
 #define TARGET_CORE_BACKEND_H
 
-#define TRANSPORT_PLUGIN_PHBA_PDEV             1
-#define TRANSPORT_PLUGIN_VHBA_PDEV             2
-#define TRANSPORT_PLUGIN_VHBA_VDEV             3
+#define TRANSPORT_FLAG_PASSTHROUGH             1
 
 struct target_backend_cits {
        struct config_item_type tb_dev_cit;
@@ -22,7 +20,7 @@ struct se_subsystem_api {
        char inquiry_rev[4];
        struct module *owner;
 
-       u8 transport_type;
+       u8 transport_flags;
 
        int (*attach_hba)(struct se_hba *, u32);
        void (*detach_hba)(struct se_hba *);
@@ -138,5 +136,7 @@ int se_dev_set_queue_depth(struct se_device *, u32);
 int    se_dev_set_max_sectors(struct se_device *, u32);
 int    se_dev_set_optimal_sectors(struct se_device *, u32);
 int    se_dev_set_block_size(struct se_device *, u32);
+sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
+       sense_reason_t (*exec_cmd)(struct se_cmd *cmd));
 
 #endif /* TARGET_CORE_BACKEND_H */
index 25bb04c4209ed5c42e82bc54d5ba3996194c4bb2..b99c01170392abff3978b6c96d7c1763f06bcc27 100644 (file)
@@ -40,8 +40,6 @@ struct target_fabric_configfs {
        struct config_item      *tf_fabric;
        /* Passed from fabric modules */
        struct config_item_type *tf_fabric_cit;
-       /* Pointer to target core subsystem */
-       struct configfs_subsystem *tf_subsys;
        /* Pointer to fabric's struct module */
        struct module *tf_module;
        struct target_core_fabric_ops tf_ops;
index 17c7f5ac7ea0f5066c6b7f2bae0d66b0021f6358..0f4dc3768587bc2d41370d015c69502757322324 100644 (file)
@@ -4,7 +4,6 @@
 struct target_core_fabric_ops {
        struct module *module;
        const char *name;
-       struct configfs_subsystem *tf_subsys;
        char *(*get_fabric_name)(void);
        u8 (*get_fabric_proto_ident)(struct se_portal_group *);
        char *(*tpg_get_wwn)(struct se_portal_group *);
@@ -109,6 +108,9 @@ struct target_core_fabric_ops {
 int target_register_template(const struct target_core_fabric_ops *fo);
 void target_unregister_template(const struct target_core_fabric_ops *fo);
 
+int target_depend_item(struct config_item *item);
+void target_undepend_item(struct config_item *item);
+
 struct se_session *transport_init_session(enum target_prot_op);
 int transport_alloc_session_tags(struct se_session *, unsigned int,
                unsigned int);
index 81ea598121173bf782c04f7c6bfae63b5207b75e..f7554fd7fc62b92d6b1d73d7db6030599b552014 100644 (file)
@@ -140,19 +140,42 @@ DEFINE_EVENT(kmem_free, kfree,
        TP_ARGS(call_site, ptr)
 );
 
-DEFINE_EVENT(kmem_free, kmem_cache_free,
+DEFINE_EVENT_CONDITION(kmem_free, kmem_cache_free,
 
        TP_PROTO(unsigned long call_site, const void *ptr),
 
-       TP_ARGS(call_site, ptr)
+       TP_ARGS(call_site, ptr),
+
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus it's safe
+        * to use raw_smp_processor_id().
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id()))
 );
 
-TRACE_EVENT(mm_page_free,
+TRACE_EVENT_CONDITION(mm_page_free,
 
        TP_PROTO(struct page *page, unsigned int order),
 
        TP_ARGS(page, order),
 
+
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus it's safe
+        * to use raw_smp_processor_id().
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
        TP_STRUCT__entry(
                __field(        unsigned long,  pfn             )
                __field(        unsigned int,   order           )
@@ -253,12 +276,35 @@ DEFINE_EVENT(mm_page, mm_page_alloc_zone_locked,
        TP_ARGS(page, order, migratetype)
 );
 
-DEFINE_EVENT_PRINT(mm_page, mm_page_pcpu_drain,
+TRACE_EVENT_CONDITION(mm_page_pcpu_drain,
 
        TP_PROTO(struct page *page, unsigned int order, int migratetype),
 
        TP_ARGS(page, order, migratetype),
 
+       /*
+        * This trace can be potentially called from an offlined cpu.
+        * Since trace points use RCU and RCU should not be used from
+        * offline cpus, filter such calls out.
+        * While this trace can be called from a preemptable section,
+        * it has no impact on the condition since tasks can migrate
+        * only from online cpus to other online cpus. Thus it's safe
+        * to use raw_smp_processor_id().
+        */
+       TP_CONDITION(cpu_online(raw_smp_processor_id())),
+
+       TP_STRUCT__entry(
+               __field(        unsigned long,  pfn             )
+               __field(        unsigned int,   order           )
+               __field(        int,            migratetype     )
+       ),
+
+       TP_fast_assign(
+               __entry->pfn            = page ? page_to_pfn(page) : -1UL;
+               __entry->order          = order;
+               __entry->migratetype    = migratetype;
+       ),
+
        TP_printk("page=%p pfn=%lu order=%d migratetype=%d",
                pfn_to_page(__entry->pfn), __entry->pfn,
                __entry->order, __entry->migratetype)
index 30fedaf3e56a253175619fbef7a9f9a1f6dc15a3..d57a575fe31fc5796e9866470e3dd40a881ca12a 100644 (file)
@@ -147,7 +147,8 @@ TRACE_EVENT(sched_switch,
                  __print_flags(__entry->prev_state & (TASK_STATE_MAX-1), "|",
                                { 1, "S"} , { 2, "D" }, { 4, "T" }, { 8, "t" },
                                { 16, "Z" }, { 32, "X" }, { 64, "x" },
-                               { 128, "K" }, { 256, "W" }, { 512, "P" }) : "R",
+                               { 128, "K" }, { 256, "W" }, { 512, "P" },
+                               { 1024, "N" }) : "R",
                __entry->prev_state & TASK_STATE_MAX ? "+" : "",
                __entry->next_comm, __entry->next_pid, __entry->next_prio)
 );
index 68c2c2000f02bb639e1445245e1e460a18349f54..073b9ac245ba0315f31a51f5df9f21bcdf2e9115 100644 (file)
@@ -43,15 +43,18 @@ DEFINE_EVENT(timer_class, timer_init,
  */
 TRACE_EVENT(timer_start,
 
-       TP_PROTO(struct timer_list *timer, unsigned long expires),
+       TP_PROTO(struct timer_list *timer,
+               unsigned long expires,
+               unsigned int flags),
 
-       TP_ARGS(timer, expires),
+       TP_ARGS(timer, expires, flags),
 
        TP_STRUCT__entry(
                __field( void *,        timer           )
                __field( void *,        function        )
                __field( unsigned long, expires         )
                __field( unsigned long, now             )
+               __field( unsigned int,  flags           )
        ),
 
        TP_fast_assign(
@@ -59,11 +62,12 @@ TRACE_EVENT(timer_start,
                __entry->function       = timer->function;
                __entry->expires        = expires;
                __entry->now            = jiffies;
+               __entry->flags          = flags;
        ),
 
-       TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld]",
+       TP_printk("timer=%p function=%pf expires=%lu [timeout=%ld] flags=0x%08x",
                  __entry->timer, __entry->function, __entry->expires,
-                 (long)__entry->expires - __entry->now)
+                 (long)__entry->expires - __entry->now, __entry->flags)
 );
 
 /**
index 880dd74371729939a0179ef3dfd487e1fa017838..c178d13d6f4c0cb51d441c59e7b4975a1913ed3e 100644 (file)
@@ -250,7 +250,6 @@ DEFINE_EVENT(writeback_class, name, \
 DEFINE_WRITEBACK_EVENT(writeback_nowork);
 DEFINE_WRITEBACK_EVENT(writeback_wake_background);
 DEFINE_WRITEBACK_EVENT(writeback_bdi_register);
-DEFINE_WRITEBACK_EVENT(writeback_bdi_unregister);
 
 DECLARE_EVENT_CLASS(wbc_class,
        TP_PROTO(struct writeback_control *wbc, struct backing_dev_info *bdi),
index 871e73f99a4d7aa13b4cd6f5bc8969421c2a9301..94d44ab2fda1821bcda7e1b541bcfeffb556e7c5 100644 (file)
@@ -1038,6 +1038,7 @@ struct drm_radeon_cs {
 #define RADEON_INFO_CURRENT_GPU_SCLK   0x22
 #define RADEON_INFO_CURRENT_GPU_MCLK   0x23
 #define RADEON_INFO_READ_REG           0x24
+#define RADEON_INFO_VA_UNMAP_WORKING   0x25
 
 struct drm_radeon_info {
        uint32_t                request;
index d65c0a09efd32041e3dd07ff80d0a421f0de6ae8..c7093c75bdd6b70a53c1c35a0950a92ad49722da 100644 (file)
@@ -143,4 +143,8 @@ struct tcp_dctcp_info {
        __u32   dctcp_ab_tot;
 };
 
+union tcp_cc_info {
+       struct tcpvegas_info    vegas;
+       struct tcp_dctcp_info   dctcp;
+};
 #endif /* _UAPI_INET_DIAG_H_ */
index bc9abfe88c9a0e1ba4a80f7f16af038c5cd1f2ac..139d4dd1cab83f1cee0399e9924138a24d350ba7 100644 (file)
@@ -31,4 +31,14 @@ struct mpls_label {
 #define MPLS_LS_TTL_MASK        0x000000FF
 #define MPLS_LS_TTL_SHIFT       0
 
+/* Reserved labels */
+#define MPLS_LABEL_IPV4NULL            0 /* RFC3032 */
+#define MPLS_LABEL_RTALERT             1 /* RFC3032 */
+#define MPLS_LABEL_IPV6NULL            2 /* RFC3032 */
+#define MPLS_LABEL_IMPLNULL            3 /* RFC3032 */
+#define MPLS_LABEL_ENTROPY             7 /* RFC6790 */
+#define MPLS_LABEL_GAL                 13 /* RFC5586 */
+#define MPLS_LABEL_OAMALERT            14 /* RFC3429 */
+#define MPLS_LABEL_EXTENSION           15 /* RFC7274 */
+
 #endif /* _UAPI_MPLS_H */
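
Editor's note: all of the labels above fall in the range 0-15 that the cited RFCs reserve for special handling. A hedged one-liner to separate them from ordinary forwarding labels; the helper name is made up here:

    static inline bool mpls_label_is_reserved(u32 label)
    {
            return label < 16;      /* labels 0-15 are reserved */
    }
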
index 9993a421201c231af01d85ef7c1d355745526565..ef9f80f0f529d156a906b461cc7595c4c83358a6 100644 (file)
@@ -42,6 +42,9 @@ enum tcp_conntrack {
 /* The field td_maxack has been set */
 #define IP_CT_TCP_FLAG_MAXACK_SET              0x20
 
+/* Marks possibility for expected RFC5961 challenge ACK */
+#define IP_CT_EXP_CHALLENGE_ACK                0x40
+
 struct nf_ct_tcp_flags {
        __u8 flags;
        __u8 mask;
index 309211b3eb672f9449a0fc266ed643ac881196d3..d97f84c080daefb3e8789a59b98cedbbe387cc56 100644 (file)
@@ -167,6 +167,7 @@ enum perf_branch_sample_type_shift {
        PERF_SAMPLE_BRANCH_COND_SHIFT           = 10, /* conditional branches */
 
        PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT     = 11, /* call/ret stack */
+       PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT       = 12, /* indirect jumps */
 
        PERF_SAMPLE_BRANCH_MAX_SHIFT            /* non-ABI */
 };
@@ -186,6 +187,7 @@ enum perf_branch_sample_type {
        PERF_SAMPLE_BRANCH_COND         = 1U << PERF_SAMPLE_BRANCH_COND_SHIFT,
 
        PERF_SAMPLE_BRANCH_CALL_STACK   = 1U << PERF_SAMPLE_BRANCH_CALL_STACK_SHIFT,
+       PERF_SAMPLE_BRANCH_IND_JUMP     = 1U << PERF_SAMPLE_BRANCH_IND_JUMP_SHIFT,
 
        PERF_SAMPLE_BRANCH_MAX          = 1U << PERF_SAMPLE_BRANCH_MAX_SHIFT,
 };
@@ -563,6 +565,10 @@ struct perf_event_mmap_page {
 #define PERF_RECORD_MISC_GUEST_KERNEL          (4 << 0)
 #define PERF_RECORD_MISC_GUEST_USER            (5 << 0)
 
+/*
+ * Indicates that /proc/PID/maps parsing was truncated by a timeout.
+ */
+#define PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT        (1 << 12)
 /*
  * PERF_RECORD_MISC_MMAP_DATA and PERF_RECORD_MISC_COMM_EXEC are used on
  * different events so can reuse the same bit position.
@@ -800,6 +806,18 @@ enum perf_event_type {
         */
        PERF_RECORD_ITRACE_START                = 12,
 
+       /*
+        * Records the dropped/lost sample number.
+        *
+        * struct {
+        *      struct perf_event_header        header;
+        *
+        *      u64                             lost;
+        *      struct sample_id                sample_id;
+        * };
+        */
+       PERF_RECORD_LOST_SAMPLES                = 13,
+
        PERF_RECORD_MAX,                        /* non-ABI */
 };
 
index 974db03f7b1a2d9ddf96d0b34a409f8356e94d1a..17fb02f488da88efdcd071258a487ab563251f37 100644 (file)
@@ -337,7 +337,7 @@ struct rtnexthop {
 #define RTNH_F_DEAD            1       /* Nexthop is dead (used by multipath)  */
 #define RTNH_F_PERVASIVE       2       /* Do recursive gateway lookup  */
 #define RTNH_F_ONLINK          4       /* Gateway is forced on link    */
-#define RTNH_F_EXTERNAL                8       /* Route installed externally   */
+#define RTNH_F_OFFLOAD         8       /* offloaded route */
 
 /* Macros to handle nexthops */
 
index 3b9718328d8bf7732a73a13a4811b98ff667f000..faa72f4fa547bcfe643b9cfd32b83b62caf6b973 100644 (file)
@@ -112,6 +112,7 @@ enum {
 #define TCP_FASTOPEN           23      /* Enable FastOpen on listeners */
 #define TCP_TIMESTAMP          24
 #define TCP_NOTSENT_LOWAT      25      /* limit number of unsent bytes in write queue */
+#define TCP_CC_INFO            26      /* Get Congestion Control (optional) info */
 
 struct tcp_repair_opt {
        __u32   opt_code;
@@ -189,6 +190,8 @@ struct tcp_info {
 
        __u64   tcpi_pacing_rate;
        __u64   tcpi_max_pacing_rate;
+       __u64   tcpi_bytes_acked; /* RFC4898 tcpEStatsAppHCThruOctetsAcked */
+       __u64   tcpi_bytes_received; /* RFC4898 tcpEStatsAppHCThruOctetsReceived */
 };
 
 /* for TCP_MD5SIG socket option */
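
Editor's note: userspace picks the two new counters up through the existing TCP_INFO getsockopt; because older kernels return a shorter struct, a careful caller checks the returned length before reading them. A userspace sketch, assuming a libc tcp_info that already carries the new fields:

    #include <string.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    static int read_byte_counters(int fd, unsigned long long *acked,
                                  unsigned long long *rcvd)
    {
            struct tcp_info ti;
            socklen_t len = sizeof(ti);

            memset(&ti, 0, sizeof(ti));
            if (getsockopt(fd, IPPROTO_TCP, TCP_INFO, &ti, &len) < 0)
                    return -1;
            if (len < sizeof(ti))
                    return -1;      /* kernel predates the new fields */

            *acked = ti.tcpi_bytes_acked;
            *rcvd = ti.tcpi_bytes_received;
            return 0;
    }
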
index 984169a819ee4c8f2163c6a5c12d24fbf273caec..d7f1cbc3766c799ac514e4ab2710cb5cbb22a0c0 100644 (file)
@@ -26,6 +26,7 @@
  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
  * SUCH DAMAGE. */
 #include <linux/types.h>
+#include <linux/virtio_types.h>
 #include <linux/virtio_ids.h>
 #include <linux/virtio_config.h>
 
index de69170a30ce525378632523417c881985f6143a..6e4bb4270ca2ea1bb9eab5e49ae7049b6061e12b 100644 (file)
@@ -37,6 +37,7 @@ enum {
        RDMA_NL_IWPM_ADD_MAPPING,
        RDMA_NL_IWPM_QUERY_MAPPING,
        RDMA_NL_IWPM_REMOVE_MAPPING,
+       RDMA_NL_IWPM_REMOTE_INFO,
        RDMA_NL_IWPM_HANDLE_ERR,
        RDMA_NL_IWPM_MAPINFO,
        RDMA_NL_IWPM_MAPINFO_NUM,
index 5321cd9636e6a48e0e4ad0e68fe159e2d015e7c8..7d95fdf9cf3e773f3d800194ca43a2f0a7acdd7a 100644 (file)
@@ -17,7 +17,7 @@ int bind_evtchn_to_irqhandler(unsigned int evtchn,
                              irq_handler_t handler,
                              unsigned long irqflags, const char *devname,
                              void *dev_id);
-int bind_virq_to_irq(unsigned int virq, unsigned int cpu);
+int bind_virq_to_irq(unsigned int virq, unsigned int cpu, bool percpu);
 int bind_virq_to_irqhandler(unsigned int virq, unsigned int cpu,
                            irq_handler_t handler,
                            unsigned long irqflags, const char *devname,
index dc24dec6023292ac6f1bd484ce506074d3e53566..b999fa381bf9fe1f37757af5e0a454cc6adb2da9 100644 (file)
@@ -465,13 +465,9 @@ endmenu # "CPU/Task time and stats accounting"
 
 menu "RCU Subsystem"
 
-choice
-       prompt "RCU Implementation"
-       default TREE_RCU
-
 config TREE_RCU
-       bool "Tree-based hierarchical RCU"
-       depends on !PREEMPT && SMP
+       bool
+       default y if !PREEMPT && SMP
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -479,8 +475,8 @@ config TREE_RCU
          smaller systems.
 
 config PREEMPT_RCU
-       bool "Preemptible tree-based hierarchical RCU"
-       depends on PREEMPT
+       bool
+       default y if PREEMPT
        help
          This option selects the RCU implementation that is
          designed for very large SMP systems with hundreds or
@@ -491,15 +487,28 @@ config PREEMPT_RCU
          Select this option if you are unsure.
 
 config TINY_RCU
-       bool "UP-only small-memory-footprint RCU"
-       depends on !PREEMPT && !SMP
+       bool
+       default y if !PREEMPT && !SMP
        help
          This option selects the RCU implementation that is
          designed for UP systems from which real-time response
          is not required.  This option greatly reduces the
          memory footprint of RCU.
 
-endchoice
+config RCU_EXPERT
+       bool "Make expert-level adjustments to RCU configuration"
+       default n
+       help
+         This option needs to be enabled if you wish to make
+         expert-level adjustments to RCU configuration.  By default,
+         no such adjustments can be made, which has the often-beneficial
+         side-effect of preventing "make oldconfig" from asking you all
+         sorts of detailed questions about how you would like numerous
+         obscure RCU options to be set up.
+
+         Say Y if you need to make expert-level adjustments to RCU.
+
+         Say N if you are unsure.
 
 config SRCU
        bool
@@ -509,7 +518,7 @@ config SRCU
          sections.
 
 config TASKS_RCU
-       bool "Task_based RCU implementation using voluntary context switch"
+       bool
        default n
        select SRCU
        help
@@ -517,8 +526,6 @@ config TASKS_RCU
          only voluntary context switch (not preemption!), idle, and
          user-mode execution as quiescent states.
 
-         If unsure, say N.
-
 config RCU_STALL_COMMON
        def_bool ( TREE_RCU || PREEMPT_RCU || RCU_TRACE )
        help
@@ -531,9 +538,7 @@ config CONTEXT_TRACKING
        bool
 
 config RCU_USER_QS
-       bool "Consider userspace as in RCU extended quiescent state"
-       depends on HAVE_CONTEXT_TRACKING && SMP
-       select CONTEXT_TRACKING
+       bool
        help
          This option sets hooks on kernel / userspace boundaries and
          puts RCU in extended quiescent state when the CPU runs in
@@ -541,12 +546,6 @@ config RCU_USER_QS
          excluded from the global RCU state machine and thus doesn't
          try to keep the timer tick on for RCU.
 
-         Unless you want to hack and help the development of the full
-         dynticks mode, you shouldn't enable this option.  It also
-         adds unnecessary overhead.
-
-         If unsure say N
-
 config CONTEXT_TRACKING_FORCE
        bool "Force context tracking"
        depends on CONTEXT_TRACKING
@@ -578,7 +577,7 @@ config RCU_FANOUT
        int "Tree-based hierarchical RCU fanout value"
        range 2 64 if 64BIT
        range 2 32 if !64BIT
-       depends on TREE_RCU || PREEMPT_RCU
+       depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
        default 64 if 64BIT
        default 32 if !64BIT
        help
@@ -596,9 +595,9 @@ config RCU_FANOUT
 
 config RCU_FANOUT_LEAF
        int "Tree-based hierarchical RCU leaf-level fanout value"
-       range 2 RCU_FANOUT if 64BIT
-       range 2 RCU_FANOUT if !64BIT
-       depends on TREE_RCU || PREEMPT_RCU
+       range 2 64 if 64BIT
+       range 2 32 if !64BIT
+       depends on (TREE_RCU || PREEMPT_RCU) && RCU_EXPERT
        default 16
        help
          This option controls the leaf-level fanout of hierarchical
@@ -621,23 +620,9 @@ config RCU_FANOUT_LEAF
 
          Take the default if unsure.
 
-config RCU_FANOUT_EXACT
-       bool "Disable tree-based hierarchical RCU auto-balancing"
-       depends on TREE_RCU || PREEMPT_RCU
-       default n
-       help
-         This option forces use of the exact RCU_FANOUT value specified,
-         regardless of imbalances in the hierarchy.  This is useful for
-         testing RCU itself, and might one day be useful on systems with
-         strong NUMA behavior.
-
-         Without RCU_FANOUT_EXACT, the code will balance the hierarchy.
-
-         Say N if unsure.
-
 config RCU_FAST_NO_HZ
        bool "Accelerate last non-dyntick-idle CPU's grace periods"
-       depends on NO_HZ_COMMON && SMP
+       depends on NO_HZ_COMMON && SMP && RCU_EXPERT
        default n
        help
          This option permits CPUs to enter dynticks-idle state even if
@@ -663,7 +648,7 @@ config TREE_RCU_TRACE
 
 config RCU_BOOST
        bool "Enable RCU priority boosting"
-       depends on RT_MUTEXES && PREEMPT_RCU
+       depends on RT_MUTEXES && PREEMPT_RCU && RCU_EXPERT
        default n
        help
          This option boosts the priority of preempted RCU readers that
@@ -680,6 +665,7 @@ config RCU_KTHREAD_PRIO
        range 0 99 if !RCU_BOOST
        default 1 if RCU_BOOST
        default 0 if !RCU_BOOST
+       depends on RCU_EXPERT
        help
          This option specifies the SCHED_FIFO priority value that will be
          assigned to the rcuc/n and rcub/n threads and is also the value
@@ -1637,7 +1623,7 @@ config PERF_EVENTS
 config DEBUG_PERF_USE_VMALLOC
        default n
        bool "Debug: use vmalloc to back perf mmap() buffers"
-       depends on PERF_EVENTS && DEBUG_KERNEL
+       depends on PERF_EVENTS && DEBUG_KERNEL && !PPC
        select PERF_USE_VMALLOC
        help
         Use vmalloc memory to back perf mmap() buffers.
index 8369ffa5f33db24a12703ce74eb7ac437ada96f9..a95bbdb2a50232224eb64c5331123701ea7c2416 100644 (file)
@@ -225,10 +225,11 @@ dev_t name_to_dev_t(const char *name)
 #endif
 
        if (strncmp(name, "/dev/", 5) != 0) {
-               unsigned maj, min;
+               unsigned maj, min, offset;
                char dummy;
 
-               if (sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) {
+               if ((sscanf(name, "%u:%u%c", &maj, &min, &dummy) == 2) ||
+                   (sscanf(name, "%u:%u:%u:%c", &maj, &min, &offset, &dummy) == 3)) {
                        res = MKDEV(maj, min);
                        if (maj != MAJOR(res) || min != MINOR(res))
                                goto fail;
index 3aaea7ffd077c874cb1b420013458020057f45a6..a24ba9fe5bb8892dfaa7452fe78f9ef68d1d97fc 100644 (file)
@@ -47,8 +47,7 @@
 #define RECV           1
 
 #define STATE_NONE     0
-#define STATE_PENDING  1
-#define STATE_READY    2
+#define STATE_READY    1
 
 struct posix_msg_tree_node {
        struct rb_node          rb_node;
@@ -571,15 +570,12 @@ static int wq_sleep(struct mqueue_inode_info *info, int sr,
        wq_add(info, sr, ewp);
 
        for (;;) {
-               set_current_state(TASK_INTERRUPTIBLE);
+               __set_current_state(TASK_INTERRUPTIBLE);
 
                spin_unlock(&info->lock);
                time = schedule_hrtimeout_range_clock(timeout, 0,
                        HRTIMER_MODE_ABS, CLOCK_REALTIME);
 
-               while (ewp->state == STATE_PENDING)
-                       cpu_relax();
-
                if (ewp->state == STATE_READY) {
                        retval = 0;
                        goto out;
@@ -907,11 +903,15 @@ out_name:
  * list of waiting receivers. A sender checks that list before adding the new
  * message into the message array. If there is a waiting receiver, then it
  * bypasses the message array and directly hands the message over to the
- * receiver.
- * The receiver accepts the message and returns without grabbing the queue
- * spinlock. Therefore an intermediate STATE_PENDING state and memory barriers
- * are necessary. The same algorithm is used for sysv semaphores, see
- * ipc/sem.c for more details.
+ * receiver. The receiver accepts the message and returns without grabbing the
+ * queue spinlock:
+ *
+ * - Set pointer to message.
+ * - Queue the receiver task for later wakeup (without the info->lock).
+ * - Update its state to STATE_READY. Now the receiver can continue.
+ * - Wake up the process after the lock is dropped. Should the process wake up
+ *   before this wakeup (due to a timeout or a signal) it will either see
+ *   STATE_READY and continue or acquire the lock to check the state again.
  *
  * The same algorithm is used for senders.
  */
@@ -919,21 +919,29 @@ out_name:
 /* pipelined_send() - send a message directly to the task waiting in
  * sys_mq_timedreceive() (without inserting message into a queue).
  */
-static inline void pipelined_send(struct mqueue_inode_info *info,
+static inline void pipelined_send(struct wake_q_head *wake_q,
+                                 struct mqueue_inode_info *info,
                                  struct msg_msg *message,
                                  struct ext_wait_queue *receiver)
 {
        receiver->msg = message;
        list_del(&receiver->list);
-       receiver->state = STATE_PENDING;
-       wake_up_process(receiver->task);
-       smp_wmb();
+       wake_q_add(wake_q, receiver->task);
+       /*
+        * Rely on the implicit cmpxchg barrier from wake_q_add such
+        * that we can ensure that updating receiver->state is the last
+        * write operation: once it is set, the receiver can continue,
+        * and if we did not yet hold the task reference taken by
+        * wake_q_add(), we could race with a use-after-free and a
+        * bogus wakeup.
+        */
        receiver->state = STATE_READY;
 }
 
 /* pipelined_receive() - if there is task waiting in sys_mq_timedsend()
  * gets its message and put to the queue (we have one free place for sure). */
-static inline void pipelined_receive(struct mqueue_inode_info *info)
+static inline void pipelined_receive(struct wake_q_head *wake_q,
+                                    struct mqueue_inode_info *info)
 {
        struct ext_wait_queue *sender = wq_get_first_waiter(info, SEND);
 
@@ -944,10 +952,9 @@ static inline void pipelined_receive(struct mqueue_inode_info *info)
        }
        if (msg_insert(sender->msg, info))
                return;
+
        list_del(&sender->list);
-       sender->state = STATE_PENDING;
-       wake_up_process(sender->task);
-       smp_wmb();
+       wake_q_add(wake_q, sender->task);
        sender->state = STATE_READY;
 }
 
@@ -965,6 +972,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        struct timespec ts;
        struct posix_msg_tree_node *new_leaf = NULL;
        int ret = 0;
+       WAKE_Q(wake_q);
 
        if (u_abs_timeout) {
                int res = prepare_timeout(u_abs_timeout, &expires, &ts);
@@ -1049,7 +1057,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        } else {
                receiver = wq_get_first_waiter(info, RECV);
                if (receiver) {
-                       pipelined_send(info, msg_ptr, receiver);
+                       pipelined_send(&wake_q, info, msg_ptr, receiver);
                } else {
                        /* adds message to the queue */
                        ret = msg_insert(msg_ptr, info);
@@ -1062,6 +1070,7 @@ SYSCALL_DEFINE5(mq_timedsend, mqd_t, mqdes, const char __user *, u_msg_ptr,
        }
 out_unlock:
        spin_unlock(&info->lock);
+       wake_up_q(&wake_q);
 out_free:
        if (ret)
                free_msg(msg_ptr);
@@ -1149,14 +1158,17 @@ SYSCALL_DEFINE5(mq_timedreceive, mqd_t, mqdes, char __user *, u_msg_ptr,
                        msg_ptr = wait.msg;
                }
        } else {
+               WAKE_Q(wake_q);
+
                msg_ptr = msg_get(info);
 
                inode->i_atime = inode->i_mtime = inode->i_ctime =
                                CURRENT_TIME;
 
                /* There is now free space in queue. */
-               pipelined_receive(info);
+               pipelined_receive(&wake_q, info);
                spin_unlock(&info->lock);
+               wake_up_q(&wake_q);
                ret = 0;
        }
        if (ret == 0) {
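The conversion above is the general wake_q pattern: collect tasks on an on-stack queue while holding the lock, then perform the wakeups after it is dropped, shortening the lock hold time. A minimal sketch, assuming hypothetical my_queue/waiter types:

	#include <linux/sched.h>
	#include <linux/spinlock.h>

	static void wake_all_waiters(struct my_queue *q)
	{
		WAKE_Q(wake_q);			/* on-stack wake queue */
		struct waiter *w, *tmp;

		spin_lock(&q->lock);
		list_for_each_entry_safe(w, tmp, &q->waiters, list) {
			list_del_init(&w->list);
			wake_q_add(&wake_q, w->task);	/* takes a task ref */
		}
		spin_unlock(&q->lock);

		wake_up_q(&wake_q);	/* wakeups run without q->lock held */
	}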
index 08561f1acd130bd68314e03278402e629b028ba4..ebdb0043203adb339e7b917dd3bb28b47d2ee480 100644 (file)
@@ -235,9 +235,16 @@ config LOCK_SPIN_ON_OWNER
        def_bool y
        depends on MUTEX_SPIN_ON_OWNER || RWSEM_SPIN_ON_OWNER
 
-config ARCH_USE_QUEUE_RWLOCK
+config ARCH_USE_QUEUED_SPINLOCKS
        bool
 
-config QUEUE_RWLOCK
-       def_bool y if ARCH_USE_QUEUE_RWLOCK
+config QUEUED_SPINLOCKS
+       def_bool y if ARCH_USE_QUEUED_SPINLOCKS
+       depends on SMP
+
+config ARCH_USE_QUEUED_RWLOCKS
+       bool
+
+config QUEUED_RWLOCKS
+       def_bool y if ARCH_USE_QUEUED_RWLOCKS
        depends on SMP
index 24f00610c575fd5d34c40dcf9fad35b358596c22..333d364be29d9e6c8b209d9eaded9d28552a36d7 100644 (file)
@@ -912,7 +912,8 @@ long compat_get_bitmap(unsigned long *mask, const compat_ulong_t __user *umask,
                         * bitmap. We must however ensure the end of the
                         * kernel bitmap is zeroed.
                         */
-                       if (nr_compat_longs-- > 0) {
+                       if (nr_compat_longs) {
+                               nr_compat_longs--;
                                if (__get_user(um, umask))
                                        return -EFAULT;
                        } else {
@@ -954,7 +955,8 @@ long compat_put_bitmap(compat_ulong_t __user *umask, unsigned long *mask,
                         * We dont want to write past the end of the userspace
                         * bitmap.
                         */
-                       if (nr_compat_longs-- > 0) {
+                       if (nr_compat_longs) {
+                               nr_compat_longs--;
                                if (__put_user(um, umask))
                                        return -EFAULT;
                        }
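The difference between the two forms matters once the counter reaches zero: the old post-decrement kept decrementing on every further pass (a signed counter drifts negative; an unsigned one would wrap), while the new form leaves the counter at zero. A standalone userspace demonstration (hypothetical):

	#include <stdio.h>

	int main(void)
	{
		long a = 1, b = 1;

		for (int i = 0; i < 3; i++) {
			if (a-- > 0)	/* decrements even when already 0 */
				;
			if (b)		/* stops cleanly at 0 */
				b--;
		}
		printf("a=%ld b=%ld\n", a, b);	/* prints a=-2 b=0 */
		return 0;
	}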
index 94bbe4695232cd2fa2e9c0def32de7fa27644971..9c9c9fab16cc3610afa76a6c467780482b35b0be 100644 (file)
@@ -398,7 +398,6 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen)
        err = __stop_machine(take_cpu_down, &tcd_param, cpumask_of(cpu));
        if (err) {
                /* CPU didn't die: tell everyone.  Can't complain. */
-               smpboot_unpark_threads(cpu);
                cpu_notify_nofail(CPU_DOWN_FAILED | mod, hcpu);
                goto out_release;
        }
@@ -463,6 +462,7 @@ static int smpboot_thread_call(struct notifier_block *nfb,
 
        switch (action & ~CPU_TASKS_FROZEN) {
 
+       case CPU_DOWN_FAILED:
        case CPU_ONLINE:
                smpboot_unpark_threads(cpu);
                break;
@@ -479,7 +479,7 @@ static struct notifier_block smpboot_thread_notifier = {
        .priority = CPU_PRI_SMPBOOT,
 };
 
-void __cpuinit smpboot_thread_init(void)
+void smpboot_thread_init(void)
 {
        register_cpu_notifier(&smpboot_thread_notifier);
 }
index 81aa3a4ece9f787038027bb4e5a1e456312c1182..8e13f3e54ec369f26d52e52081f013a6aa29fd23 100644 (file)
 
 static struct workqueue_struct *perf_wq;
 
+typedef int (*remote_function_f)(void *);
+
 struct remote_function_call {
        struct task_struct      *p;
-       int                     (*func)(void *info);
+       remote_function_f       func;
        void                    *info;
        int                     ret;
 };
@@ -86,7 +88,7 @@ static void remote_function(void *data)
  *         -EAGAIN - when the process moved away
  */
 static int
-task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
+task_function_call(struct task_struct *p, remote_function_f func, void *info)
 {
        struct remote_function_call data = {
                .p      = p,
@@ -110,7 +112,7 @@ task_function_call(struct task_struct *p, int (*func) (void *info), void *info)
  *
  * returns: @func return value or -ENXIO when the cpu is offline
  */
-static int cpu_function_call(int cpu, int (*func) (void *info), void *info)
+static int cpu_function_call(int cpu, remote_function_f func, void *info)
 {
        struct remote_function_call data = {
                .p      = NULL,
@@ -747,62 +749,31 @@ perf_cgroup_mark_enabled(struct perf_event *event,
 /*
  * function must be called with interrupts disabled
  */
-static enum hrtimer_restart perf_cpu_hrtimer_handler(struct hrtimer *hr)
+static enum hrtimer_restart perf_mux_hrtimer_handler(struct hrtimer *hr)
 {
        struct perf_cpu_context *cpuctx;
-       enum hrtimer_restart ret = HRTIMER_NORESTART;
        int rotations = 0;
 
        WARN_ON(!irqs_disabled());
 
        cpuctx = container_of(hr, struct perf_cpu_context, hrtimer);
-
        rotations = perf_rotate_context(cpuctx);
 
-       /*
-        * arm timer if needed
-        */
-       if (rotations) {
+       raw_spin_lock(&cpuctx->hrtimer_lock);
+       if (rotations)
                hrtimer_forward_now(hr, cpuctx->hrtimer_interval);
-               ret = HRTIMER_RESTART;
-       }
-
-       return ret;
-}
-
-/* CPU is going down */
-void perf_cpu_hrtimer_cancel(int cpu)
-{
-       struct perf_cpu_context *cpuctx;
-       struct pmu *pmu;
-       unsigned long flags;
-
-       if (WARN_ON(cpu != smp_processor_id()))
-               return;
-
-       local_irq_save(flags);
-
-       rcu_read_lock();
-
-       list_for_each_entry_rcu(pmu, &pmus, entry) {
-               cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
-
-               if (pmu->task_ctx_nr == perf_sw_context)
-                       continue;
-
-               hrtimer_cancel(&cpuctx->hrtimer);
-       }
-
-       rcu_read_unlock();
+       else
+               cpuctx->hrtimer_active = 0;
+       raw_spin_unlock(&cpuctx->hrtimer_lock);
 
-       local_irq_restore(flags);
+       return rotations ? HRTIMER_RESTART : HRTIMER_NORESTART;
 }
 
-static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
+static void __perf_mux_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
 {
-       struct hrtimer *hr = &cpuctx->hrtimer;
+       struct hrtimer *timer = &cpuctx->hrtimer;
        struct pmu *pmu = cpuctx->ctx.pmu;
-       int timer;
+       u64 interval;
 
        /* no multiplexing needed for SW PMU */
        if (pmu->task_ctx_nr == perf_sw_context)
@@ -812,31 +783,36 @@ static void __perf_cpu_hrtimer_init(struct perf_cpu_context *cpuctx, int cpu)
         * check default is sane, if not set then force to
         * default interval (1/tick)
         */
-       timer = pmu->hrtimer_interval_ms;
-       if (timer < 1)
-               timer = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
+       interval = pmu->hrtimer_interval_ms;
+       if (interval < 1)
+               interval = pmu->hrtimer_interval_ms = PERF_CPU_HRTIMER;
 
-       cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
+       cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * interval);
 
-       hrtimer_init(hr, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
-       hr->function = perf_cpu_hrtimer_handler;
+       raw_spin_lock_init(&cpuctx->hrtimer_lock);
+       hrtimer_init(timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
+       timer->function = perf_mux_hrtimer_handler;
 }
 
-static void perf_cpu_hrtimer_restart(struct perf_cpu_context *cpuctx)
+static int perf_mux_hrtimer_restart(struct perf_cpu_context *cpuctx)
 {
-       struct hrtimer *hr = &cpuctx->hrtimer;
+       struct hrtimer *timer = &cpuctx->hrtimer;
        struct pmu *pmu = cpuctx->ctx.pmu;
+       unsigned long flags;
 
        /* not for SW PMU */
        if (pmu->task_ctx_nr == perf_sw_context)
-               return;
+               return 0;
 
-       if (hrtimer_active(hr))
-               return;
+       raw_spin_lock_irqsave(&cpuctx->hrtimer_lock, flags);
+       if (!cpuctx->hrtimer_active) {
+               cpuctx->hrtimer_active = 1;
+               hrtimer_forward_now(timer, cpuctx->hrtimer_interval);
+               hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
+       }
+       raw_spin_unlock_irqrestore(&cpuctx->hrtimer_lock, flags);
 
-       if (!hrtimer_callback_running(hr))
-               __hrtimer_start_range_ns(hr, cpuctx->hrtimer_interval,
-                                        0, HRTIMER_MODE_REL_PINNED, 0);
+       return 0;
 }
 
 void perf_pmu_disable(struct pmu *pmu)
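The rewritten restart path replaces hrtimer_active()/hrtimer_callback_running() checks, which race with a concurrently running callback, with an explicit flag protected by a raw spinlock shared between starter and callback. A condensed sketch of the pattern, with hypothetical names:

	struct mux {
		raw_spinlock_t	lock;
		struct hrtimer	timer;
		int		active;
		ktime_t		interval;
	};

	static void mux_restart(struct mux *m)
	{
		unsigned long flags;

		raw_spin_lock_irqsave(&m->lock, flags);
		if (!m->active) {	/* only the first starter arms it */
			m->active = 1;
			hrtimer_forward_now(&m->timer, m->interval);
			hrtimer_start_expires(&m->timer, HRTIMER_MODE_ABS_PINNED);
		}
		raw_spin_unlock_irqrestore(&m->lock, flags);
	}

The callback clears m->active under the same lock before returning HRTIMER_NORESTART, so a later restart can re-arm the timer with no window in which both sides believe it is already running.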
@@ -913,10 +889,30 @@ static void put_ctx(struct perf_event_context *ctx)
  * Those places that change perf_event::ctx will hold both
  * perf_event_ctx::mutex of the 'old' and 'new' ctx value.
  *
- * Lock ordering is by mutex address. There is one other site where
- * perf_event_context::mutex nests and that is put_event(). But remember that
- * that is a parent<->child context relation, and migration does not affect
- * children, therefore these two orderings should not interact.
+ * Lock ordering is by mutex address. There are two other sites where
+ * perf_event_context::mutex nests and those are:
+ *
+ *  - perf_event_exit_task_context()   [ child , 0 ]
+ *      __perf_event_exit_task()
+ *        sync_child_event()
+ *          put_event()                        [ parent, 1 ]
+ *
+ *  - perf_event_init_context()                [ parent, 0 ]
+ *      inherit_task_group()
+ *        inherit_group()
+ *          inherit_event()
+ *            perf_event_alloc()
+ *              perf_init_event()
+ *                perf_try_init_event()        [ child , 1 ]
+ *
+ * While it appears there is an obvious deadlock here -- the parent and child
+ * nesting levels are inverted between the two -- this is in fact safe because
+ * life-time rules separate them: an exiting task cannot fork, and a spawning
+ * task cannot (yet) exit.
+ *
+ * But remember that these are parent<->child context relations, and
+ * migration does not affect children, therefore these two orderings should not
+ * interact.
  *
  * The change in perf_event::ctx does not affect children (as claimed above)
  * because the sys_perf_event_open() case will install a new event and break
@@ -1915,7 +1911,7 @@ group_sched_in(struct perf_event *group_event,
 
        if (event_sched_in(group_event, cpuctx, ctx)) {
                pmu->cancel_txn(pmu);
-               perf_cpu_hrtimer_restart(cpuctx);
+               perf_mux_hrtimer_restart(cpuctx);
                return -EAGAIN;
        }
 
@@ -1962,7 +1958,7 @@ group_error:
 
        pmu->cancel_txn(pmu);
 
-       perf_cpu_hrtimer_restart(cpuctx);
+       perf_mux_hrtimer_restart(cpuctx);
 
        return -EAGAIN;
 }
@@ -2235,7 +2231,7 @@ static int __perf_event_enable(void *info)
                 */
                if (leader != event) {
                        group_sched_out(leader, cpuctx, ctx);
-                       perf_cpu_hrtimer_restart(cpuctx);
+                       perf_mux_hrtimer_restart(cpuctx);
                }
                if (leader->attr.pinned) {
                        update_group_times(leader);
@@ -3422,7 +3418,6 @@ static void free_event_rcu(struct rcu_head *head)
        if (event->ns)
                put_pid_ns(event->ns);
        perf_event_free_filter(event);
-       perf_event_free_bpf_prog(event);
        kfree(event);
 }
 
@@ -3553,6 +3548,8 @@ static void __free_event(struct perf_event *event)
                        put_callchain_buffers();
        }
 
+       perf_event_free_bpf_prog(event);
+
        if (event->destroy)
                event->destroy(event);
 
@@ -3657,9 +3654,6 @@ static void perf_remove_from_owner(struct perf_event *event)
        }
 }
 
-/*
- * Called when the last reference to the file is gone.
- */
 static void put_event(struct perf_event *event)
 {
        struct perf_event_context *ctx;
@@ -3697,6 +3691,9 @@ int perf_event_release_kernel(struct perf_event *event)
 }
 EXPORT_SYMBOL_GPL(perf_event_release_kernel);
 
+/*
+ * Called when the last reference to the file is gone.
+ */
 static int perf_release(struct inode *inode, struct file *file)
 {
        put_event(file->private_data);
@@ -4310,20 +4307,20 @@ static void ring_buffer_attach(struct perf_event *event,
                WARN_ON_ONCE(event->rcu_pending);
 
                old_rb = event->rb;
-               event->rcu_batches = get_state_synchronize_rcu();
-               event->rcu_pending = 1;
-
                spin_lock_irqsave(&old_rb->event_lock, flags);
                list_del_rcu(&event->rb_entry);
                spin_unlock_irqrestore(&old_rb->event_lock, flags);
-       }
 
-       if (event->rcu_pending && rb) {
-               cond_synchronize_rcu(event->rcu_batches);
-               event->rcu_pending = 0;
+               event->rcu_batches = get_state_synchronize_rcu();
+               event->rcu_pending = 1;
        }
 
        if (rb) {
+               if (event->rcu_pending) {
+                       cond_synchronize_rcu(event->rcu_batches);
+                       event->rcu_pending = 0;
+               }
+
                spin_lock_irqsave(&rb->event_lock, flags);
                list_add_rcu(&event->rb_entry, &rb->event_list);
                spin_unlock_irqrestore(&rb->event_lock, flags);
@@ -5360,9 +5357,9 @@ void perf_prepare_sample(struct perf_event_header *header,
        }
 }
 
-static void perf_event_output(struct perf_event *event,
-                               struct perf_sample_data *data,
-                               struct pt_regs *regs)
+void perf_event_output(struct perf_event *event,
+                       struct perf_sample_data *data,
+                       struct pt_regs *regs)
 {
        struct perf_output_handle handle;
        struct perf_event_header header;
@@ -5953,6 +5950,39 @@ void perf_event_aux_event(struct perf_event *event, unsigned long head,
        perf_output_end(&handle);
 }
 
+/*
+ * Lost/dropped samples logging
+ */
+void perf_log_lost_samples(struct perf_event *event, u64 lost)
+{
+       struct perf_output_handle handle;
+       struct perf_sample_data sample;
+       int ret;
+
+       struct {
+               struct perf_event_header        header;
+               u64                             lost;
+       } lost_samples_event = {
+               .header = {
+                       .type = PERF_RECORD_LOST_SAMPLES,
+                       .misc = 0,
+                       .size = sizeof(lost_samples_event),
+               },
+               .lost           = lost,
+       };
+
+       perf_event_header__init_id(&lost_samples_event.header, &sample, event);
+
+       ret = perf_output_begin(&handle, event,
+                               lost_samples_event.header.size);
+       if (ret)
+               return;
+
+       perf_output_put(&handle, lost_samples_event);
+       perf_event__output_id_sample(event, &handle, &sample);
+       perf_output_end(&handle);
+}
+
 /*
  * IRQ throttle logging
  */
@@ -6843,9 +6873,8 @@ static void perf_swevent_start_hrtimer(struct perf_event *event)
        } else {
                period = max_t(u64, 10000, hwc->sample_period);
        }
-       __hrtimer_start_range_ns(&hwc->hrtimer,
-                               ns_to_ktime(period), 0,
-                               HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&hwc->hrtimer, ns_to_ktime(period),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 static void perf_swevent_cancel_hrtimer(struct perf_event *event)
@@ -7146,6 +7175,8 @@ perf_event_mux_interval_ms_show(struct device *dev,
        return snprintf(page, PAGE_SIZE-1, "%d\n", pmu->hrtimer_interval_ms);
 }
 
+static DEFINE_MUTEX(mux_interval_mutex);
+
 static ssize_t
 perf_event_mux_interval_ms_store(struct device *dev,
                                 struct device_attribute *attr,
@@ -7165,17 +7196,21 @@ perf_event_mux_interval_ms_store(struct device *dev,
        if (timer == pmu->hrtimer_interval_ms)
                return count;
 
+       mutex_lock(&mux_interval_mutex);
        pmu->hrtimer_interval_ms = timer;
 
        /* update all cpuctx for this PMU */
-       for_each_possible_cpu(cpu) {
+       get_online_cpus();
+       for_each_online_cpu(cpu) {
                struct perf_cpu_context *cpuctx;
                cpuctx = per_cpu_ptr(pmu->pmu_cpu_context, cpu);
                cpuctx->hrtimer_interval = ns_to_ktime(NSEC_PER_MSEC * timer);
 
-               if (hrtimer_active(&cpuctx->hrtimer))
-                       hrtimer_forward_now(&cpuctx->hrtimer, cpuctx->hrtimer_interval);
+               cpu_function_call(cpu,
+                       (remote_function_f)perf_mux_hrtimer_restart, cpuctx);
        }
+       put_online_cpus();
+       mutex_unlock(&mux_interval_mutex);
 
        return count;
 }
@@ -7280,7 +7315,7 @@ skip_type:
                lockdep_set_class(&cpuctx->ctx.lock, &cpuctx_lock);
                cpuctx->ctx.pmu = pmu;
 
-               __perf_cpu_hrtimer_init(cpuctx, cpu);
+               __perf_mux_hrtimer_init(cpuctx, cpu);
 
                cpuctx->unique_pmu = pmu;
        }
@@ -7364,7 +7399,12 @@ static int perf_try_init_event(struct pmu *pmu, struct perf_event *event)
                return -ENODEV;
 
        if (event->group_leader != event) {
-               ctx = perf_event_ctx_lock(event->group_leader);
+               /*
+                * This ctx->mutex can nest when we're called through
+                * inheritance. See the perf_event_ctx_lock_nested() comment.
+                */
+               ctx = perf_event_ctx_lock_nested(event->group_leader,
+                                                SINGLE_DEPTH_NESTING);
                BUG_ON(!ctx);
        }
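
perf_event_ctx_lock_nested() boils down to lockdep's subclass annotation; the same idiom applies anywhere two instances of one lock class nest by design. A minimal sketch, assuming hypothetical parent/child objects of the same type:

	#include <linux/mutex.h>

	static void lock_pair(struct obj *parent, struct obj *child)
	{
		mutex_lock(&parent->lock);
		/* Same lock class, intentional single-level nesting. */
		mutex_lock_nested(&child->lock, SINGLE_DEPTH_NESTING);

		/* ... work on both objects ... */

		mutex_unlock(&child->lock);
		mutex_unlock(&parent->lock);
	}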
 
index 9f6ce9ba4a04330d689345bc312fc765f0804a31..2deb24c7a40dd979313eb1fcff58044d2d948888 100644 (file)
@@ -72,15 +72,6 @@ static inline bool rb_has_aux(struct ring_buffer *rb)
 void perf_event_aux_event(struct perf_event *event, unsigned long head,
                          unsigned long size, u64 flags);
 
-extern void
-perf_event_header__init_id(struct perf_event_header *header,
-                          struct perf_sample_data *data,
-                          struct perf_event *event);
-extern void
-perf_event__output_id_sample(struct perf_event *event,
-                            struct perf_output_handle *handle,
-                            struct perf_sample_data *sample);
-
 extern struct page *
 perf_mmap_to_page(struct ring_buffer *rb, unsigned long pgoff);
 
index 232f00f273cbe419d2738d5f83465dd96529ee17..96472824a752f76fe651ec1bfb7ab7a52411a12c 100644 (file)
@@ -141,7 +141,7 @@ int perf_output_begin(struct perf_output_handle *handle,
        perf_output_get_handle(handle);
 
        do {
-               tail = ACCESS_ONCE(rb->user_page->data_tail);
+               tail = READ_ONCE_CTRL(rb->user_page->data_tail);
                offset = head = local_read(&rb->head);
                if (!rb->overwrite &&
                    unlikely(CIRC_SPACE(head, tail, perf_data_size(rb)) < size))
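READ_ONCE_CTRL marks the data_tail load as the head of a control dependency: stores inside the branch that depends on it cannot be reordered before the load, so the kernel never overwrites data userspace has not consumed. Schematically (base, record and len are placeholders for the surrounding bookkeeping):

	tail = READ_ONCE_CTRL(rb->user_page->data_tail);
	if (CIRC_SPACE(head, tail, perf_data_size(rb)) >= len) {
		/*
		 * These stores are ordered after the data_tail read by
		 * the control dependency formed by the branch above.
		 */
		memcpy(base + head, record, len);
	}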
@@ -493,6 +493,20 @@ int rb_alloc_aux(struct ring_buffer *rb, struct perf_event *event,
                        rb->aux_pages[rb->aux_nr_pages] = page_address(page++);
        }
 
+       /*
+        * In overwrite mode, PMUs that don't support SG may not handle more
+        * than one contiguous allocation, since they rely on PMI to do double
+        * buffering. In this case, the entire buffer has to be one contiguous
+        * chunk.
+        */
+       if ((event->pmu->capabilities & PERF_PMU_CAP_AUX_NO_SG) &&
+           overwrite) {
+               struct page *page = virt_to_page(rb->aux_pages[0]);
+
+               if (page_private(page) != max_order)
+                       goto out;
+       }
+
        rb->aux_priv = event->pmu->setup_aux(event->cpu, rb->aux_pages, nr_pages,
                                             overwrite);
        if (!rb->aux_priv)
index 03c1eaaa6ef56f56a670488eaf572eb8c6f58d4e..0bb88b555550580dca507fa34fcc7c60a5a013ee 100644 (file)
@@ -1091,10 +1091,7 @@ static void posix_cpu_timers_init_group(struct signal_struct *sig)
 {
        unsigned long cpu_limit;
 
-       /* Thread group counters. */
-       thread_group_cputime_init(sig);
-
-       cpu_limit = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       cpu_limit = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (cpu_limit != RLIM_INFINITY) {
                sig->cputime_expires.prof_exp = secs_to_cputime(cpu_limit);
                sig->cputimer.running = 1;
@@ -1396,6 +1393,9 @@ static struct task_struct *copy_process(unsigned long clone_flags,
        p->hardirq_context = 0;
        p->softirq_context = 0;
 #endif
+
+       p->pagefault_disabled = 0;
+
 #ifdef CONFIG_LOCKDEP
        p->lockdep_depth = 0; /* no locks held yet */
        p->curr_chain_key = 0;
index 2579e407ff67d039106207f78a466f824e515db6..ea6ca0bca52570b8cd88a9c428016cd54cf55a0a 100644 (file)
@@ -1090,9 +1090,11 @@ static void __unqueue_futex(struct futex_q *q)
 
 /*
  * The hash bucket lock must be held when this is called.
- * Afterwards, the futex_q must not be accessed.
+ * Afterwards, the futex_q must not be accessed. Callers
+ * must later call wake_up_q() for the actual wakeups to
+ * occur.
  */
-static void wake_futex(struct futex_q *q)
+static void mark_wake_futex(struct wake_q_head *wake_q, struct futex_q *q)
 {
        struct task_struct *p = q->task;
 
@@ -1100,14 +1102,10 @@ static void wake_futex(struct futex_q *q)
                return;
 
        /*
-        * We set q->lock_ptr = NULL _before_ we wake up the task. If
-        * a non-futex wake up happens on another CPU then the task
-        * might exit and p would dereference a non-existing task
-        * struct. Prevent this by holding a reference on p across the
-        * wake up.
+        * Queue the task for later wakeup, once we have released
+        * the hb->lock. wake_q_add() grabs a reference to p.
         */
-       get_task_struct(p);
-
+       wake_q_add(wake_q, p);
        __unqueue_futex(q);
        /*
         * The waiting task can free the futex_q as soon as
@@ -1117,9 +1115,6 @@ static void wake_futex(struct futex_q *q)
         */
        smp_wmb();
        q->lock_ptr = NULL;
-
-       wake_up_state(p, TASK_NORMAL);
-       put_task_struct(p);
 }
 
 static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this)
@@ -1217,6 +1212,7 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
        struct futex_q *this, *next;
        union futex_key key = FUTEX_KEY_INIT;
        int ret;
+       WAKE_Q(wake_q);
 
        if (!bitset)
                return -EINVAL;
@@ -1244,13 +1240,14 @@ futex_wake(u32 __user *uaddr, unsigned int flags, int nr_wake, u32 bitset)
                        if (!(this->bitset & bitset))
                                continue;
 
-                       wake_futex(this);
+                       mark_wake_futex(&wake_q, this);
                        if (++ret >= nr_wake)
                                break;
                }
        }
 
        spin_unlock(&hb->lock);
+       wake_up_q(&wake_q);
 out_put_key:
        put_futex_key(&key);
 out:
@@ -1269,6 +1266,7 @@ futex_wake_op(u32 __user *uaddr1, unsigned int flags, u32 __user *uaddr2,
        struct futex_hash_bucket *hb1, *hb2;
        struct futex_q *this, *next;
        int ret, op_ret;
+       WAKE_Q(wake_q);
 
 retry:
        ret = get_futex_key(uaddr1, flags & FLAGS_SHARED, &key1, VERIFY_READ);
@@ -1320,7 +1318,7 @@ retry_private:
                                ret = -EINVAL;
                                goto out_unlock;
                        }
-                       wake_futex(this);
+                       mark_wake_futex(&wake_q, this);
                        if (++ret >= nr_wake)
                                break;
                }
@@ -1334,7 +1332,7 @@ retry_private:
                                        ret = -EINVAL;
                                        goto out_unlock;
                                }
-                               wake_futex(this);
+                               mark_wake_futex(&wake_q, this);
                                if (++op_ret >= nr_wake2)
                                        break;
                        }
@@ -1344,6 +1342,7 @@ retry_private:
 
 out_unlock:
        double_unlock_hb(hb1, hb2);
+       wake_up_q(&wake_q);
 out_put_keys:
        put_futex_key(&key2);
 out_put_key1:
@@ -1503,6 +1502,7 @@ static int futex_requeue(u32 __user *uaddr1, unsigned int flags,
        struct futex_pi_state *pi_state = NULL;
        struct futex_hash_bucket *hb1, *hb2;
        struct futex_q *this, *next;
+       WAKE_Q(wake_q);
 
        if (requeue_pi) {
                /*
@@ -1679,7 +1679,7 @@ retry_private:
                 * woken by futex_unlock_pi().
                 */
                if (++task_count <= nr_wake && !requeue_pi) {
-                       wake_futex(this);
+                       mark_wake_futex(&wake_q, this);
                        continue;
                }
 
@@ -1719,6 +1719,7 @@ retry_private:
 out_unlock:
        free_pi_state(pi_state);
        double_unlock_hb(hb1, hb2);
+       wake_up_q(&wake_q);
        hb_waiters_dec(hb2);
 
        /*
@@ -2055,7 +2056,7 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
 {
        /*
         * The task state is guaranteed to be set before another task can
-        * wake it. set_current_state() is implemented using set_mb() and
+        * wake it. set_current_state() is implemented using smp_store_mb(), and
         * queue_me() calls spin_unlock() upon completion; both serialize
         * access to the hash list and force another memory barrier.
         */
@@ -2063,11 +2064,8 @@ static void futex_wait_queue_me(struct futex_hash_bucket *hb, struct futex_q *q,
        queue_me(q, hb);
 
        /* Arm the timer */
-       if (timeout) {
+       if (timeout)
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-               if (!hrtimer_active(&timeout->timer))
-                       timeout->task = NULL;
-       }
 
        /*
         * If we have been removed from the hash list, then another task
index eb9a4ea394ab33fdde25420f11cb9021df384824..55016b2151f3b3d66db0ed2b838de481fbf99d57 100644 (file)
@@ -949,6 +949,20 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data)
        return -ENOSYS;
 }
 
+/**
+ * irq_chip_set_vcpu_affinity_parent - Set vcpu affinity on the parent interrupt
+ * @data:      Pointer to interrupt specific data
+ * @vcpu_info: The vcpu affinity information
+ */
+int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, void *vcpu_info)
+{
+       data = data->parent_data;
+       if (data->chip->irq_set_vcpu_affinity)
+               return data->chip->irq_set_vcpu_affinity(data, vcpu_info);
+
+       return -ENOSYS;
+}
+
 /**
  * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt
  * @data:      Pointer to interrupt specific data
index 988dc58e8847f6ebdbcd78348d9f527a9e4f2dfe..2feb6feca0cc96dff8514c45750542386db5ec53 100644 (file)
@@ -57,5 +57,6 @@ struct irq_chip dummy_irq_chip = {
        .irq_ack        = noop,
        .irq_mask       = noop,
        .irq_unmask     = noop,
+       .flags          = IRQCHIP_SKIP_SET_WAKE,
 };
 EXPORT_SYMBOL_GPL(dummy_irq_chip);
index df553b0af936be2aa8f5ee5e1968da0c25e88384..b93d434e70bdc63882071ea699b63b2a849e3400 100644 (file)
@@ -59,8 +59,6 @@ enum {
 #include "debug.h"
 #include "settings.h"
 
-#define irq_data_to_desc(data) container_of(data, struct irq_desc, irq_data)
-
 extern int __irq_set_trigger(struct irq_desc *desc, unsigned int irq,
                unsigned long flags);
 extern void __disable_irq(struct irq_desc *desc, unsigned int irq);
index e68932bb308e96e1c6aa184dc7d9972fba8df06e..b1c7e8f46bfb2f1019f90846448aeda5628df10f 100644 (file)
@@ -256,6 +256,37 @@ int irq_set_affinity_hint(unsigned int irq, const struct cpumask *m)
 }
 EXPORT_SYMBOL_GPL(irq_set_affinity_hint);
 
+/**
+ *     irq_set_vcpu_affinity - Set vcpu affinity for the interrupt
+ *     @irq: interrupt number to set affinity
+ *     @vcpu_info: vCPU specific data
+ *
+ *     This function uses the vCPU specific data to set the vCPU
+ *     affinity for an irq. The vCPU specific data is passed from
+ *     outside, such as KVM. One example code path is as below:
+ *     KVM -> IOMMU -> irq_set_vcpu_affinity().
+ */
+int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info)
+{
+       unsigned long flags;
+       struct irq_desc *desc = irq_get_desc_lock(irq, &flags, 0);
+       struct irq_data *data;
+       struct irq_chip *chip;
+       int ret = -ENOSYS;
+
+       if (!desc)
+               return -EINVAL;
+
+       data = irq_desc_get_irq_data(desc);
+       chip = irq_data_get_irq_chip(data);
+       if (chip && chip->irq_set_vcpu_affinity)
+               ret = chip->irq_set_vcpu_affinity(data, vcpu_info);
+       irq_put_desc_unlock(desc, flags);
+
+       return ret;
+}
+EXPORT_SYMBOL_GPL(irq_set_vcpu_affinity);
+
 static void irq_affinity_notify(struct work_struct *work)
 {
        struct irq_affinity_notify *notify =
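A consumer such as a hypervisor-facing IOMMU path hands its vCPU routing data down through this hook. A hypothetical caller sketch (my_vcpu_info is made up; the real layout is irqchip-specific):

	struct my_vcpu_info info = { .vcpu_id = id, .vector = vec };
	int err;

	err = irq_set_vcpu_affinity(irq, &info);
	if (err == -ENOSYS)
		pr_warn("irqchip cannot post interrupts directly to vCPUs\n");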
index ca3f4aaff707db1d2aa28bee7f388be67a3317b5..dd203e276b0775043ddfa4fcdde79cb38baf0b4e 100644 (file)
@@ -7,7 +7,7 @@
 void irq_move_masked_irq(struct irq_data *idata)
 {
        struct irq_desc *desc = irq_data_to_desc(idata);
-       struct irq_chip *chip = idata->chip;
+       struct irq_chip *chip = desc->irq_data.chip;
 
        if (likely(!irqd_is_setaffinity_pending(&desc->irq_data)))
                return;
@@ -52,6 +52,13 @@ void irq_move_irq(struct irq_data *idata)
 {
        bool masked;
 
+       /*
+        * Get the top level irq_data when CONFIG_IRQ_DOMAIN_HIERARCHY is
+        * enabled, and rely on the lookup being optimized away when it is
+        * disabled, so that no #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY is needed.
+        */
+       idata = irq_desc_get_irq_data(irq_data_to_desc(idata));
+
        if (likely(!irqd_is_setaffinity_pending(idata)))
                return;
 
index de7a416cca2a79e537ff9fdb489aef09b4c01662..7dd5c9918e4c243df3504e2f2d9fb600e881cfed 100644 (file)
@@ -17,6 +17,7 @@ obj-$(CONFIG_SMP) += spinlock.o
 obj-$(CONFIG_LOCK_SPIN_ON_OWNER) += osq_lock.o
 obj-$(CONFIG_SMP) += lglock.o
 obj-$(CONFIG_PROVE_LOCKING) += spinlock.o
+obj-$(CONFIG_QUEUED_SPINLOCKS) += qspinlock.o
 obj-$(CONFIG_RT_MUTEXES) += rtmutex.o
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_RT_MUTEX_TESTER) += rtmutex-tester.o
@@ -25,5 +26,5 @@ obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
 obj-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
 obj-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem-xadd.o
 obj-$(CONFIG_PERCPU_RWSEM) += percpu-rwsem.o
-obj-$(CONFIG_QUEUE_RWLOCK) += qrwlock.o
+obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
index 86ae2aebf00432f4d681a413febebff79406889d..951cfcd10b4a0dc98d81f59ec61667378922269e 100644 (file)
@@ -60,6 +60,28 @@ void lg_local_unlock_cpu(struct lglock *lg, int cpu)
 }
 EXPORT_SYMBOL(lg_local_unlock_cpu);
 
+void lg_double_lock(struct lglock *lg, int cpu1, int cpu2)
+{
+       BUG_ON(cpu1 == cpu2);
+
+       /* lock in cpu order, just like lg_global_lock */
+       if (cpu2 < cpu1)
+               swap(cpu1, cpu2);
+
+       preempt_disable();
+       lock_acquire_shared(&lg->lock_dep_map, 0, 0, NULL, _RET_IP_);
+       arch_spin_lock(per_cpu_ptr(lg->lock, cpu1));
+       arch_spin_lock(per_cpu_ptr(lg->lock, cpu2));
+}
+
+void lg_double_unlock(struct lglock *lg, int cpu1, int cpu2)
+{
+       lock_release(&lg->lock_dep_map, 1, _RET_IP_);
+       arch_spin_unlock(per_cpu_ptr(lg->lock, cpu1));
+       arch_spin_unlock(per_cpu_ptr(lg->lock, cpu2));
+       preempt_enable();
+}
+
 void lg_global_lock(struct lglock *lg)
 {
        int i;
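Because lg_double_lock() always takes the two per-CPU locks in ascending CPU order, two callers locking the same pair from opposite directions cannot deadlock. A usage sketch with a hypothetical lglock:

	#include <linux/lglock.h>

	static DEFINE_LGLOCK(runlist_lock);

	static void move_between_cpus(int src_cpu, int dst_cpu)
	{
		lg_double_lock(&runlist_lock, src_cpu, dst_cpu);
		/* both CPUs' per-cpu data is now stable; migrate entries */
		lg_double_unlock(&runlist_lock, src_cpu, dst_cpu);
	}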
index a0831e1b99f4aabd6c80ea68cedb6350a7a9affc..456614136f1a2caed847a9d9051754e11e1a4ed3 100644 (file)
@@ -3900,7 +3900,8 @@ static void zap_class(struct lock_class *class)
        list_del_rcu(&class->hash_entry);
        list_del_rcu(&class->lock_entry);
 
-       class->key = NULL;
+       RCU_INIT_POINTER(class->key, NULL);
+       RCU_INIT_POINTER(class->name, NULL);
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
@@ -4066,8 +4067,7 @@ void __init lockdep_info(void)
 
 #ifdef CONFIG_DEBUG_LOCKDEP
        if (lockdep_init_error) {
-               printk("WARNING: lockdep init error! lock-%s was acquired"
-                       "before lockdep_init\n", lock_init_error);
+               printk("WARNING: lockdep init error: lock '%s' was acquired before lockdep_init().\n", lock_init_error);
                printk("Call stack leading to lockdep invocation was:\n");
                print_stack_trace(&lockdep_init_trace, 0);
        }
index ef43ac4bafb59b83ab979a680d49d6077749f955..d83d798bef95a042e1060a35bf4b79e7c7a6c05c 100644 (file)
@@ -426,10 +426,12 @@ static void seq_lock_time(struct seq_file *m, struct lock_time *lt)
 
 static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
 {
-       char name[39];
-       struct lock_class *class;
+       struct lockdep_subclass_key *ckey;
        struct lock_class_stats *stats;
+       struct lock_class *class;
+       const char *cname;
        int i, namelen;
+       char name[39];
 
        class = data->class;
        stats = &data->stats;
@@ -440,15 +442,25 @@ static void seq_stats(struct seq_file *m, struct lock_stat_data *data)
        if (class->subclass)
                namelen -= 2;
 
-       if (!class->name) {
+       rcu_read_lock_sched();
+       cname = rcu_dereference_sched(class->name);
+       ckey  = rcu_dereference_sched(class->key);
+
+       if (!cname && !ckey) {
+               rcu_read_unlock_sched();
+               return;
+
+       } else if (!cname) {
                char str[KSYM_NAME_LEN];
                const char *key_name;
 
-               key_name = __get_key_name(class->key, str);
+               key_name = __get_key_name(ckey, str);
                snprintf(name, namelen, "%s", key_name);
        } else {
-               snprintf(name, namelen, "%s", class->name);
+               snprintf(name, namelen, "%s", cname);
        }
+       rcu_read_unlock_sched();
+
        namelen = strlen(name);
        if (class->name_version > 1) {
                snprintf(name+namelen, 3, "#%d", class->name_version);
index ec8cce259779061dd863e6a48ff1dcfa1a7a131c..32244186f1f2ae0e7a6343ad084f416aa0cda055 100644 (file)
@@ -122,12 +122,12 @@ static int torture_lock_busted_write_lock(void)
 
 static void torture_lock_busted_write_delay(struct torture_random_state *trsp)
 {
-       const unsigned long longdelay_us = 100;
+       const unsigned long longdelay_ms = 100;
 
        /* We want a long delay occasionally to force massive contention.  */
        if (!(torture_random(trsp) %
-             (cxt.nrealwriters_stress * 2000 * longdelay_us)))
-               mdelay(longdelay_us);
+             (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
 #ifdef CONFIG_PREEMPT
        if (!(torture_random(trsp) % (cxt.nrealwriters_stress * 20000)))
                preempt_schedule();  /* Allow test to be preempted. */
@@ -160,14 +160,14 @@ static int torture_spin_lock_write_lock(void) __acquires(torture_spinlock)
 static void torture_spin_lock_write_delay(struct torture_random_state *trsp)
 {
        const unsigned long shortdelay_us = 2;
-       const unsigned long longdelay_us = 100;
+       const unsigned long longdelay_ms = 100;
 
        /* We want a short delay mostly to emulate likely code, and
         * we want a long delay occasionally to force massive contention.
         */
        if (!(torture_random(trsp) %
-             (cxt.nrealwriters_stress * 2000 * longdelay_us)))
-               mdelay(longdelay_us);
+             (cxt.nrealwriters_stress * 2000 * longdelay_ms)))
+               mdelay(longdelay_ms);
        if (!(torture_random(trsp) %
              (cxt.nrealwriters_stress * 2 * shortdelay_us)))
                udelay(shortdelay_us);
@@ -309,7 +309,7 @@ static int torture_rwlock_read_lock_irq(void) __acquires(torture_rwlock)
 static void torture_rwlock_read_unlock_irq(void)
 __releases(torture_rwlock)
 {
-       write_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
+       read_unlock_irqrestore(&torture_rwlock, cxt.cur_ops->flags);
 }
 
 static struct lock_torture_ops rw_lock_irq_ops = {
index 75e114bdf3f26f379c4382dce2bc5c128c06b868..fd91aaa4554c8be6cf5b981d7ac3022a8046e77e 100644 (file)
@@ -17,6 +17,7 @@
 struct mcs_spinlock {
        struct mcs_spinlock *next;
        int locked; /* 1 if lock acquired */
+       int count;  /* nesting count, see qspinlock.c */
 };
 
 #ifndef arch_mcs_spin_lock_contended
index f956ede7f90df0b31210df87843f1bafc2ddc7cb..6c5da483966bde7aea3c7e7d43a42c0b55349f65 100644 (file)
@@ -1,5 +1,5 @@
 /*
- * Queue read/write lock
+ * Queued read/write locks
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License as published by
 #include <linux/hardirq.h>
 #include <asm/qrwlock.h>
 
+/*
+ * This internal data structure is used for optimizing access to some of
+ * the subfields within the atomic_t cnts.
+ */
+struct __qrwlock {
+       union {
+               atomic_t cnts;
+               struct {
+#ifdef __LITTLE_ENDIAN
+                       u8 wmode;       /* Writer mode   */
+                       u8 rcnts[3];    /* Reader counts */
+#else
+                       u8 rcnts[3];    /* Reader counts */
+                       u8 wmode;       /* Writer mode   */
+#endif
+               };
+       };
+       arch_spinlock_t lock;
+};
+
 /**
  * rspin_until_writer_unlock - inc reader count & spin until writer is gone
  * @lock  : Pointer to queue rwlock structure
@@ -107,10 +127,10 @@ void queue_write_lock_slowpath(struct qrwlock *lock)
         * or wait for a previous writer to go away.
         */
        for (;;) {
-               cnts = atomic_read(&lock->cnts);
-               if (!(cnts & _QW_WMASK) &&
-                   (atomic_cmpxchg(&lock->cnts, cnts,
-                                   cnts | _QW_WAITING) == cnts))
+               struct __qrwlock *l = (struct __qrwlock *)lock;
+
+               if (!READ_ONCE(l->wmode) &&
+                  (cmpxchg(&l->wmode, 0, _QW_WAITING) == 0))
                        break;
 
                cpu_relax_lowlatency();
diff --git a/kernel/locking/qspinlock.c b/kernel/locking/qspinlock.c
new file mode 100644 (file)
index 0000000..38c4920
--- /dev/null
@@ -0,0 +1,473 @@
+/*
+ * Queued spinlock
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * (C) Copyright 2013-2015 Hewlett-Packard Development Company, L.P.
+ * (C) Copyright 2013-2014 Red Hat, Inc.
+ * (C) Copyright 2015 Intel Corp.
+ *
+ * Authors: Waiman Long <waiman.long@hp.com>
+ *          Peter Zijlstra <peterz@infradead.org>
+ */
+
+#ifndef _GEN_PV_LOCK_SLOWPATH
+
+#include <linux/smp.h>
+#include <linux/bug.h>
+#include <linux/cpumask.h>
+#include <linux/percpu.h>
+#include <linux/hardirq.h>
+#include <linux/mutex.h>
+#include <asm/byteorder.h>
+#include <asm/qspinlock.h>
+
+/*
+ * The basic principle of a queue-based spinlock can best be understood
+ * by studying a classic queue-based spinlock implementation called the
+ * MCS lock. The paper below provides a good description for this kind
+ * of lock.
+ *
+ * http://www.cise.ufl.edu/tr/DOC/REP-1992-71.pdf
+ *
+ * This queued spinlock implementation is based on the MCS lock; however, to
+ * make it fit the 4 bytes we assume spinlock_t to be, and to preserve its
+ * existing API, we must modify it.
+ *
+ * In particular; where the traditional MCS lock consists of a tail pointer
+ * (8 bytes) and needs the next pointer (another 8 bytes) of its own node to
+ * unlock the next pending (next->locked), we compress both these: {tail,
+ * next->locked} into a single u32 value.
+ *
+ * A spinlock disables recursion of its own context, and there is a limit to
+ * the contexts that can nest: task, softirq, hardirq and nmi. Since there are
+ * at most 4 nesting levels, the level can be encoded in a 2-bit number, and
+ * we can encode the tail by combining that 2-bit nesting level with the cpu
+ * number. With one byte for the lock value and 3 bytes for the tail, only a
+ * 32-bit word is now needed. Even though we only need 1 bit for the lock,
+ * we extend it to a full byte to achieve better performance for architectures
+ * that support atomic byte writes.
+ *
+ * We also change the first spinner to spin on the lock bit instead of its
+ * node, thereby avoiding the need to carry a node from lock to unlock and
+ * preserving the existing lock API. This also makes the unlock code simpler
+ * and faster.
+ *
+ * N.B. The current implementation only supports architectures that allow
+ *      atomic operations on smaller 8-bit and 16-bit data types.
+ *
+ */
+
+#include "mcs_spinlock.h"
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define MAX_NODES      8
+#else
+#define MAX_NODES      4
+#endif
+
+/*
+ * Per-CPU queue node structures; we can never have more than 4 nested
+ * contexts: task, softirq, hardirq, nmi.
+ *
+ * Exactly fits one 64-byte cacheline on a 64-bit architecture.
+ *
+ * PV doubles the storage and uses the second cacheline for PV state.
+ */
+static DEFINE_PER_CPU_ALIGNED(struct mcs_spinlock, mcs_nodes[MAX_NODES]);
+
+/*
+ * We must be able to distinguish between no-tail and the tail at 0:0,
+ * therefore increment the cpu number by one.
+ */
+
+static inline u32 encode_tail(int cpu, int idx)
+{
+       u32 tail;
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+       BUG_ON(idx > 3);
+#endif
+       tail  = (cpu + 1) << _Q_TAIL_CPU_OFFSET;
+       tail |= idx << _Q_TAIL_IDX_OFFSET; /* assume < 4 */
+
+       return tail;
+}
+
+static inline struct mcs_spinlock *decode_tail(u32 tail)
+{
+       int cpu = (tail >> _Q_TAIL_CPU_OFFSET) - 1;
+       int idx = (tail &  _Q_TAIL_IDX_MASK) >> _Q_TAIL_IDX_OFFSET;
+
+       return per_cpu_ptr(&mcs_nodes[idx], cpu);
+}
+
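+/*
+ * Worked example (assuming the usual layout with the tail idx in bits
+ * 16-17 and the tail cpu in bits 18-31): cpu 5 at nesting idx 2 encodes
+ * to ((5 + 1) << 18) | (2 << 16), and decode_tail() recovers cpu 5, idx 2.
+ */
+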
+#define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
+
+/*
+ * By using the whole 2nd least significant byte for the pending bit, we
+ * can allow better optimization of the lock acquisition for the pending
+ * bit holder.
+ *
+ * This internal structure is also used by the set_locked function which
+ * is not restricted to _Q_PENDING_BITS == 8.
+ */
+struct __qspinlock {
+       union {
+               atomic_t val;
+#ifdef __LITTLE_ENDIAN
+               struct {
+                       u8      locked;
+                       u8      pending;
+               };
+               struct {
+                       u16     locked_pending;
+                       u16     tail;
+               };
+#else
+               struct {
+                       u16     tail;
+                       u16     locked_pending;
+               };
+               struct {
+                       u8      reserved[2];
+                       u8      pending;
+                       u8      locked;
+               };
+#endif
+       };
+};
+
+#if _Q_PENDING_BITS == 8
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ *
+ * Lock stealing is not allowed if this function is used.
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+       struct __qspinlock *l = (void *)lock;
+
+       WRITE_ONCE(l->locked_pending, _Q_LOCKED_VAL);
+}
+
+/*
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+       struct __qspinlock *l = (void *)lock;
+
+       return (u32)xchg(&l->tail, tail >> _Q_TAIL_OFFSET) << _Q_TAIL_OFFSET;
+}
+
+#else /* _Q_PENDING_BITS == 8 */
+
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+       atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+}
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+       u32 old, new, val = atomic_read(&lock->val);
+
+       for (;;) {
+               new = (val & _Q_LOCKED_PENDING_MASK) | tail;
+               old = atomic_cmpxchg(&lock->val, val, new);
+               if (old == val)
+                       break;
+
+               val = old;
+       }
+       return old;
+}
+#endif /* _Q_PENDING_BITS == 8 */
+
+/**
+ * set_locked - Set the lock bit and own the lock
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,*,0 -> *,0,1
+ */
+static __always_inline void set_locked(struct qspinlock *lock)
+{
+       struct __qspinlock *l = (void *)lock;
+
+       WRITE_ONCE(l->locked, _Q_LOCKED_VAL);
+}
+
+
+/*
+ * Generate the native code for queued_spin_lock_slowpath(); provide NOPs for
+ * all the PV callbacks.
+ */
+
+static __always_inline void __pv_init_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_wait_node(struct mcs_spinlock *node) { }
+static __always_inline void __pv_kick_node(struct mcs_spinlock *node) { }
+
+static __always_inline void __pv_wait_head(struct qspinlock *lock,
+                                          struct mcs_spinlock *node) { }
+
+#define pv_enabled()           false
+
+#define pv_init_node           __pv_init_node
+#define pv_wait_node           __pv_wait_node
+#define pv_kick_node           __pv_kick_node
+#define pv_wait_head           __pv_wait_head
+
+#ifdef CONFIG_PARAVIRT_SPINLOCKS
+#define queued_spin_lock_slowpath      native_queued_spin_lock_slowpath
+#endif
+
+#endif /* _GEN_PV_LOCK_SLOWPATH */
+
+/**
+ * queued_spin_lock_slowpath - acquire the queued spinlock
+ * @lock: Pointer to queued spinlock structure
+ * @val: Current value of the queued spinlock 32-bit word
+ *
+ * (queue tail, pending bit, lock value)
+ *
+ *              fast     :    slow                                  :    unlock
+ *                       :                                          :
+ * uncontended  (0,0,0) -:--> (0,0,1) ------------------------------:--> (*,*,0)
+ *                       :       | ^--------.------.             /  :
+ *                       :       v           \      \            |  :
+ * pending               :    (0,1,1) +--> (0,1,0)   \           |  :
+ *                       :       | ^--'              |           |  :
+ *                       :       v                   |           |  :
+ * uncontended           :    (n,x,y) +--> (n,0,0) --'           |  :
+ *   queue               :       | ^--'                          |  :
+ *                       :       v                               |  :
+ * contended             :    (*,x,y) +--> (*,0,0) ---> (*,0,1) -'  :
+ *   queue               :         ^--'                             :
+ */
+void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
+{
+       struct mcs_spinlock *prev, *next, *node;
+       u32 new, old, tail;
+       int idx;
+
+       BUILD_BUG_ON(CONFIG_NR_CPUS >= (1U << _Q_TAIL_CPU_BITS));
+
+       if (pv_enabled())
+               goto queue;
+
+       if (virt_queued_spin_lock(lock))
+               return;
+
+       /*
+        * wait for in-progress pending->locked hand-overs
+        *
+        * 0,1,0 -> 0,0,1
+        */
+       if (val == _Q_PENDING_VAL) {
+               while ((val = atomic_read(&lock->val)) == _Q_PENDING_VAL)
+                       cpu_relax();
+       }
+
+       /*
+        * trylock || pending
+        *
+        * 0,0,0 -> 0,0,1 ; trylock
+        * 0,0,1 -> 0,1,1 ; pending
+        */
+       for (;;) {
+               /*
+                * If we observe any contention; queue.
+                */
+               if (val & ~_Q_LOCKED_MASK)
+                       goto queue;
+
+               new = _Q_LOCKED_VAL;
+               if (val == new)
+                       new |= _Q_PENDING_VAL;
+
+               old = atomic_cmpxchg(&lock->val, val, new);
+               if (old == val)
+                       break;
+
+               val = old;
+       }
+
+       /*
+        * we won the trylock
+        */
+       if (new == _Q_LOCKED_VAL)
+               return;
+
+       /*
+        * we're pending, wait for the owner to go away.
+        *
+        * *,1,1 -> *,1,0
+        *
+        * this wait loop must be a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because not all clear_pending_set_locked()
+        * implementations imply full barriers.
+        */
+       while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_MASK)
+               cpu_relax();
+
+       /*
+        * take ownership and clear the pending bit.
+        *
+        * *,1,0 -> *,0,1
+        */
+       clear_pending_set_locked(lock);
+       return;
+
+       /*
+        * End of pending bit optimistic spinning and beginning of MCS
+        * queuing.
+        */
+queue:
+       node = this_cpu_ptr(&mcs_nodes[0]);
+       idx = node->count++;
+       tail = encode_tail(smp_processor_id(), idx);
+
+       node += idx;
+       node->locked = 0;
+       node->next = NULL;
+       pv_init_node(node);
+
+       /*
+        * We touched a (possibly) cold cacheline in the per-cpu queue node;
+        * attempt the trylock once more in the hope someone let go while we
+        * weren't watching.
+        */
+       if (queued_spin_trylock(lock))
+               goto release;
+
+       /*
+        * We have already touched the queueing cacheline; don't bother with
+        * pending stuff.
+        *
+        * p,*,* -> n,*,*
+        */
+       old = xchg_tail(lock, tail);
+
+       /*
+        * if there was a previous node; link it and wait until reaching the
+        * head of the waitqueue.
+        */
+       if (old & _Q_TAIL_MASK) {
+               prev = decode_tail(old);
+               WRITE_ONCE(prev->next, node);
+
+               pv_wait_node(node);
+               arch_mcs_spin_lock_contended(&node->locked);
+       }
+
+       /*
+        * we're at the head of the waitqueue, wait for the owner & pending to
+        * go away.
+        *
+        * *,x,y -> *,0,0
+        *
+        * this wait loop must use a load-acquire such that we match the
+        * store-release that clears the locked bit and create lock
+        * sequentiality; this is because the set_locked() function below
+        * does not imply a full barrier.
+        *
+        */
+       pv_wait_head(lock, node);
+       while ((val = smp_load_acquire(&lock->val.counter)) & _Q_LOCKED_PENDING_MASK)
+               cpu_relax();
+
+       /*
+        * claim the lock:
+        *
+        * n,0,0 -> 0,0,1 : lock, uncontended
+        * *,0,0 -> *,0,1 : lock, contended
+        *
+        * If the queue head is the only one in the queue (lock value == tail),
+        * clear the tail code and grab the lock. Otherwise, we only need
+        * to grab the lock.
+        */
+       for (;;) {
+               if (val != tail) {
+                       set_locked(lock);
+                       break;
+               }
+               old = atomic_cmpxchg(&lock->val, val, _Q_LOCKED_VAL);
+               if (old == val)
+                       goto release;   /* No contention */
+
+               val = old;
+       }
+
+       /*
+        * contended path; wait for next, release.
+        */
+       while (!(next = READ_ONCE(node->next)))
+               cpu_relax();
+
+       arch_mcs_spin_unlock_contended(&next->locked);
+       pv_kick_node(next);
+
+release:
+       /*
+        * release the node
+        */
+       this_cpu_dec(mcs_nodes[0].count);
+}
+EXPORT_SYMBOL(queued_spin_lock_slowpath);
+
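+/*
+ * For reference, the asm-generic fast path that funnels into the slowpath
+ * above is roughly:
+ *
+ *	val = atomic_cmpxchg(&lock->val, 0, _Q_LOCKED_VAL);
+ *	if (likely(val == 0))
+ *		return;
+ *	queued_spin_lock_slowpath(lock, val);
+ */
+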
+/*
+ * Generate the paravirt code for queued_spin_lock_slowpath().
+ */
+#if !defined(_GEN_PV_LOCK_SLOWPATH) && defined(CONFIG_PARAVIRT_SPINLOCKS)
+#define _GEN_PV_LOCK_SLOWPATH
+
+#undef  pv_enabled
+#define pv_enabled()   true
+
+#undef pv_init_node
+#undef pv_wait_node
+#undef pv_kick_node
+#undef pv_wait_head
+
+#undef  queued_spin_lock_slowpath
+#define queued_spin_lock_slowpath      __pv_queued_spin_lock_slowpath
+
+#include "qspinlock_paravirt.h"
+#include "qspinlock.c"
+
+#endif
diff --git a/kernel/locking/qspinlock_paravirt.h b/kernel/locking/qspinlock_paravirt.h
new file mode 100644 (file)
index 0000000..04ab181
--- /dev/null
@@ -0,0 +1,325 @@
+#ifndef _GEN_PV_LOCK_SLOWPATH
+#error "do not include this file"
+#endif
+
+#include <linux/hash.h>
+#include <linux/bootmem.h>
+
+/*
+ * Implement paravirt qspinlocks; the general idea is to halt the vcpus instead
+ * of spinning them.
+ *
+ * This relies on the architecture to provide two paravirt hypercalls:
+ *
+ *   pv_wait(u8 *ptr, u8 val) -- suspends the vcpu if *ptr == val
+ *   pv_kick(cpu)             -- wakes a suspended vcpu
+ *
+ * Using these we implement __pv_queued_spin_lock_slowpath() and
+ * __pv_queued_spin_unlock() to replace native_queued_spin_lock_slowpath() and
+ * native_queued_spin_unlock().
+ */
+
+#define _Q_SLOW_VAL    (3U << _Q_LOCKED_OFFSET)
+
+enum vcpu_state {
+       vcpu_running = 0,
+       vcpu_halted,
+};
+
+struct pv_node {
+       struct mcs_spinlock     mcs;
+       struct mcs_spinlock     __res[3];
+
+       int                     cpu;
+       u8                      state;
+};
+
+/*
+ * Lock and MCS node addresses hash table for fast lookup
+ *
+ * Hashing is done on a per-cacheline basis to minimize the need to access
+ * more than one cacheline.
+ *
+ * Dynamically allocate a hash table big enough to hold at least 4X the
+ * number of possible cpus in the system. Allocation is done on page
+ * granularity. So the minimum number of hash buckets should be at least
+ * 256 (64-bit) or 512 (32-bit) to fully utilize a 4k page.
+ *
+ * Since we should not be holding locks from NMI context (very rare indeed) the
+ * max load factor is 0.75, which is around the point where open addressing
+ * breaks down.
+ *
+ */
+struct pv_hash_entry {
+       struct qspinlock *lock;
+       struct pv_node   *node;
+};
+
+#define PV_HE_PER_LINE (SMP_CACHE_BYTES / sizeof(struct pv_hash_entry))
+#define PV_HE_MIN      (PAGE_SIZE / sizeof(struct pv_hash_entry))
+
+static struct pv_hash_entry *pv_lock_hash;
+static unsigned int pv_lock_hash_bits __read_mostly;
+
+/*
+ * Allocate memory for the PV qspinlock hash buckets
+ *
+ * This function should be called from the paravirt spinlock initialization
+ * routine.
+ */
+void __init __pv_init_lock_hash(void)
+{
+       int pv_hash_size = ALIGN(4 * num_possible_cpus(), PV_HE_PER_LINE);
+
+       if (pv_hash_size < PV_HE_MIN)
+               pv_hash_size = PV_HE_MIN;
+
+       /*
+        * Allocate space from bootmem which should be page-size aligned
+        * and hence cacheline aligned.
+        */
+       pv_lock_hash = alloc_large_system_hash("PV qspinlock",
+                                              sizeof(struct pv_hash_entry),
+                                              pv_hash_size, 0, HASH_EARLY,
+                                              &pv_lock_hash_bits, NULL,
+                                              pv_hash_size, pv_hash_size);
+}
+
+#define for_each_hash_entry(he, offset, hash)                                          \
+       for (hash &= ~(PV_HE_PER_LINE - 1), he = &pv_lock_hash[hash], offset = 0;       \
+            offset < (1 << pv_lock_hash_bits);                                         \
+            offset++, he = &pv_lock_hash[(hash + offset) & ((1 << pv_lock_hash_bits) - 1)])
+
+static struct qspinlock **pv_hash(struct qspinlock *lock, struct pv_node *node)
+{
+       unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
+       struct pv_hash_entry *he;
+
+       for_each_hash_entry(he, offset, hash) {
+               if (!cmpxchg(&he->lock, NULL, lock)) {
+                       WRITE_ONCE(he->node, node);
+                       return &he->lock;
+               }
+       }
+       /*
+        * Hard assume there is a free entry for us.
+        *
+        * This is guaranteed by ensuring every blocked lock only ever consumes
+        * a single entry, and since we only have 4 nesting levels per CPU
+        * and allocated 4*num_possible_cpus(), this must be so.
+        *
+        * The single entry is guaranteed by having the lock owner unhash
+        * before it releases.
+        */
+       BUG();
+}
+
+static struct pv_node *pv_unhash(struct qspinlock *lock)
+{
+       unsigned long offset, hash = hash_ptr(lock, pv_lock_hash_bits);
+       struct pv_hash_entry *he;
+       struct pv_node *node;
+
+       for_each_hash_entry(he, offset, hash) {
+               if (READ_ONCE(he->lock) == lock) {
+                       node = READ_ONCE(he->node);
+                       WRITE_ONCE(he->lock, NULL);
+                       return node;
+               }
+       }
+       /*
+        * Hard assume we'll find an entry.
+        *
+        * This guarantees a limited lookup time and is itself guaranteed by
+        * having the lock owner do the unhash -- IFF the unlock sees the
+        * SLOW flag, there MUST be a hash entry.
+        */
+       BUG();
+}
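
pv_hash()/pv_unhash() above implement a small open-addressed table: insertion claims the first empty slot at or after the home bucket (with cmpxchg() in the kernel), and the lock owner removes its entry before releasing, which is what bounds every probe. A simplified, single-threaded sketch of that probe discipline (names, sizes, and the toy hash are illustrative):

    #include <stdio.h>
    #include <stdint.h>
    #include <stddef.h>

    #define NBUCKETS 16                      /* power of two, cf. 1 << pv_lock_hash_bits */

    struct entry { void *key; int value; };
    static struct entry table[NBUCKETS];

    static size_t hash_ptr_toy(void *p)      /* toy hash; the kernel uses hash_ptr() */
    {
            return ((uintptr_t)p >> 4) & (NBUCKETS - 1);
    }

    static int insert(void *key, int value)  /* cf. pv_hash(): first free slot wins */
    {
            size_t h = hash_ptr_toy(key);

            for (size_t off = 0; off < NBUCKETS; off++) {
                    struct entry *e = &table[(h + off) & (NBUCKETS - 1)];

                    if (!e->key) {           /* kernel: cmpxchg(&he->lock, NULL, lock) */
                            e->key = key;
                            e->value = value;
                            return 0;
                    }
            }
            return -1;      /* kernel BUG()s here: the load factor guarantees a slot */
    }

    static int removekey(void *key)          /* cf. pv_unhash(): probe until the key hits */
    {
            size_t h = hash_ptr_toy(key);

            for (size_t off = 0; off < NBUCKETS; off++) {
                    struct entry *e = &table[(h + off) & (NBUCKETS - 1)];

                    if (e->key == key) {
                            e->key = NULL;   /* owner unhashes before releasing */
                            return e->value;
                    }
            }
            return -1;      /* kernel BUG()s: SLOW implies a hash entry exists */
    }

    int main(void)
    {
            int a, b;
            insert(&a, 1);
            insert(&b, 2);
            printf("%d %d\n", removekey(&a), removekey(&b)); /* 1 2 */
            return 0;
    }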
+
+/*
+ * Initialize the PV part of the mcs_spinlock node.
+ */
+static void pv_init_node(struct mcs_spinlock *node)
+{
+       struct pv_node *pn = (struct pv_node *)node;
+
+       BUILD_BUG_ON(sizeof(struct pv_node) > 5*sizeof(struct mcs_spinlock));
+
+       pn->cpu = smp_processor_id();
+       pn->state = vcpu_running;
+}
+
+/*
+ * Wait for node->locked to become true; halt the vcpu after a short spin.
+ * pv_kick_node() is used to wake the vcpu again.
+ */
+static void pv_wait_node(struct mcs_spinlock *node)
+{
+       struct pv_node *pn = (struct pv_node *)node;
+       int loop;
+
+       for (;;) {
+               for (loop = SPIN_THRESHOLD; loop; loop--) {
+                       if (READ_ONCE(node->locked))
+                               return;
+                       cpu_relax();
+               }
+
+               /*
+                * Order pn->state vs pn->locked thusly:
+                *
+                * [S] pn->state = vcpu_halted    [S] next->locked = 1
+                *     MB                             MB
+                * [L] pn->locked               [RmW] pn->state = vcpu_running
+                *
+                * Matches the xchg() from pv_kick_node().
+                */
+               smp_store_mb(pn->state, vcpu_halted);
+
+               if (!READ_ONCE(node->locked))
+                       pv_wait(&pn->state, vcpu_halted);
+
+               /*
+                * Reset the vCPU state to avoid unnecessary CPU kicking
+                */
+               WRITE_ONCE(pn->state, vcpu_running);
+
+               /*
+                * If the locked flag is still not set after wakeup, it is a
+                * spurious wakeup and the vCPU should wait again. However,
+                * there is a pretty high overhead for CPU halting and kicking.
+                * So it is better to spin for a while in the hope that the
+                * MCS lock will be released soon.
+                */
+       }
+       /*
+        * By now our node->locked should be 1 and our caller will not actually
+        * spin-wait for it. We do however rely on our caller to do a
+        * load-acquire for us.
+        */
+}
+
+/*
+ * Called after setting next->locked = 1, used to wake those stuck in
+ * pv_wait_node().
+ */
+static void pv_kick_node(struct mcs_spinlock *node)
+{
+       struct pv_node *pn = (struct pv_node *)node;
+
+       /*
+        * Note that because node->locked is already set, this actual
+        * mcs_spinlock entry could be re-used already.
+        *
+        * This should be fine however, kicking people for no reason is
+        * harmless.
+        *
+        * See the comment in pv_wait_node().
+        */
+       if (xchg(&pn->state, vcpu_running) == vcpu_halted)
+               pv_kick(pn->cpu);
+}
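
pv_wait_node()/pv_kick_node() above form a classic spin-then-block handshake: the waiter publishes vcpu_halted with a full barrier, re-checks the condition, and only then sleeps; the waker sets the condition, then flips the state back with a fully ordered xchg() and kicks only if it actually observed vcpu_halted. A compressed sketch of that ordering using C11 atomics, with the two hypercalls stubbed out (single-threaded demo; in the kernel the two sides run on different vcpus):

    #include <stdatomic.h>
    #include <stdio.h>

    enum vcpu_state { vcpu_running, vcpu_halted };

    static _Atomic int state = vcpu_running;
    static _Atomic int locked;              /* the condition being waited on */

    static void pv_wait_stub(void) { /* hypercall: sleep while halted */ }
    static void pv_kick_stub(void) { puts("kick"); /* hypercall: wake vcpu */ }

    static void wait_node(void)
    {
            for (;;) {
                    for (int loop = 1000; loop; loop--)     /* SPIN_THRESHOLD */
                            if (atomic_load(&locked))
                                    return;

                    /* [S] state = halted; MB; [L] locked -- cf. smp_store_mb() */
                    atomic_store(&state, vcpu_halted);
                    if (!atomic_load(&locked))
                            pv_wait_stub();                 /* may wake spuriously */

                    atomic_store(&state, vcpu_running);     /* avoid useless kicks */
            }
    }

    static void kick_node(void)
    {
            atomic_store(&locked, 1);                       /* next->locked = 1 */
            /* xchg() is a full barrier; kick only a waiter we saw halted */
            if (atomic_exchange(&state, vcpu_running) == vcpu_halted)
                    pv_kick_stub();
    }

    int main(void)
    {
            atomic_store(&state, vcpu_halted);      /* waiter parked itself */
            kick_node();                            /* prints "kick" */
            wait_node();                            /* locked already set: returns */
            return 0;
    }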
+
+/*
+ * Wait for l->locked to become clear; halt the vcpu after a short spin.
+ * __pv_queued_spin_unlock() will wake us.
+ */
+static void pv_wait_head(struct qspinlock *lock, struct mcs_spinlock *node)
+{
+       struct pv_node *pn = (struct pv_node *)node;
+       struct __qspinlock *l = (void *)lock;
+       struct qspinlock **lp = NULL;
+       int loop;
+
+       for (;;) {
+               for (loop = SPIN_THRESHOLD; loop; loop--) {
+                       if (!READ_ONCE(l->locked))
+                               return;
+                       cpu_relax();
+               }
+
+               WRITE_ONCE(pn->state, vcpu_halted);
+               if (!lp) { /* ONCE */
+                       lp = pv_hash(lock, pn);
+                       /*
+                        * lp must be set before setting _Q_SLOW_VAL
+                        *
+                        * [S] lp = lock                [RmW] l = l->locked = 0
+                        *     MB                             MB
+                        * [S] l->locked = _Q_SLOW_VAL  [L]   lp
+                        *
+                        * Matches the cmpxchg() in __pv_queued_spin_unlock().
+                        */
+                       if (!cmpxchg(&l->locked, _Q_LOCKED_VAL, _Q_SLOW_VAL)) {
+                               /*
+                                * The lock is free and _Q_SLOW_VAL has never
+                                * been set. Therefore we need to unhash before
+                                * getting the lock.
+                                */
+                               WRITE_ONCE(*lp, NULL);
+                               return;
+                       }
+               }
+               pv_wait(&l->locked, _Q_SLOW_VAL);
+
+               /*
+                * The unlocker should have freed the lock before kicking the
+                * CPU. So if the lock is still not free, it is a spurious
+                * wakeup and so the vCPU should wait again after spinning for
+                * a while.
+                */
+       }
+
+       /*
+        * Lock is unlocked now; the caller will acquire it without waiting.
+        * As with pv_wait_node() we rely on the caller to do a load-acquire
+        * for us.
+        */
+}
+
+/*
+ * PV version of the unlock function to be used instead of
+ * queued_spin_unlock().
+ */
+__visible void __pv_queued_spin_unlock(struct qspinlock *lock)
+{
+       struct __qspinlock *l = (void *)lock;
+       struct pv_node *node;
+
+       /*
+        * We must not unlock if SLOW, because in that case we must first
+        * unhash. Otherwise it would be possible to have multiple @lock
+        * entries, which would be BAD.
+        */
+       if (likely(cmpxchg(&l->locked, _Q_LOCKED_VAL, 0) == _Q_LOCKED_VAL))
+               return;
+
+       /*
+        * Since the above failed to release, this must be the SLOW path.
+        * Therefore start by looking up the blocked node and unhashing it.
+        */
+       node = pv_unhash(lock);
+
+       /*
+        * Now that we have a reference to the (likely) blocked pv_node,
+        * release the lock.
+        */
+       smp_store_release(&l->locked, 0);
+
+       /*
+        * At this point the memory pointed at by lock can be freed/reused,
+        * however we can still use the pv_node to kick the CPU.
+        */
+       if (READ_ONCE(node->state) == vcpu_halted)
+               pv_kick(node->cpu);
+}
+/*
+ * Include the architecture specific callee-save thunk of the
+ * __pv_queued_spin_unlock(). This thunk is put together with
+ * __pv_queued_spin_unlock() near the top of the file to make sure
+ * that the callee-save thunk and the real unlock function are close
+ * to each other, sharing consecutive instruction cachelines.
+ */
+#include <asm/qspinlock_paravirt.h>
+
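The unlock fast path above is a single cmpxchg from _Q_LOCKED_VAL to 0; only when that fails (meaning a waiter hashed itself and stored _Q_SLOW_VAL) does the slow path unhash, release with a store-release, and kick. A condensed sketch of that decision tree with C11 atomics; the pv_node layout and stubs here are illustrative, not the kernel's API:

    #include <stdatomic.h>
    #include <stdio.h>

    #define _Q_LOCKED_VAL 1
    #define _Q_SLOW_VAL   3

    static _Atomic unsigned char lockbyte = _Q_LOCKED_VAL;

    struct pv_node_toy { int cpu; int halted; };
    static struct pv_node_toy waiter = { .cpu = 7, .halted = 1 };

    static struct pv_node_toy *unhash_stub(void) { return &waiter; }
    static void kick_stub(int cpu) { printf("kick vcpu %d\n", cpu); }

    static void pv_unlock(void)
    {
            unsigned char expect = _Q_LOCKED_VAL;

            /* Fast path: nobody set SLOW, just drop the lock byte. */
            if (atomic_compare_exchange_strong(&lockbyte, &expect, 0))
                    return;

            /*
             * Slow path: a waiter hashed itself and set _Q_SLOW_VAL.
             * Unhash first -- the lock word may be freed or reused the
             * moment the release store below lands.
             */
            struct pv_node_toy *node = unhash_stub();
            atomic_store_explicit(&lockbyte, 0, memory_order_release);
            if (node->halted)
                    kick_stub(node->cpu);
    }

    int main(void)
    {
            atomic_store(&lockbyte, _Q_SLOW_VAL);   /* simulate a parked waiter */
            pv_unlock();                            /* prints: kick vcpu 7 */
            return 0;
    }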
index b73279367087ca779072b79a784f69224929c149..36573e96a47761c6cd3fc17463651f3e11028d59 100644 (file)
@@ -70,10 +70,10 @@ static void fixup_rt_mutex_waiters(struct rt_mutex *lock)
 }
 
 /*
- * We can speed up the acquire/release, if the architecture
- * supports cmpxchg and if there's no debugging state to be set up
+ * We can speed up the acquire/release, if there's no debugging state to be
+ * set up.
  */
-#if defined(__HAVE_ARCH_CMPXCHG) && !defined(CONFIG_DEBUG_RT_MUTEXES)
+#ifndef CONFIG_DEBUG_RT_MUTEXES
 # define rt_mutex_cmpxchg(l,c,n)       (cmpxchg(&l->owner, c, n) == c)
 static inline void mark_rt_mutex_waiters(struct rt_mutex *lock)
 {
@@ -265,15 +265,17 @@ struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
 }
 
 /*
- * Called by sched_setscheduler() to check whether the priority change
- * is overruled by a possible priority boosting.
+ * Called by sched_setscheduler() to get the priority which will be
+ * effective after the change.
  */
-int rt_mutex_check_prio(struct task_struct *task, int newprio)
+int rt_mutex_get_effective_prio(struct task_struct *task, int newprio)
 {
        if (!task_has_pi_waiters(task))
-               return 0;
+               return newprio;
 
-       return task_top_pi_waiter(task)->task->prio <= newprio;
+       if (task_top_pi_waiter(task)->task->prio <= newprio)
+               return task_top_pi_waiter(task)->task->prio;
+       return newprio;
 }
 
 /*
@@ -1180,11 +1182,8 @@ rt_mutex_slowlock(struct rt_mutex *lock, int state,
        set_current_state(state);
 
        /* Setup the timer, when timeout != NULL */
-       if (unlikely(timeout)) {
+       if (unlikely(timeout))
                hrtimer_start_expires(&timeout->timer, HRTIMER_MODE_ABS);
-               if (!hrtimer_active(&timeout->timer))
-                       timeout->task = NULL;
-       }
 
        ret = task_blocks_on_rt_mutex(lock, &waiter, current, chwalk);
 
@@ -1441,10 +1440,17 @@ EXPORT_SYMBOL_GPL(rt_mutex_timed_lock);
  *
  * @lock:      the rt_mutex to be locked
  *
+ * This function can only be called in thread context. It's safe to
+ * call it from atomic regions, but not from hard interrupt or soft
+ * interrupt context.
+ *
  * Returns 1 on success and 0 on contention
  */
 int __sched rt_mutex_trylock(struct rt_mutex *lock)
 {
+       if (WARN_ON(in_irq() || in_nmi() || in_serving_softirq()))
+               return 0;
+
        return rt_mutex_fasttrylock(lock, rt_mutex_slowtrylock);
 }
 EXPORT_SYMBOL_GPL(rt_mutex_trylock);
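
The rt_mutex_get_effective_prio() change above replaces a boolean "is the change overruled?" answer with the priority that will actually result: the numerically lower (and therefore higher) of the requested prio and the top PI waiter's prio. A minimal model of that computation; the task layout here is invented for illustration, standing in for task_top_pi_waiter(task)->task:

    #include <stdio.h>
    #include <stddef.h>

    struct task_toy { int prio; };      /* lower number = higher priority */

    /* Top priority-inheritance waiter, or NULL if none. */
    static struct task_toy *top_pi_waiter;

    static int get_effective_prio(int newprio)
    {
            if (!top_pi_waiter)
                    return newprio;
            /* A boosted task may not drop below its top waiter's prio. */
            if (top_pi_waiter->prio <= newprio)
                    return top_pi_waiter->prio;
            return newprio;
    }

    int main(void)
    {
            struct task_toy waiter = { .prio = 10 };

            printf("%d\n", get_effective_prio(20)); /* no waiter: 20 */
            top_pi_waiter = &waiter;
            printf("%d\n", get_effective_prio(20)); /* held at boost: 10 */
            printf("%d\n", get_effective_prio(5));  /* request wins: 5 */
            return 0;
    }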
index 3417d0172a5d2e7cd69460ed4ef96c02f6c578d0..0f189714e457016ba7801c788a44673907c7746b 100644 (file)
@@ -409,11 +409,24 @@ done:
        return taken;
 }
 
+/*
+ * Return true if the rwsem has active spinner
+ */
+static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
+{
+       return osq_is_locked(&sem->osq);
+}
+
 #else
 static bool rwsem_optimistic_spin(struct rw_semaphore *sem)
 {
        return false;
 }
+
+static inline bool rwsem_has_spinner(struct rw_semaphore *sem)
+{
+       return false;
+}
 #endif
 
 /*
@@ -496,7 +509,38 @@ struct rw_semaphore *rwsem_wake(struct rw_semaphore *sem)
 {
        unsigned long flags;
 
+       /*
+        * If a spinner is present, it is not necessary to do the wakeup.
+        * Try to do wakeup only if the trylock succeeds to minimize
+        * spinlock contention which may introduce too much delay in the
+        * unlock operation.
+        *
+        *    spinning writer           up_write/up_read caller
+        *    ---------------           -----------------------
+        * [S]   osq_unlock()           [L]   osq
+        *       MB                           RMB
+        * [RmW] rwsem_try_write_lock() [RmW] spin_trylock(wait_lock)
+        *
+        * Here, it is important to make sure that there won't be a missed
+        * wakeup while the rwsem is free and the only spinning writer goes
+        * to sleep without taking the rwsem. Even when the spinning writer
+        * is just going to break out of the waiting loop, it will still do
+        * a trylock in rwsem_down_write_failed() before sleeping. IOW, if
+        * rwsem_has_spinner() is true, it will guarantee at least one
+        * trylock attempt on the rwsem later on.
+        */
+       if (rwsem_has_spinner(sem)) {
+               /*
+                * The smp_rmb() here is to make sure that the spinner
+                * state is consulted before reading the wait_lock.
+                */
+               smp_rmb();
+               if (!raw_spin_trylock_irqsave(&sem->wait_lock, flags))
+                       return sem;
+               goto locked;
+       }
        raw_spin_lock_irqsave(&sem->wait_lock, flags);
+locked:
 
        /* do nothing if list empty */
        if (!list_empty(&sem->wait_list))
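
The rwsem_wake() change above avoids serializing on ->wait_lock when an optimistic spinner is present: after the smp_rmb() pairing shown in the comment, it only trylocks and bails on failure, relying on the spinner's guaranteed later trylock to pick up the wakeup. A sketch of that control flow with stand-in primitives (illustrative, not the kernel's types; smp_rmb() is approximated by an acquire fence):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    static _Atomic bool osq_locked;     /* stands in for osq_is_locked(&sem->osq) */
    static _Atomic bool wait_lock;

    static bool spin_trylock_toy(_Atomic bool *l)
    {
            bool expect = false;
            return atomic_compare_exchange_strong(l, &expect, true);
    }

    static void rwsem_wake_toy(void)
    {
            if (atomic_load(&osq_locked)) {         /* rwsem_has_spinner() */
                    atomic_thread_fence(memory_order_acquire); /* cf. smp_rmb() */
                    if (!spin_trylock_toy(&wait_lock))
                            return;     /* spinner will trylock the rwsem anyway */
            } else {
                    while (!spin_trylock_toy(&wait_lock))
                            ;           /* plain raw_spin_lock_irqsave() path */
            }

            puts("wake waiters");       /* __rwsem_do_wake() would run here */
            atomic_store(&wait_lock, false);
    }

    int main(void)
    {
            rwsem_wake_toy();                   /* no spinner: lock taken, wakeup runs */
            atomic_store(&osq_locked, true);
            atomic_store(&wait_lock, true);     /* wait_lock held elsewhere */
            rwsem_wake_toy();                   /* spinner present: trylock fails, skip */
            return 0;
    }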
index 42a1d2afb2173cd3c7c740098dd72d7a52bdb3f8..cfc9e843a924091e2be3d2a2bcef72a038737f64 100644 (file)
@@ -3370,6 +3370,9 @@ static int load_module(struct load_info *info, const char __user *uargs,
        module_bug_cleanup(mod);
        mutex_unlock(&module_mutex);
 
+       blocking_notifier_call_chain(&module_notify_list,
+                                    MODULE_STATE_GOING, mod);
+
        /* we can't deallocate the module until we clear memory protection */
        unset_module_init_ro_nx(mod);
        unset_module_core_ro_nx(mod);
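
The module.c hunk above makes the late error path of load_module() emit MODULE_STATE_GOING, so notifier users that set up state on MODULE_STATE_COMING can tear it down even when the load fails after that point. A minimal, untested sketch of the consumer side this protects, using the standard module-notifier registration:

    #include <linux/module.h>
    #include <linux/notifier.h>

    static int demo_module_cb(struct notifier_block *nb,
                              unsigned long state, void *data)
    {
            struct module *mod = data;

            switch (state) {
            case MODULE_STATE_COMING:
                    pr_info("demo: %s coming, set up per-module state\n", mod->name);
                    break;
            case MODULE_STATE_GOING:
                    /* Now also reached when load_module() fails late. */
                    pr_info("demo: %s going, tear down per-module state\n", mod->name);
                    break;
            }
            return NOTIFY_OK;
    }

    static struct notifier_block demo_nb = {
            .notifier_call = demo_module_cb,
    };

    static int __init demo_init(void)
    {
            return register_module_notifier(&demo_nb);
    }

    static void __exit demo_exit(void)
    {
            unregister_module_notifier(&demo_nb);
    }

    module_init(demo_init);
    module_exit(demo_exit);
    MODULE_LICENSE("GPL");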
index 8dbe27611ec399e42f8912d49708ff4e20bff73f..59e32684c23b58714ecb26215856f67866cfc58e 100644 (file)
@@ -241,6 +241,7 @@ rcu_torture_free(struct rcu_torture *p)
 struct rcu_torture_ops {
        int ttype;
        void (*init)(void);
+       void (*cleanup)(void);
        int (*readlock)(void);
        void (*read_delay)(struct torture_random_state *rrsp);
        void (*readunlock)(int idx);
@@ -477,10 +478,12 @@ static struct rcu_torture_ops rcu_busted_ops = {
  */
 
 DEFINE_STATIC_SRCU(srcu_ctl);
+static struct srcu_struct srcu_ctld;
+static struct srcu_struct *srcu_ctlp = &srcu_ctl;
 
-static int srcu_torture_read_lock(void) __acquires(&srcu_ctl)
+static int srcu_torture_read_lock(void) __acquires(srcu_ctlp)
 {
-       return srcu_read_lock(&srcu_ctl);
+       return srcu_read_lock(srcu_ctlp);
 }
 
 static void srcu_read_delay(struct torture_random_state *rrsp)
@@ -499,49 +502,49 @@ static void srcu_read_delay(struct torture_random_state *rrsp)
                rcu_read_delay(rrsp);
 }
 
-static void srcu_torture_read_unlock(int idx) __releases(&srcu_ctl)
+static void srcu_torture_read_unlock(int idx) __releases(srcu_ctlp)
 {
-       srcu_read_unlock(&srcu_ctl, idx);
+       srcu_read_unlock(srcu_ctlp, idx);
 }
 
 static unsigned long srcu_torture_completed(void)
 {
-       return srcu_batches_completed(&srcu_ctl);
+       return srcu_batches_completed(srcu_ctlp);
 }
 
 static void srcu_torture_deferred_free(struct rcu_torture *rp)
 {
-       call_srcu(&srcu_ctl, &rp->rtort_rcu, rcu_torture_cb);
+       call_srcu(srcu_ctlp, &rp->rtort_rcu, rcu_torture_cb);
 }
 
 static void srcu_torture_synchronize(void)
 {
-       synchronize_srcu(&srcu_ctl);
+       synchronize_srcu(srcu_ctlp);
 }
 
 static void srcu_torture_call(struct rcu_head *head,
                              void (*func)(struct rcu_head *head))
 {
-       call_srcu(&srcu_ctl, head, func);
+       call_srcu(srcu_ctlp, head, func);
 }
 
 static void srcu_torture_barrier(void)
 {
-       srcu_barrier(&srcu_ctl);
+       srcu_barrier(srcu_ctlp);
 }
 
 static void srcu_torture_stats(void)
 {
        int cpu;
-       int idx = srcu_ctl.completed & 0x1;
+       int idx = srcu_ctlp->completed & 0x1;
 
        pr_alert("%s%s per-CPU(idx=%d):",
                 torture_type, TORTURE_FLAG, idx);
        for_each_possible_cpu(cpu) {
                long c0, c1;
 
-               c0 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[!idx];
-               c1 = (long)per_cpu_ptr(srcu_ctl.per_cpu_ref, cpu)->c[idx];
+               c0 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[!idx];
+               c1 = (long)per_cpu_ptr(srcu_ctlp->per_cpu_ref, cpu)->c[idx];
                pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
        }
        pr_cont("\n");
@@ -549,7 +552,7 @@ static void srcu_torture_stats(void)
 
 static void srcu_torture_synchronize_expedited(void)
 {
-       synchronize_srcu_expedited(&srcu_ctl);
+       synchronize_srcu_expedited(srcu_ctlp);
 }
 
 static struct rcu_torture_ops srcu_ops = {
@@ -569,6 +572,38 @@ static struct rcu_torture_ops srcu_ops = {
        .name           = "srcu"
 };
 
+static void srcu_torture_init(void)
+{
+       rcu_sync_torture_init();
+       WARN_ON(init_srcu_struct(&srcu_ctld));
+       srcu_ctlp = &srcu_ctld;
+}
+
+static void srcu_torture_cleanup(void)
+{
+       cleanup_srcu_struct(&srcu_ctld);
+       srcu_ctlp = &srcu_ctl; /* In case of a later rcutorture run. */
+}
+
+/* As above, but dynamically allocated. */
+static struct rcu_torture_ops srcud_ops = {
+       .ttype          = SRCU_FLAVOR,
+       .init           = srcu_torture_init,
+       .cleanup        = srcu_torture_cleanup,
+       .readlock       = srcu_torture_read_lock,
+       .read_delay     = srcu_read_delay,
+       .readunlock     = srcu_torture_read_unlock,
+       .started        = NULL,
+       .completed      = srcu_torture_completed,
+       .deferred_free  = srcu_torture_deferred_free,
+       .sync           = srcu_torture_synchronize,
+       .exp_sync       = srcu_torture_synchronize_expedited,
+       .call           = srcu_torture_call,
+       .cb_barrier     = srcu_torture_barrier,
+       .stats          = srcu_torture_stats,
+       .name           = "srcud"
+};
+
 /*
  * Definitions for sched torture testing.
  */
@@ -672,8 +707,8 @@ static void rcu_torture_boost_cb(struct rcu_head *head)
        struct rcu_boost_inflight *rbip =
                container_of(head, struct rcu_boost_inflight, rcu);
 
-       smp_mb(); /* Ensure RCU-core accesses precede clearing ->inflight */
-       rbip->inflight = 0;
+       /* Ensure RCU-core accesses precede clearing ->inflight */
+       smp_store_release(&rbip->inflight, 0);
 }
 
 static int rcu_torture_boost(void *arg)
@@ -710,9 +745,9 @@ static int rcu_torture_boost(void *arg)
                call_rcu_time = jiffies;
                while (ULONG_CMP_LT(jiffies, endtime)) {
                        /* If we don't have a callback in flight, post one. */
-                       if (!rbi.inflight) {
-                               smp_mb(); /* RCU core before ->inflight = 1. */
-                               rbi.inflight = 1;
+                       if (!smp_load_acquire(&rbi.inflight)) {
+                               /* RCU core before ->inflight = 1. */
+                               smp_store_release(&rbi.inflight, 1);
                                call_rcu(&rbi.rcu, rcu_torture_boost_cb);
                                if (jiffies - call_rcu_time >
                                         test_boost_duration * HZ - HZ / 2) {
@@ -751,11 +786,10 @@ checkwait:        stutter_wait("rcu_torture_boost");
        } while (!torture_must_stop());
 
        /* Clean up and exit. */
-       while (!kthread_should_stop() || rbi.inflight) {
+       while (!kthread_should_stop() || smp_load_acquire(&rbi.inflight)) {
                torture_shutdown_absorb("rcu_torture_boost");
                schedule_timeout_uninterruptible(1);
        }
-       smp_mb(); /* order accesses to ->inflight before stack-frame death. */
        destroy_rcu_head_on_stack(&rbi.rcu);
        torture_kthread_stopping("rcu_torture_boost");
        return 0;
@@ -1054,7 +1088,7 @@ static void rcu_torture_timer(unsigned long unused)
        p = rcu_dereference_check(rcu_torture_current,
                                  rcu_read_lock_bh_held() ||
                                  rcu_read_lock_sched_held() ||
-                                 srcu_read_lock_held(&srcu_ctl));
+                                 srcu_read_lock_held(srcu_ctlp));
        if (p == NULL) {
                /* Leave because rcu_torture_writer is not yet underway */
                cur_ops->readunlock(idx);
@@ -1128,7 +1162,7 @@ rcu_torture_reader(void *arg)
                p = rcu_dereference_check(rcu_torture_current,
                                          rcu_read_lock_bh_held() ||
                                          rcu_read_lock_sched_held() ||
-                                         srcu_read_lock_held(&srcu_ctl));
+                                         srcu_read_lock_held(srcu_ctlp));
                if (p == NULL) {
                        /* Wait for rcu_torture_writer to get underway */
                        cur_ops->readunlock(idx);
@@ -1413,12 +1447,15 @@ static int rcu_torture_barrier_cbs(void *arg)
        do {
                wait_event(barrier_cbs_wq[myid],
                           (newphase =
-                           ACCESS_ONCE(barrier_phase)) != lastphase ||
+                           smp_load_acquire(&barrier_phase)) != lastphase ||
                           torture_must_stop());
                lastphase = newphase;
-               smp_mb(); /* ensure barrier_phase load before ->call(). */
                if (torture_must_stop())
                        break;
+               /*
+                * The above smp_load_acquire() ensures barrier_phase load
+                * is ordered before the following ->call().
+                */
                cur_ops->call(&rcu, rcu_torture_barrier_cbf);
                if (atomic_dec_and_test(&barrier_cbs_count))
                        wake_up(&barrier_wq);
@@ -1439,8 +1476,8 @@ static int rcu_torture_barrier(void *arg)
        do {
                atomic_set(&barrier_cbs_invoked, 0);
                atomic_set(&barrier_cbs_count, n_barrier_cbs);
-               smp_mb(); /* Ensure barrier_phase after prior assignments. */
-               barrier_phase = !barrier_phase;
+               /* Ensure barrier_phase ordered after prior assignments. */
+               smp_store_release(&barrier_phase, !barrier_phase);
                for (i = 0; i < n_barrier_cbs; i++)
                        wake_up(&barrier_cbs_wq[i]);
                wait_event(barrier_wq,
@@ -1588,10 +1625,14 @@ rcu_torture_cleanup(void)
                        rcutorture_booster_cleanup(i);
        }
 
-       /* Wait for all RCU callbacks to fire.  */
-
+       /*
+        * Wait for all RCU callbacks to fire, then do flavor-specific
+        * cleanup operations.
+        */
        if (cur_ops->cb_barrier != NULL)
                cur_ops->cb_barrier();
+       if (cur_ops->cleanup != NULL)
+               cur_ops->cleanup();
 
        rcu_torture_stats_print();  /* -After- the stats thread is stopped! */
 
@@ -1668,8 +1709,8 @@ rcu_torture_init(void)
        int cpu;
        int firsterr = 0;
        static struct rcu_torture_ops *torture_ops[] = {
-               &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &sched_ops,
-               RCUTORTURE_TASKS_OPS
+               &rcu_ops, &rcu_bh_ops, &rcu_busted_ops, &srcu_ops, &srcud_ops,
+               &sched_ops, RCUTORTURE_TASKS_OPS
        };
 
        if (!torture_init_begin(torture_type, verbose, &torture_runnable))
@@ -1701,7 +1742,7 @@ rcu_torture_init(void)
        if (nreaders >= 0) {
                nrealreaders = nreaders;
        } else {
-               nrealreaders = num_online_cpus() - 1;
+               nrealreaders = num_online_cpus() - 2 - nreaders;
                if (nrealreaders <= 0)
                        nrealreaders = 1;
        }
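
The new ->cleanup() hook above mirrors ->init(): the "srcud" flavor allocates its srcu_struct dynamically at init time and must free it at teardown, and rcu_torture_cleanup() invokes the hook only when it is non-NULL, after the callback barrier. A generic sketch of that optional-hook ops-table pattern:

    #include <stdio.h>
    #include <stdlib.h>

    struct torture_ops_toy {
            const char *name;
            void (*init)(void);
            void (*cleanup)(void);      /* optional, may be NULL */
    };

    static int *dyn_state;

    static void srcud_init(void)    { dyn_state = malloc(sizeof(*dyn_state)); }
    static void srcud_cleanup(void) { free(dyn_state); dyn_state = NULL; }

    static const struct torture_ops_toy srcu_like  = { .name = "srcu" };
    static const struct torture_ops_toy srcud_like = {
            .name = "srcud", .init = srcud_init, .cleanup = srcud_cleanup,
    };

    static void run(const struct torture_ops_toy *ops)
    {
            if (ops->init)
                    ops->init();
            printf("torturing %s\n", ops->name);
            if (ops->cleanup)           /* cf. cur_ops->cleanup != NULL */
                    ops->cleanup();
    }

    int main(void)
    {
            run(&srcu_like);            /* static flavor: no hooks */
            run(&srcud_like);           /* dynamic flavor: alloc + free */
            return 0;
    }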
index cad76e76b4e7def42de7c379882c878084c5394c..fb33d35ee0b7c0ecdb6df0cb0562050abcc6707b 100644 (file)
@@ -151,7 +151,7 @@ static unsigned long srcu_readers_seq_idx(struct srcu_struct *sp, int idx)
        unsigned long t;
 
        for_each_possible_cpu(cpu) {
-               t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
+               t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->seq[idx]);
                sum += t;
        }
        return sum;
@@ -168,7 +168,7 @@ static unsigned long srcu_readers_active_idx(struct srcu_struct *sp, int idx)
        unsigned long t;
 
        for_each_possible_cpu(cpu) {
-               t = ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
+               t = READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[idx]);
                sum += t;
        }
        return sum;
@@ -265,8 +265,8 @@ static int srcu_readers_active(struct srcu_struct *sp)
        unsigned long sum = 0;
 
        for_each_possible_cpu(cpu) {
-               sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
-               sum += ACCESS_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
+               sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[0]);
+               sum += READ_ONCE(per_cpu_ptr(sp->per_cpu_ref, cpu)->c[1]);
        }
        return sum;
 }
@@ -296,7 +296,7 @@ int __srcu_read_lock(struct srcu_struct *sp)
 {
        int idx;
 
-       idx = ACCESS_ONCE(sp->completed) & 0x1;
+       idx = READ_ONCE(sp->completed) & 0x1;
        preempt_disable();
        __this_cpu_inc(sp->per_cpu_ref->c[idx]);
        smp_mb(); /* B */  /* Avoid leaking the critical section. */
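
These srcu.c hunks are part of the tree-wide migration from ACCESS_ONCE() to READ_ONCE()/WRITE_ONCE(), which make the direction of each access explicit and, unlike ACCESS_ONCE(), behave correctly on non-scalar types. The essence of the two macros is a volatile access; a simplified sketch, not the kernel's full definition (which also handles sizes larger than a word):

    #include <stdio.h>

    /* Simplified: forces one real load/store, forbidding the compiler
     * from tearing, fusing, or re-reading the access. */
    #define READ_ONCE(x)      (*(const volatile __typeof__(x) *)&(x))
    #define WRITE_ONCE(x, v)  (*(volatile __typeof__(x) *)&(x) = (v))

    static unsigned long jiffies_stall;

    int main(void)
    {
            WRITE_ONCE(jiffies_stall, 42);
            printf("%lu\n", READ_ONCE(jiffies_stall));
            return 0;
    }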
index 069742d61c68873a3fce7bf5d7df2e77d5a59958..591af0cb7b9f4e7a25abbab58f3a7ab1fe3cb4fb 100644 (file)
@@ -49,39 +49,6 @@ static void __call_rcu(struct rcu_head *head,
 
 #include "tiny_plugin.h"
 
-/*
- * Enter idle, which is an extended quiescent state if we have fully
- * entered that mode.
- */
-void rcu_idle_enter(void)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_idle_enter);
-
-/*
- * Exit an interrupt handler towards idle.
- */
-void rcu_irq_exit(void)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_irq_exit);
-
-/*
- * Exit idle, so that we are no longer in an extended quiescent state.
- */
-void rcu_idle_exit(void)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_idle_exit);
-
-/*
- * Enter an interrupt handler, moving away from idle.
- */
-void rcu_irq_enter(void)
-{
-}
-EXPORT_SYMBOL_GPL(rcu_irq_enter);
-
 #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
 
 /*
@@ -170,6 +137,11 @@ static void __rcu_process_callbacks(struct rcu_ctrlblk *rcp)
 
        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
+       if (rcp->donetail == &rcp->rcucblist) {
+               /* No callbacks ready, so just leave. */
+               local_irq_restore(flags);
+               return;
+       }
        RCU_TRACE(trace_rcu_batch_start(rcp->name, 0, rcp->qlen, -1));
        list = rcp->rcucblist;
        rcp->rcucblist = *rcp->donetail;
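
The __rcu_process_callbacks() hunk above adds an early exit when no callbacks are ready, before any tracing or list splicing runs with interrupts disabled. The donetail test works because donetail points back at the list head when the done segment is empty, a common singly-linked tail-pointer idiom (sketch with toy types):

    #include <stdio.h>
    #include <stddef.h>

    struct cb_toy { struct cb_toy *next; };

    struct ctrlblk_toy {
            struct cb_toy  *rcucblist;      /* head of the callback list */
            struct cb_toy **donetail;       /* &last-done ->next, or &rcucblist */
    };

    static int callbacks_ready(struct ctrlblk_toy *rcp)
    {
            /* Empty done segment <=> donetail still points at the head. */
            return rcp->donetail != &rcp->rcucblist;
    }

    int main(void)
    {
            struct cb_toy one = { .next = NULL };
            struct ctrlblk_toy rcp = {
                    .rcucblist = NULL, .donetail = &rcp.rcucblist,
            };

            printf("%d\n", callbacks_ready(&rcp));  /* 0: leave early */
            rcp.rcucblist = &one;
            rcp.donetail = &one.next;               /* one callback is ready */
            printf("%d\n", callbacks_ready(&rcp));  /* 1 */
            return 0;
    }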
index f94e209a10d615a5a4f7d379e623918f533ce814..e492a5253e0f10c94da7056efd8f42ba2c1a394c 100644 (file)
@@ -144,16 +144,17 @@ static void check_cpu_stall(struct rcu_ctrlblk *rcp)
                return;
        rcp->ticks_this_gp++;
        j = jiffies;
-       js = ACCESS_ONCE(rcp->jiffies_stall);
+       js = READ_ONCE(rcp->jiffies_stall);
        if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
                pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
                       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
                       jiffies - rcp->gp_start, rcp->qlen);
                dump_stack();
-               ACCESS_ONCE(rcp->jiffies_stall) = jiffies +
-                       3 * rcu_jiffies_till_stall_check() + 3;
+               WRITE_ONCE(rcp->jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        } else if (ULONG_CMP_GE(j, js)) {
-               ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+               WRITE_ONCE(rcp->jiffies_stall,
+                          jiffies + rcu_jiffies_till_stall_check());
        }
 }
 
@@ -161,7 +162,8 @@ static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
 {
        rcp->ticks_this_gp = 0;
        rcp->gp_start = jiffies;
-       ACCESS_ONCE(rcp->jiffies_stall) = jiffies + rcu_jiffies_till_stall_check();
+       WRITE_ONCE(rcp->jiffies_stall,
+                  jiffies + rcu_jiffies_till_stall_check());
 }
 
 static void check_cpu_stalls(void)
index 8cf7304b2867f5a113807afb0bd5dc0a3bd3cfc0..add042926a6608258564c72ea5a33dd204428df6 100644 (file)
@@ -91,7 +91,7 @@ static const char *tp_##sname##_varname __used __tracepoint_string = sname##_var
 
 #define RCU_STATE_INITIALIZER(sname, sabbr, cr) \
 DEFINE_RCU_TPS(sname) \
-DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct rcu_data, sname##_data); \
 struct rcu_state sname##_state = { \
        .level = { &sname##_state.node[0] }, \
        .rda = &sname##_data, \
@@ -110,11 +110,18 @@ struct rcu_state sname##_state = { \
 RCU_STATE_INITIALIZER(rcu_sched, 's', call_rcu_sched);
 RCU_STATE_INITIALIZER(rcu_bh, 'b', call_rcu_bh);
 
-static struct rcu_state *rcu_state_p;
+static struct rcu_state *const rcu_state_p;
+static struct rcu_data __percpu *const rcu_data_p;
 LIST_HEAD(rcu_struct_flavors);
 
-/* Increase (but not decrease) the CONFIG_RCU_FANOUT_LEAF at boot time. */
-static int rcu_fanout_leaf = CONFIG_RCU_FANOUT_LEAF;
+/* Dump rcu_node combining tree at boot to verify correct setup. */
+static bool dump_tree;
+module_param(dump_tree, bool, 0444);
+/* Control rcu_node-tree auto-balancing at boot time. */
+static bool rcu_fanout_exact;
+module_param(rcu_fanout_exact, bool, 0444);
+/* Increase (but not decrease) the RCU_FANOUT_LEAF at boot time. */
+static int rcu_fanout_leaf = RCU_FANOUT_LEAF;
 module_param(rcu_fanout_leaf, int, 0444);
 int rcu_num_lvls __read_mostly = RCU_NUM_LVLS;
 static int num_rcu_lvl[] = {  /* Number of rcu_nodes at specified level. */
@@ -159,17 +166,46 @@ static void invoke_rcu_core(void);
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp);
 
 /* rcuc/rcub kthread realtime priority */
+#ifdef CONFIG_RCU_KTHREAD_PRIO
 static int kthread_prio = CONFIG_RCU_KTHREAD_PRIO;
+#else /* #ifdef CONFIG_RCU_KTHREAD_PRIO */
+static int kthread_prio = IS_ENABLED(CONFIG_RCU_BOOST) ? 1 : 0;
+#endif /* #else #ifdef CONFIG_RCU_KTHREAD_PRIO */
 module_param(kthread_prio, int, 0644);
 
 /* Delay in jiffies for grace-period initialization delays, debug only. */
+
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT
+static int gp_preinit_delay = CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT_DELAY;
+module_param(gp_preinit_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
+static const int gp_preinit_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT */
+
 #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT
 static int gp_init_delay = CONFIG_RCU_TORTURE_TEST_SLOW_INIT_DELAY;
 module_param(gp_init_delay, int, 0644);
 #else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
 static const int gp_init_delay;
 #endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_INIT */
-#define PER_RCU_NODE_PERIOD 10 /* Number of grace periods between delays. */
+
+#ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP
+static int gp_cleanup_delay = CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY;
+module_param(gp_cleanup_delay, int, 0644);
+#else /* #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
+static const int gp_cleanup_delay;
+#endif /* #else #ifdef CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP */
+
+/*
+ * Number of grace periods between delays, normalized by the duration of
+ * the delay.  The longer the delay, the more grace periods between
+ * each delay.  The reason for this normalization is that it means that,
+ * for non-zero delays, the overall slowdown of grace periods is constant
+ * regardless of the duration of the delay.  This arrangement balances
+ * the need for long delays to increase some race probabilities with the
+ * need for fast grace periods to increase other race probabilities.
+ */
+#define PER_RCU_NODE_PERIOD 3  /* Number of grace periods between delays. */
 
 /*
  * Track the rcutorture test sequence number and the update version
@@ -191,17 +227,17 @@ unsigned long rcutorture_vernum;
  */
 unsigned long rcu_rnp_online_cpus(struct rcu_node *rnp)
 {
-       return ACCESS_ONCE(rnp->qsmaskinitnext);
+       return READ_ONCE(rnp->qsmaskinitnext);
 }
 
 /*
- * Return true if an RCU grace period is in progress.  The ACCESS_ONCE()s
+ * Return true if an RCU grace period is in progress.  The READ_ONCE()s
  * permit this function to be invoked without holding the root rcu_node
  * structure's ->lock, but of course results can be subject to change.
  */
 static int rcu_gp_in_progress(struct rcu_state *rsp)
 {
-       return ACCESS_ONCE(rsp->completed) != ACCESS_ONCE(rsp->gpnum);
+       return READ_ONCE(rsp->completed) != READ_ONCE(rsp->gpnum);
 }
 
 /*
@@ -278,8 +314,8 @@ static void rcu_momentary_dyntick_idle(void)
                if (!(resched_mask & rsp->flavor_mask))
                        continue;
                smp_mb(); /* rcu_sched_qs_mask before cond_resched_completed. */
-               if (ACCESS_ONCE(rdp->mynode->completed) !=
-                   ACCESS_ONCE(rdp->cond_resched_completed))
+               if (READ_ONCE(rdp->mynode->completed) !=
+                   READ_ONCE(rdp->cond_resched_completed))
                        continue;
 
                /*
@@ -491,9 +527,9 @@ void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
                break;
        }
        if (rsp != NULL) {
-               *flags = ACCESS_ONCE(rsp->gp_flags);
-               *gpnum = ACCESS_ONCE(rsp->gpnum);
-               *completed = ACCESS_ONCE(rsp->completed);
+               *flags = READ_ONCE(rsp->gp_flags);
+               *gpnum = READ_ONCE(rsp->gpnum);
+               *completed = READ_ONCE(rsp->completed);
                return;
        }
        *flags = 0;
@@ -539,10 +575,10 @@ static struct rcu_node *rcu_get_root(struct rcu_state *rsp)
 static int rcu_future_needs_gp(struct rcu_state *rsp)
 {
        struct rcu_node *rnp = rcu_get_root(rsp);
-       int idx = (ACCESS_ONCE(rnp->completed) + 1) & 0x1;
+       int idx = (READ_ONCE(rnp->completed) + 1) & 0x1;
        int *fp = &rnp->need_future_gp[idx];
 
-       return ACCESS_ONCE(*fp);
+       return READ_ONCE(*fp);
 }
 
 /*
@@ -565,7 +601,7 @@ cpu_needs_another_gp(struct rcu_state *rsp, struct rcu_data *rdp)
                return 1;  /* Yes, this CPU has newly registered callbacks. */
        for (i = RCU_WAIT_TAIL; i < RCU_NEXT_TAIL; i++)
                if (rdp->nxttail[i - 1] != rdp->nxttail[i] &&
-                   ULONG_CMP_LT(ACCESS_ONCE(rsp->completed),
+                   ULONG_CMP_LT(READ_ONCE(rsp->completed),
                                 rdp->nxtcompleted[i]))
                        return 1;  /* Yes, CBs for future grace period. */
        return 0; /* No grace period needed. */
@@ -585,7 +621,8 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
 
        trace_rcu_dyntick(TPS("Start"), oldval, rdtp->dynticks_nesting);
-       if (!user && !is_idle_task(current)) {
+       if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+           !user && !is_idle_task(current)) {
                struct task_struct *idle __maybe_unused =
                        idle_task(smp_processor_id());
 
@@ -604,7 +641,8 @@ static void rcu_eqs_enter_common(long long oldval, bool user)
        smp_mb__before_atomic();  /* See above. */
        atomic_inc(&rdtp->dynticks);
        smp_mb__after_atomic();  /* Force ordering with next sojourn. */
-       WARN_ON_ONCE(atomic_read(&rdtp->dynticks) & 0x1);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                    atomic_read(&rdtp->dynticks) & 0x1);
        rcu_dynticks_task_enter();
 
        /*
@@ -630,7 +668,8 @@ static void rcu_eqs_enter(bool user)
 
        rdtp = this_cpu_ptr(&rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
-       WARN_ON_ONCE((oldval & DYNTICK_TASK_NEST_MASK) == 0);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                    (oldval & DYNTICK_TASK_NEST_MASK) == 0);
        if ((oldval & DYNTICK_TASK_NEST_MASK) == DYNTICK_TASK_NEST_VALUE) {
                rdtp->dynticks_nesting = 0;
                rcu_eqs_enter_common(oldval, user);
@@ -703,7 +742,8 @@ void rcu_irq_exit(void)
        rdtp = this_cpu_ptr(&rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting--;
-       WARN_ON_ONCE(rdtp->dynticks_nesting < 0);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                    rdtp->dynticks_nesting < 0);
        if (rdtp->dynticks_nesting)
                trace_rcu_dyntick(TPS("--="), oldval, rdtp->dynticks_nesting);
        else
@@ -728,10 +768,12 @@ static void rcu_eqs_exit_common(long long oldval, int user)
        atomic_inc(&rdtp->dynticks);
        /* CPUs seeing atomic_inc() must see later RCU read-side crit sects */
        smp_mb__after_atomic();  /* See above. */
-       WARN_ON_ONCE(!(atomic_read(&rdtp->dynticks) & 0x1));
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                    !(atomic_read(&rdtp->dynticks) & 0x1));
        rcu_cleanup_after_idle();
        trace_rcu_dyntick(TPS("End"), oldval, rdtp->dynticks_nesting);
-       if (!user && !is_idle_task(current)) {
+       if (IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+           !user && !is_idle_task(current)) {
                struct task_struct *idle __maybe_unused =
                        idle_task(smp_processor_id());
 
@@ -755,7 +797,7 @@ static void rcu_eqs_exit(bool user)
 
        rdtp = this_cpu_ptr(&rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
-       WARN_ON_ONCE(oldval < 0);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) && oldval < 0);
        if (oldval & DYNTICK_TASK_NEST_MASK) {
                rdtp->dynticks_nesting += DYNTICK_TASK_NEST_VALUE;
        } else {
@@ -828,7 +870,8 @@ void rcu_irq_enter(void)
        rdtp = this_cpu_ptr(&rcu_dynticks);
        oldval = rdtp->dynticks_nesting;
        rdtp->dynticks_nesting++;
-       WARN_ON_ONCE(rdtp->dynticks_nesting == 0);
+       WARN_ON_ONCE(IS_ENABLED(CONFIG_RCU_EQS_DEBUG) &&
+                    rdtp->dynticks_nesting == 0);
        if (oldval)
                trace_rcu_dyntick(TPS("++="), oldval, rdtp->dynticks_nesting);
        else
@@ -1011,9 +1054,9 @@ static int dyntick_save_progress_counter(struct rcu_data *rdp,
                trace_rcu_fqs(rdp->rsp->name, rdp->gpnum, rdp->cpu, TPS("dti"));
                return 1;
        } else {
-               if (ULONG_CMP_LT(ACCESS_ONCE(rdp->gpnum) + ULONG_MAX / 4,
+               if (ULONG_CMP_LT(READ_ONCE(rdp->gpnum) + ULONG_MAX / 4,
                                 rdp->mynode->gpnum))
-                       ACCESS_ONCE(rdp->gpwrap) = true;
+                       WRITE_ONCE(rdp->gpwrap, true);
                return 0;
        }
 }
@@ -1093,12 +1136,12 @@ static int rcu_implicit_dynticks_qs(struct rcu_data *rdp,
        if (ULONG_CMP_GE(jiffies,
                         rdp->rsp->gp_start + jiffies_till_sched_qs) ||
            ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
-               if (!(ACCESS_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
-                       ACCESS_ONCE(rdp->cond_resched_completed) =
-                               ACCESS_ONCE(rdp->mynode->completed);
+               if (!(READ_ONCE(*rcrmp) & rdp->rsp->flavor_mask)) {
+                       WRITE_ONCE(rdp->cond_resched_completed,
+                                  READ_ONCE(rdp->mynode->completed));
                        smp_mb(); /* ->cond_resched_completed before *rcrmp. */
-                       ACCESS_ONCE(*rcrmp) =
-                               ACCESS_ONCE(*rcrmp) + rdp->rsp->flavor_mask;
+                       WRITE_ONCE(*rcrmp,
+                                  READ_ONCE(*rcrmp) + rdp->rsp->flavor_mask);
                        resched_cpu(rdp->cpu);  /* Force CPU into scheduler. */
                        rdp->rsp->jiffies_resched += 5; /* Enable beating. */
                } else if (ULONG_CMP_GE(jiffies, rdp->rsp->jiffies_resched)) {
@@ -1119,9 +1162,9 @@ static void record_gp_stall_check_time(struct rcu_state *rsp)
        rsp->gp_start = j;
        smp_wmb(); /* Record start time before stall time. */
        j1 = rcu_jiffies_till_stall_check();
-       ACCESS_ONCE(rsp->jiffies_stall) = j + j1;
+       WRITE_ONCE(rsp->jiffies_stall, j + j1);
        rsp->jiffies_resched = j + j1 / 2;
-       rsp->n_force_qs_gpstart = ACCESS_ONCE(rsp->n_force_qs);
+       rsp->n_force_qs_gpstart = READ_ONCE(rsp->n_force_qs);
 }
 
 /*
@@ -1133,10 +1176,11 @@ static void rcu_check_gp_kthread_starvation(struct rcu_state *rsp)
        unsigned long j;
 
        j = jiffies;
-       gpa = ACCESS_ONCE(rsp->gp_activity);
+       gpa = READ_ONCE(rsp->gp_activity);
        if (j - gpa > 2 * HZ)
-               pr_err("%s kthread starved for %ld jiffies!\n",
-                      rsp->name, j - gpa);
+               pr_err("%s kthread starved for %ld jiffies! g%lu c%lu f%#x\n",
+                      rsp->name, j - gpa,
+                      rsp->gpnum, rsp->completed, rsp->gp_flags);
 }
 
 /*
@@ -1173,12 +1217,13 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        /* Only let one CPU complain about others per time interval. */
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       delta = jiffies - ACCESS_ONCE(rsp->jiffies_stall);
+       delta = jiffies - READ_ONCE(rsp->jiffies_stall);
        if (delta < RCU_STALL_RAT_DELAY || !rcu_gp_in_progress(rsp)) {
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
-       ACCESS_ONCE(rsp->jiffies_stall) = jiffies + 3 * rcu_jiffies_till_stall_check() + 3;
+       WRITE_ONCE(rsp->jiffies_stall,
+                  jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -1212,12 +1257,12 @@ static void print_other_cpu_stall(struct rcu_state *rsp, unsigned long gpnum)
        if (ndetected) {
                rcu_dump_cpu_stacks(rsp);
        } else {
-               if (ACCESS_ONCE(rsp->gpnum) != gpnum ||
-                   ACCESS_ONCE(rsp->completed) == gpnum) {
+               if (READ_ONCE(rsp->gpnum) != gpnum ||
+                   READ_ONCE(rsp->completed) == gpnum) {
                        pr_err("INFO: Stall ended before state dump start\n");
                } else {
                        j = jiffies;
-                       gpa = ACCESS_ONCE(rsp->gp_activity);
+                       gpa = READ_ONCE(rsp->gp_activity);
                        pr_err("All QSes seen, last %s kthread activity %ld (%ld-%ld), jiffies_till_next_fqs=%ld, root ->qsmask %#lx\n",
                               rsp->name, j - gpa, j, gpa,
                               jiffies_till_next_fqs,
@@ -1262,9 +1307,9 @@ static void print_cpu_stall(struct rcu_state *rsp)
        rcu_dump_cpu_stacks(rsp);
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       if (ULONG_CMP_GE(jiffies, ACCESS_ONCE(rsp->jiffies_stall)))
-               ACCESS_ONCE(rsp->jiffies_stall) = jiffies +
-                                    3 * rcu_jiffies_till_stall_check() + 3;
+       if (ULONG_CMP_GE(jiffies, READ_ONCE(rsp->jiffies_stall)))
+               WRITE_ONCE(rsp->jiffies_stall,
+                          jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
 
        /*
@@ -1307,20 +1352,20 @@ static void check_cpu_stall(struct rcu_state *rsp, struct rcu_data *rdp)
         * Given this check, comparisons of jiffies, rsp->jiffies_stall,
         * and rsp->gp_start suffice to forestall false positives.
         */
-       gpnum = ACCESS_ONCE(rsp->gpnum);
+       gpnum = READ_ONCE(rsp->gpnum);
        smp_rmb(); /* Pick up ->gpnum first... */
-       js = ACCESS_ONCE(rsp->jiffies_stall);
+       js = READ_ONCE(rsp->jiffies_stall);
        smp_rmb(); /* ...then ->jiffies_stall before the rest... */
-       gps = ACCESS_ONCE(rsp->gp_start);
+       gps = READ_ONCE(rsp->gp_start);
        smp_rmb(); /* ...and finally ->gp_start before ->completed. */
-       completed = ACCESS_ONCE(rsp->completed);
+       completed = READ_ONCE(rsp->completed);
        if (ULONG_CMP_GE(completed, gpnum) ||
            ULONG_CMP_LT(j, js) ||
            ULONG_CMP_GE(gps, js))
                return; /* No stall or GP completed since entering function. */
        rnp = rdp->mynode;
        if (rcu_gp_in_progress(rsp) &&
-           (ACCESS_ONCE(rnp->qsmask) & rdp->grpmask)) {
+           (READ_ONCE(rnp->qsmask) & rdp->grpmask)) {
 
                /* We haven't checked in, so go dump stack. */
                print_cpu_stall(rsp);
@@ -1347,7 +1392,7 @@ void rcu_cpu_stall_reset(void)
        struct rcu_state *rsp;
 
        for_each_rcu_flavor(rsp)
-               ACCESS_ONCE(rsp->jiffies_stall) = jiffies + ULONG_MAX / 2;
+               WRITE_ONCE(rsp->jiffies_stall, jiffies + ULONG_MAX / 2);
 }
 
 /*
@@ -1457,7 +1502,7 @@ rcu_start_future_gp(struct rcu_node *rnp, struct rcu_data *rdp,
         * doing some extra useless work.
         */
        if (rnp->gpnum != rnp->completed ||
-           ACCESS_ONCE(rnp_root->gpnum) != ACCESS_ONCE(rnp_root->completed)) {
+           READ_ONCE(rnp_root->gpnum) != READ_ONCE(rnp_root->completed)) {
                rnp->need_future_gp[c & 0x1]++;
                trace_rcu_future_gp(rnp, rdp, c, TPS("Startedleaf"));
                goto out;
@@ -1542,7 +1587,7 @@ static int rcu_future_gp_cleanup(struct rcu_state *rsp, struct rcu_node *rnp)
 static void rcu_gp_kthread_wake(struct rcu_state *rsp)
 {
        if (current == rsp->gp_kthread ||
-           !ACCESS_ONCE(rsp->gp_flags) ||
+           !READ_ONCE(rsp->gp_flags) ||
            !rsp->gp_kthread)
                return;
        wake_up(&rsp->gp_wq);
@@ -1677,7 +1722,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
 
        /* Handle the ends of any preceding grace periods first. */
        if (rdp->completed == rnp->completed &&
-           !unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+           !unlikely(READ_ONCE(rdp->gpwrap))) {
 
                /* No grace period end, so just accelerate recent callbacks. */
                ret = rcu_accelerate_cbs(rsp, rnp, rdp);
@@ -1692,7 +1737,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuend"));
        }
 
-       if (rdp->gpnum != rnp->gpnum || unlikely(ACCESS_ONCE(rdp->gpwrap))) {
+       if (rdp->gpnum != rnp->gpnum || unlikely(READ_ONCE(rdp->gpwrap))) {
                /*
                 * If the current grace period is waiting for this CPU,
                 * set up to detect a quiescent state, otherwise don't
@@ -1704,7 +1749,7 @@ static bool __note_gp_changes(struct rcu_state *rsp, struct rcu_node *rnp,
                rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
                rdp->qs_pending = !!(rnp->qsmask & rdp->grpmask);
                zero_cpu_stall_ticks(rdp);
-               ACCESS_ONCE(rdp->gpwrap) = false;
+               WRITE_ONCE(rdp->gpwrap, false);
        }
        return ret;
 }
@@ -1717,9 +1762,9 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
 
        local_irq_save(flags);
        rnp = rdp->mynode;
-       if ((rdp->gpnum == ACCESS_ONCE(rnp->gpnum) &&
-            rdp->completed == ACCESS_ONCE(rnp->completed) &&
-            !unlikely(ACCESS_ONCE(rdp->gpwrap))) || /* w/out lock. */
+       if ((rdp->gpnum == READ_ONCE(rnp->gpnum) &&
+            rdp->completed == READ_ONCE(rnp->completed) &&
+            !unlikely(READ_ONCE(rdp->gpwrap))) || /* w/out lock. */
            !raw_spin_trylock(&rnp->lock)) { /* irqs already off, so later. */
                local_irq_restore(flags);
                return;
@@ -1731,6 +1776,13 @@ static void note_gp_changes(struct rcu_state *rsp, struct rcu_data *rdp)
                rcu_gp_kthread_wake(rsp);
 }
 
+static void rcu_gp_slow(struct rcu_state *rsp, int delay)
+{
+       if (delay > 0 &&
+           !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD * delay)))
+               schedule_timeout_uninterruptible(delay);
+}
+
 /*
  * Initialize a new grace period.  Return 0 if no grace period required.
  */
@@ -1740,15 +1792,15 @@ static int rcu_gp_init(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+       WRITE_ONCE(rsp->gp_activity, jiffies);
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
-       if (!ACCESS_ONCE(rsp->gp_flags)) {
+       if (!READ_ONCE(rsp->gp_flags)) {
                /* Spurious wakeup, tell caller to go back to sleep.  */
                raw_spin_unlock_irq(&rnp->lock);
                return 0;
        }
-       ACCESS_ONCE(rsp->gp_flags) = 0; /* Clear all flags: New grace period. */
+       WRITE_ONCE(rsp->gp_flags, 0); /* Clear all flags: New grace period. */
 
        if (WARN_ON_ONCE(rcu_gp_in_progress(rsp))) {
                /*
@@ -1773,6 +1825,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
         * will handle subsequent offline CPUs.
         */
        rcu_for_each_leaf_node(rsp, rnp) {
+               rcu_gp_slow(rsp, gp_preinit_delay);
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
                if (rnp->qsmaskinit == rnp->qsmaskinitnext &&
@@ -1829,14 +1882,15 @@ static int rcu_gp_init(struct rcu_state *rsp)
         * process finishes, because this kthread handles both.
         */
        rcu_for_each_node_breadth_first(rsp, rnp) {
+               rcu_gp_slow(rsp, gp_init_delay);
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
                rdp = this_cpu_ptr(rsp->rda);
                rcu_preempt_check_blocked_tasks(rnp);
                rnp->qsmask = rnp->qsmaskinit;
-               ACCESS_ONCE(rnp->gpnum) = rsp->gpnum;
+               WRITE_ONCE(rnp->gpnum, rsp->gpnum);
                if (WARN_ON_ONCE(rnp->completed != rsp->completed))
-                       ACCESS_ONCE(rnp->completed) = rsp->completed;
+                       WRITE_ONCE(rnp->completed, rsp->completed);
                if (rnp == rdp->mynode)
                        (void)__note_gp_changes(rsp, rnp, rdp);
                rcu_preempt_boost_start_gp(rnp);
@@ -1845,10 +1899,7 @@ static int rcu_gp_init(struct rcu_state *rsp)
                                            rnp->grphi, rnp->qsmask);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
-               ACCESS_ONCE(rsp->gp_activity) = jiffies;
-               if (gp_init_delay > 0 &&
-                   !(rsp->gpnum % (rcu_num_nodes * PER_RCU_NODE_PERIOD)))
-                       schedule_timeout_uninterruptible(gp_init_delay);
+               WRITE_ONCE(rsp->gp_activity, jiffies);
        }
 
        return 1;
@@ -1864,7 +1915,7 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
        unsigned long maxj;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+       WRITE_ONCE(rsp->gp_activity, jiffies);
        rsp->n_force_qs++;
        if (fqs_state == RCU_SAVE_DYNTICK) {
                /* Collect dyntick-idle snapshots. */
@@ -1882,11 +1933,11 @@ static int rcu_gp_fqs(struct rcu_state *rsp, int fqs_state_in)
                force_qs_rnp(rsp, rcu_implicit_dynticks_qs, &isidle, &maxj);
        }
        /* Clear flag to prevent immediate re-entry. */
-       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+       if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                raw_spin_lock_irq(&rnp->lock);
                smp_mb__after_unlock_lock();
-               ACCESS_ONCE(rsp->gp_flags) =
-                       ACCESS_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS;
+               WRITE_ONCE(rsp->gp_flags,
+                          READ_ONCE(rsp->gp_flags) & ~RCU_GP_FLAG_FQS);
                raw_spin_unlock_irq(&rnp->lock);
        }
        return fqs_state;
@@ -1903,7 +1954,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        struct rcu_data *rdp;
        struct rcu_node *rnp = rcu_get_root(rsp);
 
-       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+       WRITE_ONCE(rsp->gp_activity, jiffies);
        raw_spin_lock_irq(&rnp->lock);
        smp_mb__after_unlock_lock();
        gp_duration = jiffies - rsp->gp_start;
@@ -1934,7 +1985,7 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                smp_mb__after_unlock_lock();
                WARN_ON_ONCE(rcu_preempt_blocked_readers_cgp(rnp));
                WARN_ON_ONCE(rnp->qsmask);
-               ACCESS_ONCE(rnp->completed) = rsp->gpnum;
+               WRITE_ONCE(rnp->completed, rsp->gpnum);
                rdp = this_cpu_ptr(rsp->rda);
                if (rnp == rdp->mynode)
                        needgp = __note_gp_changes(rsp, rnp, rdp) || needgp;
@@ -1942,7 +1993,8 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
                nocb += rcu_future_gp_cleanup(rsp, rnp);
                raw_spin_unlock_irq(&rnp->lock);
                cond_resched_rcu_qs();
-               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+               WRITE_ONCE(rsp->gp_activity, jiffies);
+               rcu_gp_slow(rsp, gp_cleanup_delay);
        }
        rnp = rcu_get_root(rsp);
        raw_spin_lock_irq(&rnp->lock);
@@ -1950,16 +2002,16 @@ static void rcu_gp_cleanup(struct rcu_state *rsp)
        rcu_nocb_gp_set(rnp, nocb);
 
        /* Declare grace period done. */
-       ACCESS_ONCE(rsp->completed) = rsp->gpnum;
+       WRITE_ONCE(rsp->completed, rsp->gpnum);
        trace_rcu_grace_period(rsp->name, rsp->completed, TPS("end"));
        rsp->fqs_state = RCU_GP_IDLE;
        rdp = this_cpu_ptr(rsp->rda);
        /* Advance CBs to reduce false positives below. */
        needgp = rcu_advance_cbs(rsp, rnp, rdp) || needgp;
        if (needgp || cpu_needs_another_gp(rsp, rdp)) {
-               ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
+               WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
                trace_rcu_grace_period(rsp->name,
-                                      ACCESS_ONCE(rsp->gpnum),
+                                      READ_ONCE(rsp->gpnum),
                                       TPS("newreq"));
        }
        raw_spin_unlock_irq(&rnp->lock);
@@ -1983,20 +2035,20 @@ static int __noreturn rcu_gp_kthread(void *arg)
                /* Handle grace-period start. */
                for (;;) {
                        trace_rcu_grace_period(rsp->name,
-                                              ACCESS_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gpnum),
                                               TPS("reqwait"));
                        rsp->gp_state = RCU_GP_WAIT_GPS;
                        wait_event_interruptible(rsp->gp_wq,
-                                                ACCESS_ONCE(rsp->gp_flags) &
+                                                READ_ONCE(rsp->gp_flags) &
                                                 RCU_GP_FLAG_INIT);
                        /* Locking provides needed memory barrier. */
                        if (rcu_gp_init(rsp))
                                break;
                        cond_resched_rcu_qs();
-                       ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                       WRITE_ONCE(rsp->gp_activity, jiffies);
                        WARN_ON(signal_pending(current));
                        trace_rcu_grace_period(rsp->name,
-                                              ACCESS_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gpnum),
                                               TPS("reqwaitsig"));
                }
 
@@ -2012,39 +2064,39 @@ static int __noreturn rcu_gp_kthread(void *arg)
                        if (!ret)
                                rsp->jiffies_force_qs = jiffies + j;
                        trace_rcu_grace_period(rsp->name,
-                                              ACCESS_ONCE(rsp->gpnum),
+                                              READ_ONCE(rsp->gpnum),
                                               TPS("fqswait"));
                        rsp->gp_state = RCU_GP_WAIT_FQS;
                        ret = wait_event_interruptible_timeout(rsp->gp_wq,
-                                       ((gf = ACCESS_ONCE(rsp->gp_flags)) &
+                                       ((gf = READ_ONCE(rsp->gp_flags)) &
                                         RCU_GP_FLAG_FQS) ||
-                                       (!ACCESS_ONCE(rnp->qsmask) &&
+                                       (!READ_ONCE(rnp->qsmask) &&
                                         !rcu_preempt_blocked_readers_cgp(rnp)),
                                        j);
                        /* Locking provides needed memory barriers. */
                        /* If grace period done, leave loop. */
-                       if (!ACCESS_ONCE(rnp->qsmask) &&
+                       if (!READ_ONCE(rnp->qsmask) &&
                            !rcu_preempt_blocked_readers_cgp(rnp))
                                break;
                        /* If time for quiescent-state forcing, do it. */
                        if (ULONG_CMP_GE(jiffies, rsp->jiffies_force_qs) ||
                            (gf & RCU_GP_FLAG_FQS)) {
                                trace_rcu_grace_period(rsp->name,
-                                                      ACCESS_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gpnum),
                                                       TPS("fqsstart"));
                                fqs_state = rcu_gp_fqs(rsp, fqs_state);
                                trace_rcu_grace_period(rsp->name,
-                                                      ACCESS_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gpnum),
                                                       TPS("fqsend"));
                                cond_resched_rcu_qs();
-                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                               WRITE_ONCE(rsp->gp_activity, jiffies);
                        } else {
                                /* Deal with stray signal. */
                                cond_resched_rcu_qs();
-                               ACCESS_ONCE(rsp->gp_activity) = jiffies;
+                               WRITE_ONCE(rsp->gp_activity, jiffies);
                                WARN_ON(signal_pending(current));
                                trace_rcu_grace_period(rsp->name,
-                                                      ACCESS_ONCE(rsp->gpnum),
+                                                      READ_ONCE(rsp->gpnum),
                                                       TPS("fqswaitsig"));
                        }
                        j = jiffies_till_next_fqs;
@@ -2086,8 +2138,8 @@ rcu_start_gp_advanced(struct rcu_state *rsp, struct rcu_node *rnp,
                 */
                return false;
        }
-       ACCESS_ONCE(rsp->gp_flags) = RCU_GP_FLAG_INIT;
-       trace_rcu_grace_period(rsp->name, ACCESS_ONCE(rsp->gpnum),
+       WRITE_ONCE(rsp->gp_flags, RCU_GP_FLAG_INIT);
+       trace_rcu_grace_period(rsp->name, READ_ONCE(rsp->gpnum),
                               TPS("newreq"));
 
        /*
@@ -2137,6 +2189,7 @@ static void rcu_report_qs_rsp(struct rcu_state *rsp, unsigned long flags)
        __releases(rcu_get_root(rsp)->lock)
 {
        WARN_ON_ONCE(!rcu_gp_in_progress(rsp));
+       WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
        raw_spin_unlock_irqrestore(&rcu_get_root(rsp)->lock, flags);
        rcu_gp_kthread_wake(rsp);
 }
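Note that the new FQS-flag update in rcu_report_qs_rsp() is a plain read-modify-write; it is race-free only because every writer holds the root rcu_node's ->lock. A hedged sketch of that invariant (helper name hypothetical):

/* Callers must hold rcu_get_root(rsp)->lock. */
static void set_gp_flag_locked(struct rcu_state *rsp, unsigned long flag)
{
	WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | flag);
}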
@@ -2334,8 +2387,6 @@ rcu_check_quiescent_state(struct rcu_state *rsp, struct rcu_data *rdp)
        rcu_report_qs_rdp(rdp->cpu, rsp, rdp);
 }
 
-#ifdef CONFIG_HOTPLUG_CPU
-
 /*
  * Send the specified CPU's RCU callbacks to the orphanage.  The
  * specified CPU must be offline, and the caller must hold the
@@ -2346,7 +2397,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                          struct rcu_node *rnp, struct rcu_data *rdp)
 {
        /* No-CBs CPUs do not have orphanable callbacks. */
-       if (rcu_is_nocb_cpu(rdp->cpu))
+       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) || rcu_is_nocb_cpu(rdp->cpu))
                return;
 
        /*
@@ -2359,7 +2410,7 @@ rcu_send_cbs_to_orphanage(int cpu, struct rcu_state *rsp,
                rsp->qlen += rdp->qlen;
                rdp->n_cbs_orphaned += rdp->qlen;
                rdp->qlen_lazy = 0;
-               ACCESS_ONCE(rdp->qlen) = 0;
+               WRITE_ONCE(rdp->qlen, 0);
        }
 
        /*
@@ -2405,7 +2456,8 @@ static void rcu_adopt_orphan_cbs(struct rcu_state *rsp, unsigned long flags)
        struct rcu_data *rdp = raw_cpu_ptr(rsp->rda);
 
        /* No-CBs CPUs are handled specially. */
-       if (rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
+       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
+           rcu_nocb_adopt_orphan_cbs(rsp, rdp, flags))
                return;
 
        /* Do the accounting first. */
@@ -2452,6 +2504,9 @@ static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
        RCU_TRACE(struct rcu_data *rdp = this_cpu_ptr(rsp->rda));
        RCU_TRACE(struct rcu_node *rnp = rdp->mynode);
 
+       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+               return;
+
        RCU_TRACE(mask = rdp->grpmask);
        trace_rcu_grace_period(rsp->name,
                               rnp->gpnum + 1 - !!(rnp->qsmask & mask),
@@ -2480,7 +2535,8 @@ static void rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
        long mask;
        struct rcu_node *rnp = rnp_leaf;
 
-       if (rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
+       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU) ||
+           rnp->qsmaskinit || rcu_preempt_has_tasks(rnp))
                return;
        for (;;) {
                mask = rnp->grpmask;
@@ -2511,6 +2567,9 @@ static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
+       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+               return;
+
        /* Remove outgoing CPU from mask in the leaf rcu_node structure. */
        mask = rdp->grpmask;
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -2532,6 +2591,9 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
        struct rcu_data *rdp = per_cpu_ptr(rsp->rda, cpu);
        struct rcu_node *rnp = rdp->mynode;  /* Outgoing CPU's rdp & rnp. */
 
+       if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
+               return;
+
        /* Adjust any no-longer-needed kthreads. */
        rcu_boost_kthread_setaffinity(rnp, -1);
 
@@ -2546,26 +2608,6 @@ static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
                  cpu, rdp->qlen, rdp->nxtlist);
 }
 
-#else /* #ifdef CONFIG_HOTPLUG_CPU */
-
-static void rcu_cleanup_dying_cpu(struct rcu_state *rsp)
-{
-}
-
-static void __maybe_unused rcu_cleanup_dead_rnp(struct rcu_node *rnp_leaf)
-{
-}
-
-static void rcu_cleanup_dying_idle_cpu(int cpu, struct rcu_state *rsp)
-{
-}
-
-static void rcu_cleanup_dead_cpu(int cpu, struct rcu_state *rsp)
-{
-}
-
-#endif /* #else #ifdef CONFIG_HOTPLUG_CPU */
-
 /*
  * Invoke any RCU callbacks that have made it to the end of their grace
 *  period.  Throttle as specified by rdp->blimit.
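The hunks above drop the CONFIG_HOTPLUG_CPU #ifdef block, and the stub functions it required, in favor of IS_ENABLED() checks. IS_ENABLED(option) expands to a compile-time 0 or 1, so the optimizer discards the dead branch while the compiler still type-checks it. A small illustrative pattern (function name hypothetical):

static void example_hotplug_cleanup(int cpu)
{
	if (!IS_ENABLED(CONFIG_HOTPLUG_CPU))
		return;		/* folded away on !HOTPLUG_CPU builds */
	/* hotplug-only cleanup work goes here */
}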
@@ -2580,7 +2622,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        /* If no callbacks are ready, just return. */
        if (!cpu_has_callbacks_ready_to_invoke(rdp)) {
                trace_rcu_batch_start(rsp->name, rdp->qlen_lazy, rdp->qlen, 0);
-               trace_rcu_batch_end(rsp->name, 0, !!ACCESS_ONCE(rdp->nxtlist),
+               trace_rcu_batch_end(rsp->name, 0, !!READ_ONCE(rdp->nxtlist),
                                    need_resched(), is_idle_task(current),
                                    rcu_is_callbacks_kthread());
                return;
@@ -2636,7 +2678,7 @@ static void rcu_do_batch(struct rcu_state *rsp, struct rcu_data *rdp)
        }
        smp_mb(); /* List handling before counting for rcu_barrier(). */
        rdp->qlen_lazy -= count_lazy;
-       ACCESS_ONCE(rdp->qlen) = rdp->qlen - count;
+       WRITE_ONCE(rdp->qlen, rdp->qlen - count);
        rdp->n_cbs_invoked += count;
 
        /* Reinstate batch limit if we have worked down the excess. */
@@ -2730,10 +2772,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
                mask = 0;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                smp_mb__after_unlock_lock();
-               if (!rcu_gp_in_progress(rsp)) {
-                       raw_spin_unlock_irqrestore(&rnp->lock, flags);
-                       return;
-               }
                if (rnp->qsmask == 0) {
                        if (rcu_state_p == &rcu_sched_state ||
                            rsp != rcu_state_p ||
@@ -2763,8 +2801,6 @@ static void force_qs_rnp(struct rcu_state *rsp,
                bit = 1;
                for (; cpu <= rnp->grphi; cpu++, bit <<= 1) {
                        if ((rnp->qsmask & bit) != 0) {
-                               if ((rnp->qsmaskinit & bit) == 0)
-                                       *isidle = false; /* Pending hotplug. */
                                if (f(per_cpu_ptr(rsp->rda, cpu), isidle, maxj))
                                        mask |= bit;
                        }
@@ -2793,7 +2829,7 @@ static void force_quiescent_state(struct rcu_state *rsp)
        /* Funnel through hierarchy to reduce memory contention. */
        rnp = __this_cpu_read(rsp->rda->mynode);
        for (; rnp != NULL; rnp = rnp->parent) {
-               ret = (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
+               ret = (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
                      !raw_spin_trylock(&rnp->fqslock);
                if (rnp_old != NULL)
                        raw_spin_unlock(&rnp_old->fqslock);
@@ -2809,13 +2845,12 @@ static void force_quiescent_state(struct rcu_state *rsp)
        raw_spin_lock_irqsave(&rnp_old->lock, flags);
        smp_mb__after_unlock_lock();
        raw_spin_unlock(&rnp_old->fqslock);
-       if (ACCESS_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
+       if (READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) {
                rsp->n_force_qs_lh++;
                raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
                return;  /* Someone beat us to it. */
        }
-       ACCESS_ONCE(rsp->gp_flags) =
-               ACCESS_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS;
+       WRITE_ONCE(rsp->gp_flags, READ_ONCE(rsp->gp_flags) | RCU_GP_FLAG_FQS);
        raw_spin_unlock_irqrestore(&rnp_old->lock, flags);
        rcu_gp_kthread_wake(rsp);
 }
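force_quiescent_state() funnels through the rcu_node hierarchy to reduce memory contention: each task trylocks ->fqslock while walking leaf-to-root, releases the previous level's lock, and bails out as soon as the FQS flag is already set. A simplified sketch of the walk (helper name hypothetical):

/* Returns true if another task already won the race to force QSes. */
static bool funnel_to_root(struct rcu_state *rsp, struct rcu_node **rootp)
{
	struct rcu_node *rnp, *rnp_old = NULL;

	for (rnp = __this_cpu_read(rsp->rda->mynode); rnp; rnp = rnp->parent) {
		if ((READ_ONCE(rsp->gp_flags) & RCU_GP_FLAG_FQS) ||
		    !raw_spin_trylock(&rnp->fqslock)) {
			if (rnp_old)
				raw_spin_unlock(&rnp_old->fqslock);
			return true;	/* someone beat us to it */
		}
		if (rnp_old)
			raw_spin_unlock(&rnp_old->fqslock);
		rnp_old = rnp;
	}
	*rootp = rnp_old;	/* caller still holds the root's ->fqslock */
	return false;
}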
@@ -2881,7 +2916,7 @@ static void rcu_process_callbacks(struct softirq_action *unused)
  */
 static void invoke_rcu_callbacks(struct rcu_state *rsp, struct rcu_data *rdp)
 {
-       if (unlikely(!ACCESS_ONCE(rcu_scheduler_fully_active)))
+       if (unlikely(!READ_ONCE(rcu_scheduler_fully_active)))
                return;
        if (likely(!rsp->boost)) {
                rcu_do_batch(rsp, rdp);
@@ -2972,7 +3007,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
        WARN_ON_ONCE((unsigned long)head & 0x1); /* Misaligned rcu_head! */
        if (debug_rcu_head_queue(head)) {
                /* Probable double call_rcu(), so leak the callback. */
-               ACCESS_ONCE(head->func) = rcu_leak_callback;
+               WRITE_ONCE(head->func, rcu_leak_callback);
                WARN_ONCE(1, "__call_rcu(): Leaked duplicate callback\n");
                return;
        }
@@ -3011,7 +3046,7 @@ __call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu),
                if (!likely(rdp->nxtlist))
                        init_default_callback_list(rdp);
        }
-       ACCESS_ONCE(rdp->qlen) = rdp->qlen + 1;
+       WRITE_ONCE(rdp->qlen, rdp->qlen + 1);
        if (lazy)
                rdp->qlen_lazy++;
        else
@@ -3287,7 +3322,7 @@ void synchronize_sched_expedited(void)
        if (ULONG_CMP_GE((ulong)atomic_long_read(&rsp->expedited_start),
                         (ulong)atomic_long_read(&rsp->expedited_done) +
                         ULONG_MAX / 8)) {
-               synchronize_sched();
+               wait_rcu_gp(call_rcu_sched);
                atomic_long_inc(&rsp->expedited_wrap);
                return;
        }
@@ -3450,14 +3485,14 @@ static int __rcu_pending(struct rcu_state *rsp, struct rcu_data *rdp)
        }
 
        /* Has another RCU grace period completed?  */
-       if (ACCESS_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
+       if (READ_ONCE(rnp->completed) != rdp->completed) { /* outside lock */
                rdp->n_rp_gp_completed++;
                return 1;
        }
 
        /* Has a new RCU grace period started? */
-       if (ACCESS_ONCE(rnp->gpnum) != rdp->gpnum ||
-           unlikely(ACCESS_ONCE(rdp->gpwrap))) { /* outside lock */
+       if (READ_ONCE(rnp->gpnum) != rdp->gpnum ||
+           unlikely(READ_ONCE(rdp->gpwrap))) { /* outside lock */
                rdp->n_rp_gp_started++;
                return 1;
        }
@@ -3493,7 +3528,7 @@ static int rcu_pending(void)
  * non-NULL, store an indication of whether all callbacks are lazy.
  * (If there are no callbacks, all of them are deemed to be lazy.)
  */
-static int __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
+static bool __maybe_unused rcu_cpu_has_callbacks(bool *all_lazy)
 {
        bool al = true;
        bool hc = false;
@@ -3564,7 +3599,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 {
        int cpu;
        struct rcu_data *rdp;
-       unsigned long snap = ACCESS_ONCE(rsp->n_barrier_done);
+       unsigned long snap = READ_ONCE(rsp->n_barrier_done);
        unsigned long snap_done;
 
        _rcu_barrier_trace(rsp, "Begin", -1, snap);
@@ -3606,10 +3641,10 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /*
         * Increment ->n_barrier_done to avoid duplicate work.  Use
-        * ACCESS_ONCE() to prevent the compiler from speculating
+        * WRITE_ONCE() to prevent the compiler from speculating
         * the increment to precede the early-exit check.
         */
-       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+       WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 1);
        _rcu_barrier_trace(rsp, "Inc1", -1, rsp->n_barrier_done);
        smp_mb(); /* Order ->n_barrier_done increment with below mechanism. */
@@ -3645,7 +3680,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
                                __call_rcu(&rdp->barrier_head,
                                           rcu_barrier_callback, rsp, cpu, 0);
                        }
-               } else if (ACCESS_ONCE(rdp->qlen)) {
+               } else if (READ_ONCE(rdp->qlen)) {
                        _rcu_barrier_trace(rsp, "OnlineQ", cpu,
                                           rsp->n_barrier_done);
                        smp_call_function_single(cpu, rcu_barrier_func, rsp, 1);
@@ -3665,7 +3700,7 @@ static void _rcu_barrier(struct rcu_state *rsp)
 
        /* Increment ->n_barrier_done to prevent duplicate work. */
        smp_mb(); /* Keep increment after above mechanism. */
-       ACCESS_ONCE(rsp->n_barrier_done) = rsp->n_barrier_done + 1;
+       WRITE_ONCE(rsp->n_barrier_done, rsp->n_barrier_done + 1);
        WARN_ON_ONCE((rsp->n_barrier_done & 0x1) != 0);
        _rcu_barrier_trace(rsp, "Inc2", -1, rsp->n_barrier_done);
        smp_mb(); /* Keep increment before caller's subsequent code. */
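The two increments make ->n_barrier_done an even/odd ticket: odd while a barrier is in flight, even when idle, so a caller whose snapshot trails the current value far enough knows a complete barrier ran after its call began. Illustrative timeline (values hypothetical):

/*   snap = 4	caller's snapshot, no barrier running
 *   5		a later barrier increments at its start (odd)
 *   6		the same barrier increments at completion (even)
 * Observing >= 6 proves a full barrier executed after the snapshot. */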
@@ -3780,7 +3815,7 @@ rcu_init_percpu_data(int cpu, struct rcu_state *rsp)
        rdp->gpnum = rnp->completed; /* Make CPU later note any new GP. */
        rdp->completed = rnp->completed;
        rdp->passed_quiesce = false;
-       rdp->rcu_qs_ctr_snap = __this_cpu_read(rcu_qs_ctr);
+       rdp->rcu_qs_ctr_snap = per_cpu(rcu_qs_ctr, cpu);
        rdp->qs_pending = false;
        trace_rcu_grace_period(rsp->name, rdp->gpnum, TPS("cpuonl"));
        raw_spin_unlock_irqrestore(&rnp->lock, flags);
@@ -3924,16 +3959,16 @@ void rcu_scheduler_starting(void)
 
 /*
  * Compute the per-level fanout, either using the exact fanout specified
- * or balancing the tree, depending on CONFIG_RCU_FANOUT_EXACT.
+ * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
  */
 static void __init rcu_init_levelspread(struct rcu_state *rsp)
 {
        int i;
 
-       if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT)) {
+       if (rcu_fanout_exact) {
                rsp->levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
                for (i = rcu_num_lvls - 2; i >= 0; i--)
-                       rsp->levelspread[i] = CONFIG_RCU_FANOUT;
+                       rsp->levelspread[i] = RCU_FANOUT;
        } else {
                int ccur;
                int cprv;
@@ -3971,9 +4006,9 @@ static void __init rcu_init_one(struct rcu_state *rsp,
 
        BUILD_BUG_ON(MAX_RCU_LVLS > ARRAY_SIZE(buf));  /* Fix buf[] init! */
 
-       /* Silence gcc 4.8 warning about array index out of range. */
-       if (rcu_num_lvls > RCU_NUM_LVLS)
-               panic("rcu_init_one: rcu_num_lvls overflow");
+       /* Silence gcc 4.8 false positive about array index out of range. */
+       if (rcu_num_lvls <= 0 || rcu_num_lvls > RCU_NUM_LVLS)
+               panic("rcu_init_one: rcu_num_lvls out of range");
 
        /* Initialize the level-tracking arrays. */
 
@@ -4059,7 +4094,7 @@ static void __init rcu_init_geometry(void)
                jiffies_till_next_fqs = d;
 
        /* If the compile-time values are accurate, just leave. */
-       if (rcu_fanout_leaf == CONFIG_RCU_FANOUT_LEAF &&
+       if (rcu_fanout_leaf == RCU_FANOUT_LEAF &&
            nr_cpu_ids == NR_CPUS)
                return;
        pr_info("RCU: Adjusting geometry for rcu_fanout_leaf=%d, nr_cpu_ids=%d\n",
@@ -4073,7 +4108,7 @@ static void __init rcu_init_geometry(void)
        rcu_capacity[0] = 1;
        rcu_capacity[1] = rcu_fanout_leaf;
        for (i = 2; i <= MAX_RCU_LVLS; i++)
-               rcu_capacity[i] = rcu_capacity[i - 1] * CONFIG_RCU_FANOUT;
+               rcu_capacity[i] = rcu_capacity[i - 1] * RCU_FANOUT;
 
        /*
         * The boot-time rcu_fanout_leaf parameter is only permitted
@@ -4083,7 +4118,7 @@ static void __init rcu_init_geometry(void)
         * the configured number of CPUs.  Complain and fall back to the
         * compile-time values if these limits are exceeded.
         */
-       if (rcu_fanout_leaf < CONFIG_RCU_FANOUT_LEAF ||
+       if (rcu_fanout_leaf < RCU_FANOUT_LEAF ||
            rcu_fanout_leaf > sizeof(unsigned long) * 8 ||
            n > rcu_capacity[MAX_RCU_LVLS]) {
                WARN_ON(1);
@@ -4109,6 +4144,28 @@ static void __init rcu_init_geometry(void)
        rcu_num_nodes -= n;
 }
 
+/*
+ * Dump out the structure of the rcu_node combining tree associated
+ * with the rcu_state structure referenced by rsp.
+ */
+static void __init rcu_dump_rcu_node_tree(struct rcu_state *rsp)
+{
+       int level = 0;
+       struct rcu_node *rnp;
+
+       pr_info("rcu_node tree layout dump\n");
+       pr_info(" ");
+       rcu_for_each_node_breadth_first(rsp, rnp) {
+               if (rnp->level != level) {
+                       pr_cont("\n");
+                       pr_info(" ");
+                       level = rnp->level;
+               }
+               pr_cont("%d:%d ^%d  ", rnp->grplo, rnp->grphi, rnp->grpnum);
+       }
+       pr_cont("\n");
+}
+
 void __init rcu_init(void)
 {
        int cpu;
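With the new dump_tree boot option, rcu_dump_rcu_node_tree() prints one line per tree level, each node formatted as grplo:grphi ^grpnum. For a hypothetical 64-CPU system with leaf fanout 16, the dump would resemble:

rcu_node tree layout dump
 0:63 ^0
 0:15 ^0  16:31 ^1  32:47 ^2  48:63 ^3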
@@ -4119,6 +4176,8 @@ void __init rcu_init(void)
        rcu_init_geometry();
        rcu_init_one(&rcu_bh_state, &rcu_bh_data);
        rcu_init_one(&rcu_sched_state, &rcu_sched_data);
+       if (dump_tree)
+               rcu_dump_rcu_node_tree(&rcu_sched_state);
        __rcu_init_preempt();
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
 
index a69d3dab2ec4dbf9dc8c412813c84c27363f9378..4adb7ca0bf47a209067c66205ace8b6f0dbebb61 100644 (file)
  * In practice, this did work well going from three levels to four.
  * Of course, your mileage may vary.
  */
+
 #define MAX_RCU_LVLS 4
-#define RCU_FANOUT_1         (CONFIG_RCU_FANOUT_LEAF)
-#define RCU_FANOUT_2         (RCU_FANOUT_1 * CONFIG_RCU_FANOUT)
-#define RCU_FANOUT_3         (RCU_FANOUT_2 * CONFIG_RCU_FANOUT)
-#define RCU_FANOUT_4         (RCU_FANOUT_3 * CONFIG_RCU_FANOUT)
+
+#ifdef CONFIG_RCU_FANOUT
+#define RCU_FANOUT CONFIG_RCU_FANOUT
+#else /* #ifdef CONFIG_RCU_FANOUT */
+# ifdef CONFIG_64BIT
+# define RCU_FANOUT 64
+# else
+# define RCU_FANOUT 32
+# endif
+#endif /* #else #ifdef CONFIG_RCU_FANOUT */
+
+#ifdef CONFIG_RCU_FANOUT_LEAF
+#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
+# ifdef CONFIG_64BIT
+# define RCU_FANOUT_LEAF 64
+# else
+# define RCU_FANOUT_LEAF 32
+# endif
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
+
+#define RCU_FANOUT_1         (RCU_FANOUT_LEAF)
+#define RCU_FANOUT_2         (RCU_FANOUT_1 * RCU_FANOUT)
+#define RCU_FANOUT_3         (RCU_FANOUT_2 * RCU_FANOUT)
+#define RCU_FANOUT_4         (RCU_FANOUT_3 * RCU_FANOUT)
 
 #if NR_CPUS <= RCU_FANOUT_1
 #  define RCU_NUM_LVLS       1
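With the 64-bit defaults these macros now supply (RCU_FANOUT = 64, RCU_FANOUT_LEAF = 64), the per-level capacities work out as:

RCU_FANOUT_1 = 64			/* 64 CPUs, one leaf level */
RCU_FANOUT_2 = 64 * 64			/* 4096 CPUs */
RCU_FANOUT_3 = 4096 * 64		/* 262144 CPUs */
RCU_FANOUT_4 = 262144 * 64		/* 16777216 CPUs */

so any NR_CPUS <= 64 needs only a single rcu_node level.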
@@ -170,7 +192,6 @@ struct rcu_node {
                                /*  if there is no such task.  If there */
                                /*  is no current expedited grace period, */
                                /*  then there cannot be any such task. */
-#ifdef CONFIG_RCU_BOOST
        struct list_head *boost_tasks;
                                /* Pointer to first task that needs to be */
                                /*  priority boosted, or NULL if no priority */
@@ -208,7 +229,6 @@ struct rcu_node {
        unsigned long n_balk_nos;
                                /* Refused to boost: not sure why, though. */
                                /*  This can happen due to race conditions. */
-#endif /* #ifdef CONFIG_RCU_BOOST */
 #ifdef CONFIG_RCU_NOCB_CPU
        wait_queue_head_t nocb_gp_wq[2];
                                /* Place for rcu_nocb_kthread() to wait GP. */
@@ -519,14 +539,11 @@ extern struct list_head rcu_struct_flavors;
  * RCU implementation internal declarations:
  */
 extern struct rcu_state rcu_sched_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_sched_data);
 
 extern struct rcu_state rcu_bh_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_bh_data);
 
 #ifdef CONFIG_PREEMPT_RCU
 extern struct rcu_state rcu_preempt_state;
-DECLARE_PER_CPU(struct rcu_data, rcu_preempt_data);
 #endif /* #ifdef CONFIG_PREEMPT_RCU */
 
 #ifdef CONFIG_RCU_BOOST
index 8c0ec0f5a02702f1a3c5ed5db0bdf346ac7ae140..013485fb2b06b9f499d0673a36bf8f62d5e72607 100644 (file)
@@ -43,7 +43,17 @@ DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_status);
 DEFINE_PER_CPU(unsigned int, rcu_cpu_kthread_loops);
 DEFINE_PER_CPU(char, rcu_cpu_has_work);
 
-#endif /* #ifdef CONFIG_RCU_BOOST */
+#else /* #ifdef CONFIG_RCU_BOOST */
+
+/*
+ * Some architectures do not define rt_mutexes, but if !CONFIG_RCU_BOOST,
+ * all uses are in dead code.  Provide a definition to keep the compiler
+ * happy, but add WARN_ON_ONCE() to complain if used in the wrong place.
+ * This probably needs to be excluded from -rt builds.
+ */
+#define rt_mutex_owner(a) ({ WARN_ON_ONCE(1); NULL; })
+
+#endif /* #else #ifdef CONFIG_RCU_BOOST */
 
 #ifdef CONFIG_RCU_NOCB_CPU
 static cpumask_var_t rcu_nocb_mask; /* CPUs to have callbacks offloaded. */
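The stub above leans on a GCC statement expression: ({ ... }) evaluates to its last expression, so any (dead) call site both fires the WARN_ON_ONCE() if ever reached and yields a well-typed NULL. Roughly, rt_mutex_owner(&rnp->boost_mtx) expands to:

({
	WARN_ON_ONCE(1);	/* unreachable without CONFIG_RCU_BOOST */
	NULL;			/* value of the whole expression */
})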
@@ -60,11 +70,11 @@ static void __init rcu_bootup_announce_oddness(void)
 {
        if (IS_ENABLED(CONFIG_RCU_TRACE))
                pr_info("\tRCU debugfs-based tracing is enabled.\n");
-       if ((IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 64) ||
-           (!IS_ENABLED(CONFIG_64BIT) && CONFIG_RCU_FANOUT != 32))
+       if ((IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 64) ||
+           (!IS_ENABLED(CONFIG_64BIT) && RCU_FANOUT != 32))
                pr_info("\tCONFIG_RCU_FANOUT set to non-default value of %d\n",
-                      CONFIG_RCU_FANOUT);
-       if (IS_ENABLED(CONFIG_RCU_FANOUT_EXACT))
+                      RCU_FANOUT);
+       if (rcu_fanout_exact)
                pr_info("\tHierarchical RCU autobalancing is disabled.\n");
        if (IS_ENABLED(CONFIG_RCU_FAST_NO_HZ))
                pr_info("\tRCU dyntick-idle grace-period acceleration is enabled.\n");
@@ -76,10 +86,10 @@ static void __init rcu_bootup_announce_oddness(void)
                pr_info("\tAdditional per-CPU info printed with stalls.\n");
        if (NUM_RCU_LVL_4 != 0)
                pr_info("\tFour-level hierarchy is enabled.\n");
-       if (CONFIG_RCU_FANOUT_LEAF != 16)
+       if (RCU_FANOUT_LEAF != 16)
                pr_info("\tBuild-time adjustment of leaf fanout to %d.\n",
-                       CONFIG_RCU_FANOUT_LEAF);
-       if (rcu_fanout_leaf != CONFIG_RCU_FANOUT_LEAF)
+                       RCU_FANOUT_LEAF);
+       if (rcu_fanout_leaf != RCU_FANOUT_LEAF)
                pr_info("\tBoot-time adjustment of leaf fanout to %d.\n", rcu_fanout_leaf);
        if (nr_cpu_ids != NR_CPUS)
                pr_info("\tRCU restricting CPUs from NR_CPUS=%d to nr_cpu_ids=%d.\n", NR_CPUS, nr_cpu_ids);
@@ -90,7 +100,8 @@ static void __init rcu_bootup_announce_oddness(void)
 #ifdef CONFIG_PREEMPT_RCU
 
 RCU_STATE_INITIALIZER(rcu_preempt, 'p', call_rcu);
-static struct rcu_state *rcu_state_p = &rcu_preempt_state;
+static struct rcu_state *const rcu_state_p = &rcu_preempt_state;
+static struct rcu_data __percpu *const rcu_data_p = &rcu_preempt_data;
 
 static int rcu_preempted_readers_exp(struct rcu_node *rnp);
 static void rcu_report_exp_rnp(struct rcu_state *rsp, struct rcu_node *rnp,
@@ -116,11 +127,11 @@ static void __init rcu_bootup_announce(void)
  */
 static void rcu_preempt_qs(void)
 {
-       if (!__this_cpu_read(rcu_preempt_data.passed_quiesce)) {
+       if (!__this_cpu_read(rcu_data_p->passed_quiesce)) {
                trace_rcu_grace_period(TPS("rcu_preempt"),
-                                      __this_cpu_read(rcu_preempt_data.gpnum),
+                                      __this_cpu_read(rcu_data_p->gpnum),
                                       TPS("cpuqs"));
-               __this_cpu_write(rcu_preempt_data.passed_quiesce, 1);
+               __this_cpu_write(rcu_data_p->passed_quiesce, 1);
                barrier(); /* Coordinate with rcu_preempt_check_callbacks(). */
                current->rcu_read_unlock_special.b.need_qs = false;
        }
@@ -150,7 +161,7 @@ static void rcu_preempt_note_context_switch(void)
            !t->rcu_read_unlock_special.b.blocked) {
 
                /* Possibly blocking in an RCU read-side critical section. */
-               rdp = this_cpu_ptr(rcu_preempt_state.rda);
+               rdp = this_cpu_ptr(rcu_state_p->rda);
                rnp = rdp->mynode;
                raw_spin_lock_irqsave(&rnp->lock, flags);
                smp_mb__after_unlock_lock();
@@ -180,10 +191,9 @@ static void rcu_preempt_note_context_switch(void)
                if ((rnp->qsmask & rdp->grpmask) && rnp->gp_tasks != NULL) {
                        list_add(&t->rcu_node_entry, rnp->gp_tasks->prev);
                        rnp->gp_tasks = &t->rcu_node_entry;
-#ifdef CONFIG_RCU_BOOST
-                       if (rnp->boost_tasks != NULL)
+                       if (IS_ENABLED(CONFIG_RCU_BOOST) &&
+                           rnp->boost_tasks != NULL)
                                rnp->boost_tasks = rnp->gp_tasks;
-#endif /* #ifdef CONFIG_RCU_BOOST */
                } else {
                        list_add(&t->rcu_node_entry, &rnp->blkd_tasks);
                        if (rnp->qsmask & rdp->grpmask)
@@ -263,9 +273,7 @@ void rcu_read_unlock_special(struct task_struct *t)
        bool empty_exp_now;
        unsigned long flags;
        struct list_head *np;
-#ifdef CONFIG_RCU_BOOST
        bool drop_boost_mutex = false;
-#endif /* #ifdef CONFIG_RCU_BOOST */
        struct rcu_node *rnp;
        union rcu_special special;
 
@@ -307,9 +315,11 @@ void rcu_read_unlock_special(struct task_struct *t)
                t->rcu_read_unlock_special.b.blocked = false;
 
                /*
-                * Remove this task from the list it blocked on.  The
-                * task can migrate while we acquire the lock, but at
-                * most one time.  So at most two passes through loop.
+                * Remove this task from the list it blocked on.  The task
+                * now remains queued on the rcu_node corresponding to
+                * the CPU it first blocked on, so the first attempt to
+                * acquire the task's rcu_node's ->lock will succeed.
+                * Keep the loop and add a WARN_ON_ONCE() out of sheer paranoia.
                 */
                for (;;) {
                        rnp = t->rcu_blocked_node;
@@ -317,6 +327,7 @@ void rcu_read_unlock_special(struct task_struct *t)
                        smp_mb__after_unlock_lock();
                        if (rnp == t->rcu_blocked_node)
                                break;
+                       WARN_ON_ONCE(1);
                        raw_spin_unlock(&rnp->lock); /* irqs remain disabled. */
                }
                empty_norm = !rcu_preempt_blocked_readers_cgp(rnp);
@@ -331,12 +342,12 @@ void rcu_read_unlock_special(struct task_struct *t)
                        rnp->gp_tasks = np;
                if (&t->rcu_node_entry == rnp->exp_tasks)
                        rnp->exp_tasks = np;
-#ifdef CONFIG_RCU_BOOST
-               if (&t->rcu_node_entry == rnp->boost_tasks)
-                       rnp->boost_tasks = np;
-               /* Snapshot ->boost_mtx ownership with rcu_node lock held. */
-               drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
-#endif /* #ifdef CONFIG_RCU_BOOST */
+               if (IS_ENABLED(CONFIG_RCU_BOOST)) {
+                       if (&t->rcu_node_entry == rnp->boost_tasks)
+                               rnp->boost_tasks = np;
+                       /* Snapshot ->boost_mtx ownership w/rnp->lock held. */
+                       drop_boost_mutex = rt_mutex_owner(&rnp->boost_mtx) == t;
+               }
 
                /*
                 * If this was the last task on the current list, and if
@@ -353,24 +364,21 @@ void rcu_read_unlock_special(struct task_struct *t)
                                                         rnp->grplo,
                                                         rnp->grphi,
                                                         !!rnp->gp_tasks);
-                       rcu_report_unblock_qs_rnp(&rcu_preempt_state,
-                                                 rnp, flags);
+                       rcu_report_unblock_qs_rnp(rcu_state_p, rnp, flags);
                } else {
                        raw_spin_unlock_irqrestore(&rnp->lock, flags);
                }
 
-#ifdef CONFIG_RCU_BOOST
                /* Unboost if we were boosted. */
-               if (drop_boost_mutex)
+               if (IS_ENABLED(CONFIG_RCU_BOOST) && drop_boost_mutex)
                        rt_mutex_unlock(&rnp->boost_mtx);
-#endif /* #ifdef CONFIG_RCU_BOOST */
 
                /*
                 * If this was the last task on the expedited lists,
                 * then we need to report up the rcu_node hierarchy.
                 */
                if (!empty_exp && empty_exp_now)
-                       rcu_report_exp_rnp(&rcu_preempt_state, rnp, true);
+                       rcu_report_exp_rnp(rcu_state_p, rnp, true);
        } else {
                local_irq_restore(flags);
        }
@@ -390,7 +398,7 @@ static void rcu_print_detail_task_stall_rnp(struct rcu_node *rnp)
                raw_spin_unlock_irqrestore(&rnp->lock, flags);
                return;
        }
-       t = list_entry(rnp->gp_tasks,
+       t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry)
                sched_show_task(t);
@@ -447,7 +455,7 @@ static int rcu_print_task_stall(struct rcu_node *rnp)
        if (!rcu_preempt_blocked_readers_cgp(rnp))
                return 0;
        rcu_print_task_stall_begin(rnp);
-       t = list_entry(rnp->gp_tasks,
+       t = list_entry(rnp->gp_tasks->prev,
                       struct task_struct, rcu_node_entry);
        list_for_each_entry_continue(t, &rnp->blkd_tasks, rcu_node_entry) {
                pr_cont(" P%d", t->pid);
@@ -491,8 +499,8 @@ static void rcu_preempt_check_callbacks(void)
                return;
        }
        if (t->rcu_read_lock_nesting > 0 &&
-           __this_cpu_read(rcu_preempt_data.qs_pending) &&
-           !__this_cpu_read(rcu_preempt_data.passed_quiesce))
+           __this_cpu_read(rcu_data_p->qs_pending) &&
+           !__this_cpu_read(rcu_data_p->passed_quiesce))
                t->rcu_read_unlock_special.b.need_qs = true;
 }
 
@@ -500,7 +508,7 @@ static void rcu_preempt_check_callbacks(void)
 
 static void rcu_preempt_do_callbacks(void)
 {
-       rcu_do_batch(&rcu_preempt_state, this_cpu_ptr(&rcu_preempt_data));
+       rcu_do_batch(rcu_state_p, this_cpu_ptr(rcu_data_p));
 }
 
 #endif /* #ifdef CONFIG_RCU_BOOST */
@@ -510,7 +518,7 @@ static void rcu_preempt_do_callbacks(void)
  */
 void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
 {
-       __call_rcu(head, func, &rcu_preempt_state, -1, 0);
+       __call_rcu(head, func, rcu_state_p, -1, 0);
 }
 EXPORT_SYMBOL_GPL(call_rcu);
 
@@ -570,7 +578,7 @@ static int rcu_preempted_readers_exp(struct rcu_node *rnp)
 static int sync_rcu_preempt_exp_done(struct rcu_node *rnp)
 {
        return !rcu_preempted_readers_exp(rnp) &&
-              ACCESS_ONCE(rnp->expmask) == 0;
+              READ_ONCE(rnp->expmask) == 0;
 }
 
 /*
@@ -711,12 +719,12 @@ sync_rcu_preempt_exp_init2(struct rcu_state *rsp, struct rcu_node *rnp)
 void synchronize_rcu_expedited(void)
 {
        struct rcu_node *rnp;
-       struct rcu_state *rsp = &rcu_preempt_state;
+       struct rcu_state *rsp = rcu_state_p;
        unsigned long snap;
        int trycount = 0;
 
        smp_mb(); /* Caller's modifications seen first by other CPUs. */
-       snap = ACCESS_ONCE(sync_rcu_preempt_exp_count) + 1;
+       snap = READ_ONCE(sync_rcu_preempt_exp_count) + 1;
        smp_mb(); /* Above access cannot bleed into critical section. */
 
        /*
@@ -740,7 +748,7 @@ void synchronize_rcu_expedited(void)
         */
        while (!mutex_trylock(&sync_rcu_preempt_exp_mutex)) {
                if (ULONG_CMP_LT(snap,
-                   ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+                   READ_ONCE(sync_rcu_preempt_exp_count))) {
                        put_online_cpus();
                        goto mb_ret; /* Others did our work for us. */
                }
@@ -752,7 +760,7 @@ void synchronize_rcu_expedited(void)
                        return;
                }
        }
-       if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count))) {
+       if (ULONG_CMP_LT(snap, READ_ONCE(sync_rcu_preempt_exp_count))) {
                put_online_cpus();
                goto unlock_mb_ret; /* Others did our work for us. */
        }
@@ -780,8 +788,7 @@ void synchronize_rcu_expedited(void)
 
        /* Clean up and exit. */
        smp_mb(); /* ensure expedited GP seen before counter increment. */
-       ACCESS_ONCE(sync_rcu_preempt_exp_count) =
-                                       sync_rcu_preempt_exp_count + 1;
+       WRITE_ONCE(sync_rcu_preempt_exp_count, sync_rcu_preempt_exp_count + 1);
 unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
 mb_ret:
@@ -799,7 +806,7 @@ EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
  */
 void rcu_barrier(void)
 {
-       _rcu_barrier(&rcu_preempt_state);
+       _rcu_barrier(rcu_state_p);
 }
 EXPORT_SYMBOL_GPL(rcu_barrier);
 
@@ -808,7 +815,7 @@ EXPORT_SYMBOL_GPL(rcu_barrier);
  */
 static void __init __rcu_init_preempt(void)
 {
-       rcu_init_one(&rcu_preempt_state, &rcu_preempt_data);
+       rcu_init_one(rcu_state_p, rcu_data_p);
 }
 
 /*
@@ -831,7 +838,8 @@ void exit_rcu(void)
 
 #else /* #ifdef CONFIG_PREEMPT_RCU */
 
-static struct rcu_state *rcu_state_p = &rcu_sched_state;
+static struct rcu_state *const rcu_state_p = &rcu_sched_state;
+static struct rcu_data __percpu *const rcu_data_p = &rcu_sched_data;
 
 /*
  * Tell them what RCU they are running.
@@ -994,8 +1002,8 @@ static int rcu_boost(struct rcu_node *rnp)
        struct task_struct *t;
        struct list_head *tb;
 
-       if (ACCESS_ONCE(rnp->exp_tasks) == NULL &&
-           ACCESS_ONCE(rnp->boost_tasks) == NULL)
+       if (READ_ONCE(rnp->exp_tasks) == NULL &&
+           READ_ONCE(rnp->boost_tasks) == NULL)
                return 0;  /* Nothing left to boost. */
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
@@ -1048,8 +1056,8 @@ static int rcu_boost(struct rcu_node *rnp)
        rt_mutex_lock(&rnp->boost_mtx);
        rt_mutex_unlock(&rnp->boost_mtx);  /* Then keep lockdep happy. */
 
-       return ACCESS_ONCE(rnp->exp_tasks) != NULL ||
-              ACCESS_ONCE(rnp->boost_tasks) != NULL;
+       return READ_ONCE(rnp->exp_tasks) != NULL ||
+              READ_ONCE(rnp->boost_tasks) != NULL;
 }
 
 /*
@@ -1173,7 +1181,7 @@ static int rcu_spawn_one_boost_kthread(struct rcu_state *rsp,
        struct sched_param sp;
        struct task_struct *t;
 
-       if (&rcu_preempt_state != rsp)
+       if (rcu_state_p != rsp)
                return 0;
 
        if (!rcu_scheduler_fully_active || rcu_rnp_online_cpus(rnp) == 0)
@@ -1367,13 +1375,12 @@ static void rcu_prepare_kthreads(int cpu)
  * Because we do not have RCU_FAST_NO_HZ, just check whether this CPU needs
  * any flavor of RCU.
  */
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *delta_jiffies)
+int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
-       *delta_jiffies = ULONG_MAX;
-       return rcu_cpu_has_callbacks(NULL);
+       *nextevt = KTIME_MAX;
+       return IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)
+              ? 0 : rcu_cpu_has_callbacks(NULL);
 }
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 
 /*
  * Because we do not have RCU_FAST_NO_HZ, don't bother cleaning up
@@ -1432,8 +1439,6 @@ module_param(rcu_idle_gp_delay, int, 0644);
 static int rcu_idle_lazy_gp_delay = RCU_IDLE_LAZY_GP_DELAY;
 module_param(rcu_idle_lazy_gp_delay, int, 0644);
 
-extern int tick_nohz_active;
-
 /*
  * Try to advance callbacks for all flavors of RCU on the current CPU, but
  * only if it has been awhile since the last time we did so.  Afterwards,
@@ -1462,7 +1467,7 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
                 * callbacks not yet ready to invoke.
                 */
                if ((rdp->completed != rnp->completed ||
-                    unlikely(ACCESS_ONCE(rdp->gpwrap))) &&
+                    unlikely(READ_ONCE(rdp->gpwrap))) &&
                    rdp->nxttail[RCU_DONE_TAIL] != rdp->nxttail[RCU_NEXT_TAIL])
                        note_gp_changes(rsp, rdp);
 
@@ -1480,17 +1485,22 @@ static bool __maybe_unused rcu_try_advance_all_cbs(void)
  *
  * The caller must have disabled interrupts.
  */
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-int rcu_needs_cpu(unsigned long *dj)
+int rcu_needs_cpu(u64 basemono, u64 *nextevt)
 {
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
+       unsigned long dj;
+
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL)) {
+               *nextevt = KTIME_MAX;
+               return 0;
+       }
 
        /* Snapshot to detect later posting of non-lazy callback. */
        rdtp->nonlazy_posted_snap = rdtp->nonlazy_posted;
 
        /* If no callbacks, RCU doesn't need the CPU. */
        if (!rcu_cpu_has_callbacks(&rdtp->all_lazy)) {
-               *dj = ULONG_MAX;
+               *nextevt = KTIME_MAX;
                return 0;
        }
 
@@ -1504,14 +1514,14 @@ int rcu_needs_cpu(unsigned long *dj)
 
        /* Request timer delay depending on laziness, and round. */
        if (!rdtp->all_lazy) {
-               *dj = round_up(rcu_idle_gp_delay + jiffies,
+               dj = round_up(rcu_idle_gp_delay + jiffies,
                               rcu_idle_gp_delay) - jiffies;
        } else {
-               *dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
+               dj = round_jiffies(rcu_idle_lazy_gp_delay + jiffies) - jiffies;
        }
+       *nextevt = basemono + dj * TICK_NSEC;
        return 0;
 }
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
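rcu_needs_cpu() now reports an absolute expiry in nanoseconds rather than a jiffies delta. TICK_NSEC is one jiffy in nanoseconds (roughly NSEC_PER_SEC / HZ), so with hypothetical HZ = 1000:

	dj = 4;					/* jiffies until RCU needs the CPU */
	*nextevt = basemono + dj * TICK_NSEC;	/* basemono + 4000000 ns = 4 ms */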
 
 /*
  * Prepare a CPU for idle from an RCU perspective.  The first major task
@@ -1525,7 +1535,6 @@ int rcu_needs_cpu(unsigned long *dj)
  */
 static void rcu_prepare_for_idle(void)
 {
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
        bool needwake;
        struct rcu_data *rdp;
        struct rcu_dynticks *rdtp = this_cpu_ptr(&rcu_dynticks);
@@ -1533,8 +1542,11 @@ static void rcu_prepare_for_idle(void)
        struct rcu_state *rsp;
        int tne;
 
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL))
+               return;
+
        /* Handle nohz enablement switches conservatively. */
-       tne = ACCESS_ONCE(tick_nohz_active);
+       tne = READ_ONCE(tick_nohz_active);
        if (tne != rdtp->tick_nohz_enabled_snap) {
                if (rcu_cpu_has_callbacks(NULL))
                        invoke_rcu_core(); /* force nohz to see update. */
@@ -1580,7 +1592,6 @@ static void rcu_prepare_for_idle(void)
                if (needwake)
                        rcu_gp_kthread_wake(rsp);
        }
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 }
 
 /*
@@ -1590,12 +1601,11 @@ static void rcu_prepare_for_idle(void)
  */
 static void rcu_cleanup_after_idle(void)
 {
-#ifndef CONFIG_RCU_NOCB_CPU_ALL
-       if (rcu_is_nocb_cpu(smp_processor_id()))
+       if (IS_ENABLED(CONFIG_RCU_NOCB_CPU_ALL) ||
+           rcu_is_nocb_cpu(smp_processor_id()))
                return;
        if (rcu_try_advance_all_cbs())
                invoke_rcu_core();
-#endif /* #ifndef CONFIG_RCU_NOCB_CPU_ALL */
 }
 
 /*
@@ -1760,7 +1770,7 @@ static void print_cpu_stall_info(struct rcu_state *rsp, int cpu)
               atomic_read(&rdtp->dynticks) & 0xfff,
               rdtp->dynticks_nesting, rdtp->dynticks_nmi_nesting,
               rdp->softirq_snap, kstat_softirqs_cpu(RCU_SOFTIRQ, cpu),
-              ACCESS_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
+              READ_ONCE(rsp->n_force_qs) - rsp->n_force_qs_gpstart,
               fast_no_hz);
 }
 
@@ -1898,11 +1908,11 @@ static void wake_nocb_leader(struct rcu_data *rdp, bool force)
 {
        struct rcu_data *rdp_leader = rdp->nocb_leader;
 
-       if (!ACCESS_ONCE(rdp_leader->nocb_kthread))
+       if (!READ_ONCE(rdp_leader->nocb_kthread))
                return;
-       if (ACCESS_ONCE(rdp_leader->nocb_leader_sleep) || force) {
+       if (READ_ONCE(rdp_leader->nocb_leader_sleep) || force) {
                /* Prior smp_mb__after_atomic() orders against prior enqueue. */
-               ACCESS_ONCE(rdp_leader->nocb_leader_sleep) = false;
+               WRITE_ONCE(rdp_leader->nocb_leader_sleep, false);
                wake_up(&rdp_leader->nocb_wq);
        }
 }
@@ -1934,14 +1944,14 @@ static bool rcu_nocb_cpu_needs_barrier(struct rcu_state *rsp, int cpu)
        ret = atomic_long_read(&rdp->nocb_q_count);
 
 #ifdef CONFIG_PROVE_RCU
-       rhp = ACCESS_ONCE(rdp->nocb_head);
+       rhp = READ_ONCE(rdp->nocb_head);
        if (!rhp)
-               rhp = ACCESS_ONCE(rdp->nocb_gp_head);
+               rhp = READ_ONCE(rdp->nocb_gp_head);
        if (!rhp)
-               rhp = ACCESS_ONCE(rdp->nocb_follower_head);
+               rhp = READ_ONCE(rdp->nocb_follower_head);
 
        /* Having no rcuo kthread but CBs after scheduler starts is bad! */
-       if (!ACCESS_ONCE(rdp->nocb_kthread) && rhp &&
+       if (!READ_ONCE(rdp->nocb_kthread) && rhp &&
            rcu_scheduler_fully_active) {
                /* RCU callback enqueued before CPU first came online??? */
                pr_err("RCU: Never-onlined no-CBs CPU %d has CB %p\n",
@@ -1975,12 +1985,12 @@ static void __call_rcu_nocb_enqueue(struct rcu_data *rdp,
        atomic_long_add(rhcount, &rdp->nocb_q_count);
        /* rcu_barrier() relies on ->nocb_q_count add before xchg. */
        old_rhpp = xchg(&rdp->nocb_tail, rhtp);
-       ACCESS_ONCE(*old_rhpp) = rhp;
+       WRITE_ONCE(*old_rhpp, rhp);
        atomic_long_add(rhcount_lazy, &rdp->nocb_q_count_lazy);
        smp_mb__after_atomic(); /* Store *old_rhpp before _wake test. */
 
        /* If we are not being polled and there is a kthread, awaken it ... */
-       t = ACCESS_ONCE(rdp->nocb_kthread);
+       t = READ_ONCE(rdp->nocb_kthread);
        if (rcu_nocb_poll || !t) {
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
                                    TPS("WakeNotPoll"));
@@ -2118,7 +2128,7 @@ static void rcu_nocb_wait_gp(struct rcu_data *rdp)
        for (;;) {
                wait_event_interruptible(
                        rnp->nocb_gp_wq[c & 0x1],
-                       (d = ULONG_CMP_GE(ACCESS_ONCE(rnp->completed), c)));
+                       (d = ULONG_CMP_GE(READ_ONCE(rnp->completed), c)));
                if (likely(d))
                        break;
                WARN_ON(signal_pending(current));
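The ULONG_CMP_GE() in the wait condition compares grace-period numbers wrap-safely; its definition in include/linux/rcupdate.h treats any unsigned difference of at most half the counter space as "greater or equal":

#define ULONG_CMP_GE(a, b)	(ULONG_MAX / 2 >= (a) - (b))

/* Example: c = ULONG_MAX, and ->completed later wraps to 1; then
 * 1 - ULONG_MAX is 2 as an unsigned long, so the comparison still
 * reports completed >= c correctly across the wrap. */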
@@ -2145,7 +2155,7 @@ wait_again:
        if (!rcu_nocb_poll) {
                trace_rcu_nocb_wake(my_rdp->rsp->name, my_rdp->cpu, "Sleep");
                wait_event_interruptible(my_rdp->nocb_wq,
-                               !ACCESS_ONCE(my_rdp->nocb_leader_sleep));
+                               !READ_ONCE(my_rdp->nocb_leader_sleep));
                /* Memory barrier handled by smp_mb() calls below and repoll. */
        } else if (firsttime) {
                firsttime = false; /* Don't drown trace log with "Poll"! */
@@ -2159,12 +2169,12 @@ wait_again:
         */
        gotcbs = false;
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
-               rdp->nocb_gp_head = ACCESS_ONCE(rdp->nocb_head);
+               rdp->nocb_gp_head = READ_ONCE(rdp->nocb_head);
                if (!rdp->nocb_gp_head)
                        continue;  /* No CBs here, try next follower. */
 
                /* Move callbacks to wait-for-GP list, which is empty. */
-               ACCESS_ONCE(rdp->nocb_head) = NULL;
+               WRITE_ONCE(rdp->nocb_head, NULL);
                rdp->nocb_gp_tail = xchg(&rdp->nocb_tail, &rdp->nocb_head);
                gotcbs = true;
        }
@@ -2184,7 +2194,7 @@ wait_again:
                my_rdp->nocb_leader_sleep = true;
                smp_mb();  /* Ensure _sleep true before scan. */
                for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower)
-                       if (ACCESS_ONCE(rdp->nocb_head)) {
+                       if (READ_ONCE(rdp->nocb_head)) {
                                /* Found CB, so short-circuit next wait. */
                                my_rdp->nocb_leader_sleep = false;
                                break;
@@ -2205,7 +2215,7 @@ wait_again:
 
        /* Each pass through the following loop wakes a follower, if needed. */
        for (rdp = my_rdp; rdp; rdp = rdp->nocb_next_follower) {
-               if (ACCESS_ONCE(rdp->nocb_head))
+               if (READ_ONCE(rdp->nocb_head))
                        my_rdp->nocb_leader_sleep = false;/* No need to sleep.*/
                if (!rdp->nocb_gp_head)
                        continue; /* No CBs, so no need to wake follower. */
@@ -2241,7 +2251,7 @@ static void nocb_follower_wait(struct rcu_data *rdp)
                        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu,
                                            "FollowerSleep");
                        wait_event_interruptible(rdp->nocb_wq,
-                                                ACCESS_ONCE(rdp->nocb_follower_head));
+                                                READ_ONCE(rdp->nocb_follower_head));
                } else if (firsttime) {
                        /* Don't drown trace log with "Poll"! */
                        firsttime = false;
@@ -2282,10 +2292,10 @@ static int rcu_nocb_kthread(void *arg)
                        nocb_follower_wait(rdp);
 
                /* Pull the ready-to-invoke callbacks onto local list. */
-               list = ACCESS_ONCE(rdp->nocb_follower_head);
+               list = READ_ONCE(rdp->nocb_follower_head);
                BUG_ON(!list);
                trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, "WokeNonEmpty");
-               ACCESS_ONCE(rdp->nocb_follower_head) = NULL;
+               WRITE_ONCE(rdp->nocb_follower_head, NULL);
                tail = xchg(&rdp->nocb_follower_tail, &rdp->nocb_follower_head);
 
                /* Each pass through the following loop invokes a callback. */
@@ -2324,7 +2334,7 @@ static int rcu_nocb_kthread(void *arg)
 /* Is a deferred wakeup of rcu_nocb_kthread() required? */
 static int rcu_nocb_need_deferred_wakeup(struct rcu_data *rdp)
 {
-       return ACCESS_ONCE(rdp->nocb_defer_wakeup);
+       return READ_ONCE(rdp->nocb_defer_wakeup);
 }
 
 /* Do a deferred wakeup of rcu_nocb_kthread(). */
@@ -2334,8 +2344,8 @@ static void do_nocb_deferred_wakeup(struct rcu_data *rdp)
 
        if (!rcu_nocb_need_deferred_wakeup(rdp))
                return;
-       ndw = ACCESS_ONCE(rdp->nocb_defer_wakeup);
-       ACCESS_ONCE(rdp->nocb_defer_wakeup) = RCU_NOGP_WAKE_NOT;
+       ndw = READ_ONCE(rdp->nocb_defer_wakeup);
+       WRITE_ONCE(rdp->nocb_defer_wakeup, RCU_NOGP_WAKE_NOT);
        wake_nocb_leader(rdp, ndw == RCU_NOGP_WAKE_FORCE);
        trace_rcu_nocb_wake(rdp->rsp->name, rdp->cpu, TPS("DeferredWake"));
 }
@@ -2448,7 +2458,7 @@ static void rcu_spawn_one_nocb_kthread(struct rcu_state *rsp, int cpu)
        t = kthread_run(rcu_nocb_kthread, rdp_spawn,
                        "rcuo%c/%d", rsp->abbr, cpu);
        BUG_ON(IS_ERR(t));
-       ACCESS_ONCE(rdp_spawn->nocb_kthread) = t;
+       WRITE_ONCE(rdp_spawn->nocb_kthread, t);
 }
 
 /*
@@ -2663,7 +2673,7 @@ static void rcu_sysidle_enter(int irq)
 
        /* Record start of fully idle period. */
        j = jiffies;
-       ACCESS_ONCE(rdtp->dynticks_idle_jiffies) = j;
+       WRITE_ONCE(rdtp->dynticks_idle_jiffies, j);
        smp_mb__before_atomic();
        atomic_inc(&rdtp->dynticks_idle);
        smp_mb__after_atomic();
@@ -2681,7 +2691,7 @@ static void rcu_sysidle_enter(int irq)
  */
 void rcu_sysidle_force_exit(void)
 {
-       int oldstate = ACCESS_ONCE(full_sysidle_state);
+       int oldstate = READ_ONCE(full_sysidle_state);
        int newoldstate;
 
        /*
@@ -2794,7 +2804,7 @@ static void rcu_sysidle_check_cpu(struct rcu_data *rdp, bool *isidle,
        smp_mb(); /* Read counters before timestamps. */
 
        /* Pick up timestamps. */
-       j = ACCESS_ONCE(rdtp->dynticks_idle_jiffies);
+       j = READ_ONCE(rdtp->dynticks_idle_jiffies);
        /* If this CPU entered idle more recently, update maxj timestamp. */
        if (ULONG_CMP_LT(*maxj, j))
                *maxj = j;
@@ -2831,11 +2841,11 @@ static unsigned long rcu_sysidle_delay(void)
 static void rcu_sysidle(unsigned long j)
 {
        /* Check the current state. */
-       switch (ACCESS_ONCE(full_sysidle_state)) {
+       switch (READ_ONCE(full_sysidle_state)) {
        case RCU_SYSIDLE_NOT:
 
                /* First time all are idle, so note a short idle period. */
-               ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_SHORT;
+               WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_SHORT);
                break;
 
        case RCU_SYSIDLE_SHORT:
@@ -2873,7 +2883,7 @@ static void rcu_sysidle_cancel(void)
 {
        smp_mb();
        if (full_sysidle_state > RCU_SYSIDLE_SHORT)
-               ACCESS_ONCE(full_sysidle_state) = RCU_SYSIDLE_NOT;
+               WRITE_ONCE(full_sysidle_state, RCU_SYSIDLE_NOT);
 }
 
 /*
@@ -2925,7 +2935,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
        smp_mb();  /* grace period precedes setting inuse. */
 
        rshp = container_of(rhp, struct rcu_sysidle_head, rh);
-       ACCESS_ONCE(rshp->inuse) = 0;
+       WRITE_ONCE(rshp->inuse, 0);
 }
 
 /*
@@ -2936,7 +2946,7 @@ static void rcu_sysidle_cb(struct rcu_head *rhp)
 bool rcu_sys_is_idle(void)
 {
        static struct rcu_sysidle_head rsh;
-       int rss = ACCESS_ONCE(full_sysidle_state);
+       int rss = READ_ONCE(full_sysidle_state);
 
        if (WARN_ON_ONCE(smp_processor_id() != tick_do_timer_cpu))
                return false;
@@ -2964,7 +2974,7 @@ bool rcu_sys_is_idle(void)
                        }
                        rcu_sysidle_report(rcu_state_p, isidle, maxj, false);
                        oldrss = rss;
-                       rss = ACCESS_ONCE(full_sysidle_state);
+                       rss = READ_ONCE(full_sysidle_state);
                }
        }
 
@@ -3048,10 +3058,10 @@ static bool rcu_nohz_full_cpu(struct rcu_state *rsp)
 #ifdef CONFIG_NO_HZ_FULL
        if (tick_nohz_full_cpu(smp_processor_id()) &&
            (!rcu_gp_in_progress(rsp) ||
-            ULONG_CMP_LT(jiffies, ACCESS_ONCE(rsp->gp_start) + HZ)))
-               return 1;
+            ULONG_CMP_LT(jiffies, READ_ONCE(rsp->gp_start) + HZ)))
+               return true;
 #endif /* #ifdef CONFIG_NO_HZ_FULL */
-       return 0;
+       return false;
 }
 
 /*
@@ -3077,7 +3087,7 @@ static void rcu_bind_gp_kthread(void)
 static void rcu_dynticks_task_enter(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
-       ACCESS_ONCE(current->rcu_tasks_idle_cpu) = smp_processor_id();
+       WRITE_ONCE(current->rcu_tasks_idle_cpu, smp_processor_id());
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
 
@@ -3085,6 +3095,6 @@ static void rcu_dynticks_task_enter(void)
 static void rcu_dynticks_task_exit(void)
 {
 #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL)
-       ACCESS_ONCE(current->rcu_tasks_idle_cpu) = -1;
+       WRITE_ONCE(current->rcu_tasks_idle_cpu, -1);
 #endif /* #if defined(CONFIG_TASKS_RCU) && defined(CONFIG_NO_HZ_FULL) */
 }
index f92361efd0f55d970d851604d34800e7d109ecc8..3ea7ffc7d5c4a75378899d87c805314864804b51 100644 (file)
@@ -277,7 +277,7 @@ static void print_one_rcu_state(struct seq_file *m, struct rcu_state *rsp)
        seq_printf(m, "nfqs=%lu/nfqsng=%lu(%lu) fqlh=%lu oqlen=%ld/%ld\n",
                   rsp->n_force_qs, rsp->n_force_qs_ngp,
                   rsp->n_force_qs - rsp->n_force_qs_ngp,
-                  ACCESS_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
+                  READ_ONCE(rsp->n_force_qs_lh), rsp->qlen_lazy, rsp->qlen);
        for (rnp = &rsp->node[0]; rnp - &rsp->node[0] < rcu_num_nodes; rnp++) {
                if (rnp->level != level) {
                        seq_puts(m, "\n");
@@ -323,8 +323,8 @@ static void show_one_rcugp(struct seq_file *m, struct rcu_state *rsp)
        struct rcu_node *rnp = &rsp->node[0];
 
        raw_spin_lock_irqsave(&rnp->lock, flags);
-       completed = ACCESS_ONCE(rsp->completed);
-       gpnum = ACCESS_ONCE(rsp->gpnum);
+       completed = READ_ONCE(rsp->completed);
+       gpnum = READ_ONCE(rsp->gpnum);
        if (completed == gpnum)
                gpage = 0;
        else
index 1f133350da01e360bc6048b3a458e8b8cc0bdefc..afaecb7a799af235f63afb6877050cf348e4247c 100644 (file)
@@ -150,14 +150,14 @@ void __rcu_read_unlock(void)
                barrier();  /* critical section before exit code. */
                t->rcu_read_lock_nesting = INT_MIN;
                barrier();  /* assign before ->rcu_read_unlock_special load */
-               if (unlikely(ACCESS_ONCE(t->rcu_read_unlock_special.s)))
+               if (unlikely(READ_ONCE(t->rcu_read_unlock_special.s)))
                        rcu_read_unlock_special(t);
                barrier();  /* ->rcu_read_unlock_special load before assign */
                t->rcu_read_lock_nesting = 0;
        }
 #ifdef CONFIG_PROVE_LOCKING
        {
-               int rrln = ACCESS_ONCE(t->rcu_read_lock_nesting);
+               int rrln = READ_ONCE(t->rcu_read_lock_nesting);
 
                WARN_ON_ONCE(rrln < 0 && rrln > INT_MIN / 2);
        }
@@ -389,17 +389,17 @@ module_param(rcu_cpu_stall_timeout, int, 0644);
 
 int rcu_jiffies_till_stall_check(void)
 {
-       int till_stall_check = ACCESS_ONCE(rcu_cpu_stall_timeout);
+       int till_stall_check = READ_ONCE(rcu_cpu_stall_timeout);
 
        /*
         * Limit check must be consistent with the Kconfig limits
         * for CONFIG_RCU_CPU_STALL_TIMEOUT.
         */
        if (till_stall_check < 3) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 3;
+               WRITE_ONCE(rcu_cpu_stall_timeout, 3);
                till_stall_check = 3;
        } else if (till_stall_check > 300) {
-               ACCESS_ONCE(rcu_cpu_stall_timeout) = 300;
+               WRITE_ONCE(rcu_cpu_stall_timeout, 300);
                till_stall_check = 300;
        }
        return till_stall_check * HZ + RCU_STALL_DELAY_DELTA;
@@ -550,12 +550,12 @@ static void check_holdout_task(struct task_struct *t,
 {
        int cpu;
 
-       if (!ACCESS_ONCE(t->rcu_tasks_holdout) ||
-           t->rcu_tasks_nvcsw != ACCESS_ONCE(t->nvcsw) ||
-           !ACCESS_ONCE(t->on_rq) ||
+       if (!READ_ONCE(t->rcu_tasks_holdout) ||
+           t->rcu_tasks_nvcsw != READ_ONCE(t->nvcsw) ||
+           !READ_ONCE(t->on_rq) ||
            (IS_ENABLED(CONFIG_NO_HZ_FULL) &&
             !is_idle_task(t) && t->rcu_tasks_idle_cpu >= 0)) {
-               ACCESS_ONCE(t->rcu_tasks_holdout) = false;
+               WRITE_ONCE(t->rcu_tasks_holdout, false);
                list_del_init(&t->rcu_tasks_holdout_list);
                put_task_struct(t);
                return;
@@ -639,11 +639,11 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                 */
                rcu_read_lock();
                for_each_process_thread(g, t) {
-                       if (t != current && ACCESS_ONCE(t->on_rq) &&
+                       if (t != current && READ_ONCE(t->on_rq) &&
                            !is_idle_task(t)) {
                                get_task_struct(t);
-                               t->rcu_tasks_nvcsw = ACCESS_ONCE(t->nvcsw);
-                               ACCESS_ONCE(t->rcu_tasks_holdout) = true;
+                               t->rcu_tasks_nvcsw = READ_ONCE(t->nvcsw);
+                               WRITE_ONCE(t->rcu_tasks_holdout, true);
                                list_add(&t->rcu_tasks_holdout_list,
                                         &rcu_tasks_holdouts);
                        }
@@ -672,7 +672,7 @@ static int __noreturn rcu_tasks_kthread(void *arg)
                        struct task_struct *t1;
 
                        schedule_timeout_interruptible(HZ);
-                       rtst = ACCESS_ONCE(rcu_task_stall_timeout);
+                       rtst = READ_ONCE(rcu_task_stall_timeout);
                        needreport = rtst > 0 &&
                                     time_after(jiffies, lastreport + rtst);
                        if (needreport)
@@ -728,7 +728,7 @@ static void rcu_spawn_tasks_kthread(void)
        static struct task_struct *rcu_tasks_kthread_ptr;
        struct task_struct *t;
 
-       if (ACCESS_ONCE(rcu_tasks_kthread_ptr)) {
+       if (READ_ONCE(rcu_tasks_kthread_ptr)) {
                smp_mb(); /* Ensure caller sees full kthread. */
                return;
        }
@@ -740,7 +740,7 @@ static void rcu_spawn_tasks_kthread(void)
        t = kthread_run(rcu_tasks_kthread, NULL, "rcu_tasks_kthread");
        BUG_ON(IS_ERR(t));
        smp_mb(); /* Ensure others see full kthread. */
-       ACCESS_ONCE(rcu_tasks_kthread_ptr) = t;
+       WRITE_ONCE(rcu_tasks_kthread_ptr, t);
        mutex_unlock(&rcu_tasks_kthread_mutex);
 }
 
index 46be8702487561cd88a7895fea8c6401d72e9ce6..67687973ce80d63d3f52698fb4b738b76964b896 100644 (file)
@@ -11,7 +11,7 @@ ifneq ($(CONFIG_SCHED_OMIT_FRAME_POINTER),y)
 CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
-obj-y += core.o proc.o clock.o cputime.o
+obj-y += core.o loadavg.o clock.o cputime.o
 obj-y += idle_task.o fair.o rt.o deadline.o stop_task.o
 obj-y += wait.o completion.o idle.o
 obj-$(CONFIG_SMP) += cpupri.o cpudeadline.o
index eae160dd669d9d8d58bb911595c6391b2732edb4..750ed601ddf78e6dcdc5f10818c98b34b5feea3a 100644 (file)
@@ -1,5 +1,3 @@
-#ifdef CONFIG_SCHED_AUTOGROUP
-
 #include "sched.h"
 
 #include <linux/proc_fs.h>
@@ -141,7 +139,7 @@ autogroup_move_group(struct task_struct *p, struct autogroup *ag)
 
        p->signal->autogroup = autogroup_kref_get(ag);
 
-       if (!ACCESS_ONCE(sysctl_sched_autogroup_enabled))
+       if (!READ_ONCE(sysctl_sched_autogroup_enabled))
                goto out;
 
        for_each_thread(p, t)
@@ -249,5 +247,3 @@ int autogroup_path(struct task_group *tg, char *buf, int buflen)
        return snprintf(buf, buflen, "%s-%ld", "/autogroup", tg->autogroup->id);
 }
 #endif /* CONFIG_SCHED_DEBUG */
-
-#endif /* CONFIG_SCHED_AUTOGROUP */
index 8bd047142816dea81894bb27ccc3c78a38ac3d61..890c95f2587a4d8c530c1a5df69eef8a65e5eaf7 100644 (file)
@@ -29,7 +29,7 @@ extern bool task_wants_autogroup(struct task_struct *p, struct task_group *tg);
 static inline struct task_group *
 autogroup_task_group(struct task_struct *p, struct task_group *tg)
 {
-       int enabled = ACCESS_ONCE(sysctl_sched_autogroup_enabled);
+       int enabled = READ_ONCE(sysctl_sched_autogroup_enabled);
 
        if (enabled && task_wants_autogroup(p, tg))
                return p->signal->autogroup->tg;
index b8f48763579bf556f359748c1eef03165a2ac118..c86935a7f1f813664476d311ec43d115efdd2a30 100644 (file)
 #define CREATE_TRACE_POINTS
 #include <trace/events/sched.h>
 
-void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period)
-{
-       unsigned long delta;
-       ktime_t soft, hard, now;
-
-       for (;;) {
-               if (hrtimer_active(period_timer))
-                       break;
-
-               now = hrtimer_cb_get_time(period_timer);
-               hrtimer_forward(period_timer, now, period);
-
-               soft = hrtimer_get_softexpires(period_timer);
-               hard = hrtimer_get_expires(period_timer);
-               delta = ktime_to_ns(ktime_sub(hard, soft));
-               __hrtimer_start_range_ns(period_timer, soft, delta,
-                                        HRTIMER_MODE_ABS_PINNED, 0);
-       }
-}
-
 DEFINE_MUTEX(sched_domains_mutex);
 DEFINE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
@@ -355,12 +335,11 @@ static enum hrtimer_restart hrtick(struct hrtimer *timer)
 
 #ifdef CONFIG_SMP
 
-static int __hrtick_restart(struct rq *rq)
+static void __hrtick_restart(struct rq *rq)
 {
        struct hrtimer *timer = &rq->hrtick_timer;
-       ktime_t time = hrtimer_get_softexpires(timer);
 
-       return __hrtimer_start_range_ns(timer, time, 0, HRTIMER_MODE_ABS_PINNED, 0);
+       hrtimer_start_expires(timer, HRTIMER_MODE_ABS_PINNED);
 }
 
 /*
@@ -440,8 +419,8 @@ void hrtick_start(struct rq *rq, u64 delay)
         * doesn't make sense. Rely on vruntime for fairness.
         */
        delay = max_t(u64, delay, 10000LL);
-       __hrtimer_start_range_ns(&rq->hrtick_timer, ns_to_ktime(delay), 0,
-                       HRTIMER_MODE_REL_PINNED, 0);
+       hrtimer_start(&rq->hrtick_timer, ns_to_ktime(delay),
+                     HRTIMER_MODE_REL_PINNED);
 }
 
 static inline void init_hrtick(void)
@@ -511,7 +490,7 @@ static bool set_nr_and_not_polling(struct task_struct *p)
 static bool set_nr_if_polling(struct task_struct *p)
 {
        struct thread_info *ti = task_thread_info(p);
-       typeof(ti->flags) old, val = ACCESS_ONCE(ti->flags);
+       typeof(ti->flags) old, val = READ_ONCE(ti->flags);
 
        for (;;) {
                if (!(val & _TIF_POLLING_NRFLAG))
@@ -541,6 +520,52 @@ static bool set_nr_if_polling(struct task_struct *p)
 #endif
 #endif
 
+void wake_q_add(struct wake_q_head *head, struct task_struct *task)
+{
+       struct wake_q_node *node = &task->wake_q;
+
+       /*
+        * Atomically grab the task; if ->wake_q is non-NULL already, it
+        * means it's already queued (either by us or someone else) and
+        * will get the wakeup due to that.
+        *
+        * This cmpxchg() implies a full barrier, which pairs with the write
+        * barrier implied by the wakeup in wake_up_q().
+        */
+       if (cmpxchg(&node->next, NULL, WAKE_Q_TAIL))
+               return;
+
+       get_task_struct(task);
+
+       /*
+        * The head is context local, there can be no concurrency.
+        */
+       *head->lastp = node;
+       head->lastp = &node->next;
+}
+
+void wake_up_q(struct wake_q_head *head)
+{
+       struct wake_q_node *node = head->first;
+
+       while (node != WAKE_Q_TAIL) {
+               struct task_struct *task;
+
+               task = container_of(node, struct task_struct, wake_q);
+               BUG_ON(!task);
+               /* task can safely be re-inserted now */
+               node = node->next;
+               task->wake_q.next = NULL;
+
+               /*
+                * wake_up_process() implies a wmb() to pair with the queueing
+                * in wake_q_add() so as not to miss wakeups.
+                */
+               wake_up_process(task);
+               put_task_struct(task);
+       }
+}
+
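A minimal caller of the new wake-queue helpers might look as follows;
the on-stack WAKE_Q() initializer is added alongside these helpers, and
example_lock/example_requeue are illustrative names only:

static DEFINE_RAW_SPINLOCK(example_lock);	/* illustrative */

static void example_requeue(struct task_struct *a, struct task_struct *b)
{
	WAKE_Q(wake_q);			/* on-stack, context-local head */

	raw_spin_lock(&example_lock);
	wake_q_add(&wake_q, a);		/* takes a task reference */
	wake_q_add(&wake_q, b);		/* already-queued tasks are skipped */
	raw_spin_unlock(&example_lock);

	wake_up_q(&wake_q);		/* the wakeups happen outside the lock */
}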
 /*
  * resched_curr - mark rq's current task 'to be rescheduled now'.
  *
@@ -593,13 +618,12 @@ void resched_cpu(int cpu)
  * selecting an idle cpu will add more delays to the timers than intended
  * (as that cpu's timer base may not be uptodate wrt jiffies etc).
  */
-int get_nohz_timer_target(int pinned)
+int get_nohz_timer_target(void)
 {
-       int cpu = smp_processor_id();
-       int i;
+       int i, cpu = smp_processor_id();
        struct sched_domain *sd;
 
-       if (pinned || !get_sysctl_timer_migration() || !idle_cpu(cpu))
+       if (!idle_cpu(cpu))
                return cpu;
 
        rcu_read_lock();
@@ -1049,7 +1073,7 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
                if (p->sched_class->migrate_task_rq)
                        p->sched_class->migrate_task_rq(p, new_cpu);
                p->se.nr_migrations++;
-               perf_sw_event_sched(PERF_COUNT_SW_CPU_MIGRATIONS, 1, 0);
+               perf_event_task_migrate(p);
        }
 
        __set_task_cpu(p, new_cpu);
@@ -2105,12 +2129,15 @@ void wake_up_new_task(struct task_struct *p)
 
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 
+static struct static_key preempt_notifier_key = STATIC_KEY_INIT_FALSE;
+
 /**
  * preempt_notifier_register - tell me when current is being preempted & rescheduled
  * @notifier: notifier struct to register
  */
 void preempt_notifier_register(struct preempt_notifier *notifier)
 {
+       static_key_slow_inc(&preempt_notifier_key);
        hlist_add_head(&notifier->link, &current->preempt_notifiers);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_register);
@@ -2119,15 +2146,16 @@ EXPORT_SYMBOL_GPL(preempt_notifier_register);
  * preempt_notifier_unregister - no longer interested in preemption notifications
  * @notifier: notifier struct to unregister
  *
- * This is safe to call from within a preemption notifier.
+ * This is *not* safe to call from within a preemption notifier.
  */
 void preempt_notifier_unregister(struct preempt_notifier *notifier)
 {
        hlist_del(&notifier->link);
+       static_key_slow_dec(&preempt_notifier_key);
 }
 EXPORT_SYMBOL_GPL(preempt_notifier_unregister);
 
-static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+static void __fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
        struct preempt_notifier *notifier;
 
@@ -2135,9 +2163,15 @@ static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
                notifier->ops->sched_in(notifier, raw_smp_processor_id());
 }
 
+static __always_inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+{
+       if (static_key_false(&preempt_notifier_key))
+               __fire_sched_in_preempt_notifiers(curr);
+}
+
 static void
-fire_sched_out_preempt_notifiers(struct task_struct *curr,
-                                struct task_struct *next)
+__fire_sched_out_preempt_notifiers(struct task_struct *curr,
+                                  struct task_struct *next)
 {
        struct preempt_notifier *notifier;
 
@@ -2145,13 +2179,21 @@ fire_sched_out_preempt_notifiers(struct task_struct *curr,
                notifier->ops->sched_out(notifier, next);
 }
 
+static __always_inline void
+fire_sched_out_preempt_notifiers(struct task_struct *curr,
+                                struct task_struct *next)
+{
+       if (static_key_false(&preempt_notifier_key))
+               __fire_sched_out_preempt_notifiers(curr, next);
+}
+
 #else /* !CONFIG_PREEMPT_NOTIFIERS */
 
-static void fire_sched_in_preempt_notifiers(struct task_struct *curr)
+static inline void fire_sched_in_preempt_notifiers(struct task_struct *curr)
 {
 }
 
-static void
+static inline void
 fire_sched_out_preempt_notifiers(struct task_struct *curr,
                                 struct task_struct *next)
 {
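The preempt-notifier hunks above apply the standard static-key pattern:
the hot-path branch compiles to a NOP while no notifier is registered,
so the common case pays nothing. Schematically, with illustrative names:

static void example_rare_work(void);

static struct static_key example_key = STATIC_KEY_INIT_FALSE;

void example_register(void)   { static_key_slow_inc(&example_key); }
void example_unregister(void) { static_key_slow_dec(&example_key); }

static __always_inline void example_hot_path(void)
{
	/* taken out of line only after the first registration */
	if (static_key_false(&example_key))
		example_rare_work();
}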
@@ -2396,9 +2438,9 @@ unsigned long nr_iowait_cpu(int cpu)
 
 void get_iowait_load(unsigned long *nr_waiters, unsigned long *load)
 {
-       struct rq *this = this_rq();
-       *nr_waiters = atomic_read(&this->nr_iowait);
-       *load = this->cpu_load[0];
+       struct rq *rq = this_rq();
+       *nr_waiters = atomic_read(&rq->nr_iowait);
+       *load = rq->load.weight;
 }
 
 #ifdef CONFIG_SMP
@@ -2496,6 +2538,7 @@ void scheduler_tick(void)
        update_rq_clock(rq);
        curr->sched_class->task_tick(rq, curr, 0);
        update_cpu_load_active(rq);
+       calc_global_load_tick(rq);
        raw_spin_unlock(&rq->lock);
 
        perf_event_task_tick();
@@ -2524,7 +2567,7 @@ void scheduler_tick(void)
 u64 scheduler_tick_max_deferment(void)
 {
        struct rq *rq = this_rq();
-       unsigned long next, now = ACCESS_ONCE(jiffies);
+       unsigned long next, now = READ_ONCE(jiffies);
 
        next = rq->last_sched_tick + HZ;
 
@@ -2725,9 +2768,7 @@ again:
  *          - return from syscall or exception to user-space
  *          - return from interrupt-handler to user-space
  *
- * WARNING: all callers must re-check need_resched() afterward and reschedule
- * accordingly in case an event triggered the need for rescheduling (such as
- * an interrupt waking up a task) while preemption was disabled in __schedule().
+ * WARNING: must be called with preemption disabled!
  */
 static void __sched __schedule(void)
 {
@@ -2736,7 +2777,6 @@ static void __sched __schedule(void)
        struct rq *rq;
        int cpu;
 
-       preempt_disable();
        cpu = smp_processor_id();
        rq = cpu_rq(cpu);
        rcu_note_context_switch();
@@ -2800,8 +2840,6 @@ static void __sched __schedule(void)
                raw_spin_unlock_irq(&rq->lock);
 
        post_schedule(rq);
-
-       sched_preempt_enable_no_resched();
 }
 
 static inline void sched_submit_work(struct task_struct *tsk)
@@ -2822,7 +2860,9 @@ asmlinkage __visible void __sched schedule(void)
 
        sched_submit_work(tsk);
        do {
+               preempt_disable();
                __schedule();
+               sched_preempt_enable_no_resched();
        } while (need_resched());
 }
 EXPORT_SYMBOL(schedule);
@@ -2861,15 +2901,14 @@ void __sched schedule_preempt_disabled(void)
 static void __sched notrace preempt_schedule_common(void)
 {
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               preempt_active_enter();
                __schedule();
-               __preempt_count_sub(PREEMPT_ACTIVE);
+               preempt_active_exit();
 
                /*
                 * Check again in case we missed a preemption opportunity
                 * between schedule and now.
                 */
-               barrier();
        } while (need_resched());
 }
 
@@ -2893,9 +2932,8 @@ asmlinkage __visible void __sched notrace preempt_schedule(void)
 NOKPROBE_SYMBOL(preempt_schedule);
 EXPORT_SYMBOL(preempt_schedule);
 
-#ifdef CONFIG_CONTEXT_TRACKING
 /**
- * preempt_schedule_context - preempt_schedule called by tracing
+ * preempt_schedule_notrace - preempt_schedule called by tracing
  *
  * The tracing infrastructure uses preempt_enable_notrace to prevent
  * recursion and tracing preempt enabling caused by the tracing
@@ -2908,7 +2946,7 @@ EXPORT_SYMBOL(preempt_schedule);
  * instead of preempt_schedule() to exit user context if needed before
  * calling the scheduler.
  */
-asmlinkage __visible void __sched notrace preempt_schedule_context(void)
+asmlinkage __visible void __sched notrace preempt_schedule_notrace(void)
 {
        enum ctx_state prev_ctx;
 
@@ -2916,7 +2954,13 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
                return;
 
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               /*
+                * Use raw __preempt_count() ops that don't call functions.
+                * We can't call functions before disabling preemption, as
+                * disabling preemption is what disarms preemption tracing
+                * recursion.
+                */
+               __preempt_count_add(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
+               barrier();
                /*
                 * Needs preempt disabled in case user_exit() is traced
                 * and the tracer calls preempt_enable_notrace() causing
@@ -2926,12 +2970,11 @@ asmlinkage __visible void __sched notrace preempt_schedule_context(void)
                __schedule();
                exception_exit(prev_ctx);
 
-               __preempt_count_sub(PREEMPT_ACTIVE);
                barrier();
+               __preempt_count_sub(PREEMPT_ACTIVE + PREEMPT_DISABLE_OFFSET);
        } while (need_resched());
 }
-EXPORT_SYMBOL_GPL(preempt_schedule_context);
-#endif /* CONFIG_CONTEXT_TRACKING */
+EXPORT_SYMBOL_GPL(preempt_schedule_notrace);
 
 #endif /* CONFIG_PREEMPT */
 
@@ -2951,17 +2994,11 @@ asmlinkage __visible void __sched preempt_schedule_irq(void)
        prev_state = exception_enter();
 
        do {
-               __preempt_count_add(PREEMPT_ACTIVE);
+               preempt_active_enter();
                local_irq_enable();
                __schedule();
                local_irq_disable();
-               __preempt_count_sub(PREEMPT_ACTIVE);
-
-               /*
-                * Check again in case we missed a preemption opportunity
-                * between schedule and now.
-                */
-               barrier();
+               preempt_active_exit();
        } while (need_resched());
 
        exception_exit(prev_state);
@@ -3039,7 +3076,6 @@ void rt_mutex_setprio(struct task_struct *p, int prio)
                if (!dl_prio(p->normal_prio) ||
                    (pi_task && dl_entity_preempt(&pi_task->dl, &p->dl))) {
                        p->dl.dl_boosted = 1;
-                       p->dl.dl_throttled = 0;
                        enqueue_flag = ENQUEUE_REPLENISH;
                } else
                        p->dl.dl_boosted = 0;
@@ -3299,15 +3335,18 @@ static void __setscheduler_params(struct task_struct *p,
 
 /* Actually do priority change: must hold pi & rq lock. */
 static void __setscheduler(struct rq *rq, struct task_struct *p,
-                          const struct sched_attr *attr)
+                          const struct sched_attr *attr, bool keep_boost)
 {
        __setscheduler_params(p, attr);
 
        /*
-        * If we get here, there was no pi waiters boosting the
-        * task. It is safe to use the normal prio.
+        * Keep any potential priority boost if called from
+        * sched_setscheduler().
         */
-       p->prio = normal_prio(p);
+       if (keep_boost)
+               p->prio = rt_mutex_get_effective_prio(p, normal_prio(p));
+       else
+               p->prio = normal_prio(p);
 
        if (dl_prio(p->prio))
                p->sched_class = &dl_sched_class;
@@ -3407,7 +3446,7 @@ static int __sched_setscheduler(struct task_struct *p,
        int newprio = dl_policy(attr->sched_policy) ? MAX_DL_PRIO - 1 :
                      MAX_RT_PRIO - 1 - attr->sched_priority;
        int retval, oldprio, oldpolicy = -1, queued, running;
-       int policy = attr->sched_policy;
+       int new_effective_prio, policy = attr->sched_policy;
        unsigned long flags;
        const struct sched_class *prev_class;
        struct rq *rq;
@@ -3589,15 +3628,14 @@ change:
        oldprio = p->prio;
 
        /*
-        * Special case for priority boosted tasks.
-        *
-        * If the new priority is lower or equal (user space view)
-        * than the current (boosted) priority, we just store the new
+        * Take priority boosted tasks into account. If the new
+        * effective priority is unchanged, we just store the new
         * normal parameters and do not touch the scheduler class and
         * the runqueue. This will be done when the task deboost
         * itself.
         */
-       if (rt_mutex_check_prio(p, newprio)) {
+       new_effective_prio = rt_mutex_get_effective_prio(p, newprio);
+       if (new_effective_prio == oldprio) {
                __setscheduler_params(p, attr);
                task_rq_unlock(rq, p, &flags);
                return 0;
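A hypothetical walk-through of the new check (lower kernel prio value
means higher priority): a task with normal prio 30 is PI-boosted to 10,
so oldprio == 10. If sched_setscheduler() now requests parameters
mapping to newprio == 20, rt_mutex_get_effective_prio(p, 20) returns
roughly min(20, 10) == 10 == oldprio, so only the normal parameters are
stored and the deboost path applies them later. A request mapping to
newprio == 5 instead yields 5 != 10 and takes the full dequeue/requeue
path.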
@@ -3611,7 +3649,7 @@ change:
                put_prev_task(rq, p);
 
        prev_class = p->sched_class;
-       __setscheduler(rq, p, attr);
+       __setscheduler(rq, p, attr, true);
 
        if (running)
                p->sched_class->set_curr_task(rq);
@@ -4386,10 +4424,7 @@ long __sched io_schedule_timeout(long timeout)
        long ret;
 
        current->in_iowait = 1;
-       if (old_iowait)
-               blk_schedule_flush_plug(current);
-       else
-               blk_flush_plug(current);
+       blk_schedule_flush_plug(current);
 
        delayacct_blkio_start();
        rq = raw_rq();
@@ -5314,7 +5349,7 @@ static struct notifier_block migration_notifier = {
        .priority = CPU_PRI_MIGRATION,
 };
 
-static void __cpuinit set_cpu_rq_start_time(void)
+static void set_cpu_rq_start_time(void)
 {
        int cpu = smp_processor_id();
        struct rq *rq = cpu_rq(cpu);
@@ -6996,27 +7031,23 @@ static int cpuset_cpu_inactive(struct notifier_block *nfb, unsigned long action,
        unsigned long flags;
        long cpu = (long)hcpu;
        struct dl_bw *dl_b;
+       bool overflow;
+       int cpus;
 
-       switch (action & ~CPU_TASKS_FROZEN) {
+       switch (action) {
        case CPU_DOWN_PREPARE:
-               /* explicitly allow suspend */
-               if (!(action & CPU_TASKS_FROZEN)) {
-                       bool overflow;
-                       int cpus;
-
-                       rcu_read_lock_sched();
-                       dl_b = dl_bw_of(cpu);
+               rcu_read_lock_sched();
+               dl_b = dl_bw_of(cpu);
 
-                       raw_spin_lock_irqsave(&dl_b->lock, flags);
-                       cpus = dl_bw_cpus(cpu);
-                       overflow = __dl_overflow(dl_b, cpus, 0, 0);
-                       raw_spin_unlock_irqrestore(&dl_b->lock, flags);
+               raw_spin_lock_irqsave(&dl_b->lock, flags);
+               cpus = dl_bw_cpus(cpu);
+               overflow = __dl_overflow(dl_b, cpus, 0, 0);
+               raw_spin_unlock_irqrestore(&dl_b->lock, flags);
 
-                       rcu_read_unlock_sched();
+               rcu_read_unlock_sched();
 
-                       if (overflow)
-                               return notifier_from_errno(-EBUSY);
-               }
+               if (overflow)
+                       return notifier_from_errno(-EBUSY);
                cpuset_update_active_cpus(false);
                break;
        case CPU_DOWN_PREPARE_FROZEN:
@@ -7075,8 +7106,6 @@ void __init sched_init_smp(void)
 }
 #endif /* CONFIG_SMP */
 
-const_debug unsigned int sysctl_timer_migration = 1;
-
 int in_sched_functions(unsigned long addr)
 {
        return in_lock_functions(addr) ||
@@ -7348,7 +7377,7 @@ static void normalize_task(struct rq *rq, struct task_struct *p)
        queued = task_on_rq_queued(p);
        if (queued)
                dequeue_task(rq, p, 0);
-       __setscheduler(rq, p, &attr);
+       __setscheduler(rq, p, &attr, false);
        if (queued) {
                enqueue_task(rq, p, 0);
                resched_curr(rq);
@@ -7741,11 +7770,11 @@ static long sched_group_rt_runtime(struct task_group *tg)
        return rt_runtime_us;
 }
 
-static int sched_group_set_rt_period(struct task_group *tg, long rt_period_us)
+static int sched_group_set_rt_period(struct task_group *tg, u64 rt_period_us)
 {
        u64 rt_runtime, rt_period;
 
-       rt_period = (u64)rt_period_us * NSEC_PER_USEC;
+       rt_period = rt_period_us * NSEC_PER_USEC;
        rt_runtime = tg->rt_bandwidth.rt_runtime;
 
        return tg_set_rt_bandwidth(tg, rt_period, rt_runtime);
@@ -8112,10 +8141,8 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
 
        __refill_cfs_bandwidth_runtime(cfs_b);
        /* restart the period timer (if active) to handle new period expiry */
-       if (runtime_enabled && cfs_b->timer_active) {
-               /* force a reprogram */
-               __start_cfs_bandwidth(cfs_b, true);
-       }
+       if (runtime_enabled)
+               start_cfs_bandwidth(cfs_b);
        raw_spin_unlock_irq(&cfs_b->lock);
 
        for_each_online_cpu(i) {
index 8394b1ee600c38ba6e9144a6326369b6ef0cdacd..f5a64ffad176f12b01381cb1dc2e25a05f02508d 100644 (file)
@@ -567,7 +567,7 @@ static void cputime_advance(cputime_t *counter, cputime_t new)
 {
        cputime_t old;
 
-       while (new > (old = ACCESS_ONCE(*counter)))
+       while (new > (old = READ_ONCE(*counter)))
                cmpxchg_cputime(counter, old, new);
 }
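cputime_advance() is the usual lock-free monotonic-maximum loop. The
same pattern in isolation, as a sketch with plain unsigned long instead
of cputime_t and an illustrative name:

static void advance_to_max(unsigned long *counter, unsigned long new)
{
	unsigned long old;

	/*
	 * Retry until *counter >= new. Concurrent writers only ever
	 * move the value forward, so the loop terminates.
	 */
	while (new > (old = READ_ONCE(*counter)))
		cmpxchg(counter, old, new);
}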
 
index 5e95145088fd37b3d07ccac66c3cd58f7effe10a..eac20c557a55cc83f8e9d7e62578868ba9436aff 100644 (file)
@@ -503,8 +503,6 @@ static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
        struct dl_rq *dl_rq = dl_rq_of_se(dl_se);
        struct rq *rq = rq_of_dl_rq(dl_rq);
        ktime_t now, act;
-       ktime_t soft, hard;
-       unsigned long range;
        s64 delta;
 
        if (boosted)
@@ -527,15 +525,9 @@ static int start_dl_timer(struct sched_dl_entity *dl_se, bool boosted)
        if (ktime_us_delta(act, now) < 0)
                return 0;
 
-       hrtimer_set_expires(&dl_se->dl_timer, act);
+       hrtimer_start(&dl_se->dl_timer, act, HRTIMER_MODE_ABS);
 
-       soft = hrtimer_get_softexpires(&dl_se->dl_timer);
-       hard = hrtimer_get_expires(&dl_se->dl_timer);
-       range = ktime_to_ns(ktime_sub(hard, soft));
-       __hrtimer_start_range_ns(&dl_se->dl_timer, soft,
-                                range, HRTIMER_MODE_ABS, 0);
-
-       return hrtimer_active(&dl_se->dl_timer);
+       return 1;
 }
 
 /*
@@ -640,7 +632,7 @@ void init_dl_task_timer(struct sched_dl_entity *dl_se)
 }
 
 static
-int dl_runtime_exceeded(struct rq *rq, struct sched_dl_entity *dl_se)
+int dl_runtime_exceeded(struct sched_dl_entity *dl_se)
 {
        return (dl_se->runtime <= 0);
 }
@@ -684,7 +676,7 @@ static void update_curr_dl(struct rq *rq)
        sched_rt_avg_update(rq, delta_exec);
 
        dl_se->runtime -= dl_se->dl_yielded ? 0 : delta_exec;
-       if (dl_runtime_exceeded(rq, dl_se)) {
+       if (dl_runtime_exceeded(dl_se)) {
                dl_se->dl_throttled = 1;
                __dequeue_task_dl(rq, curr, 0);
                if (unlikely(!start_dl_timer(dl_se, curr->dl.dl_boosted)))
@@ -995,7 +987,7 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
        rq = cpu_rq(cpu);
 
        rcu_read_lock();
-       curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+       curr = READ_ONCE(rq->curr); /* unlocked access */
 
        /*
         * If we are dealing with a -deadline task, we must
@@ -1012,7 +1004,9 @@ select_task_rq_dl(struct task_struct *p, int cpu, int sd_flag, int flags)
            (p->nr_cpus_allowed > 1)) {
                int target = find_later_rq(p);
 
-               if (target != -1)
+               if (target != -1 &&
+                               dl_time_before(p->dl.deadline,
+                                       cpu_rq(target)->dl.earliest_dl.curr))
                        cpu = target;
        }
        rcu_read_unlock();
@@ -1230,6 +1224,32 @@ next_node:
        return NULL;
 }
 
+/*
+ * Return the rq's earliest pushable task that is suitable to run on
+ * the given CPU, or NULL if there is none:
+ */
+static struct task_struct *pick_earliest_pushable_dl_task(struct rq *rq, int cpu)
+{
+       struct rb_node *next_node = rq->dl.pushable_dl_tasks_leftmost;
+       struct task_struct *p = NULL;
+
+       if (!has_pushable_dl_tasks(rq))
+               return NULL;
+
+       while (next_node) {
+               p = rb_entry(next_node, struct task_struct, pushable_dl_tasks);
+
+               if (pick_dl_task(rq, p, cpu))
+                       return p;
+
+               next_node = rb_next(next_node);
+       }
+
+       return NULL;
+}
+
 static DEFINE_PER_CPU(cpumask_var_t, local_cpu_mask_dl);
 
 static int find_later_rq(struct task_struct *task)
@@ -1333,6 +1353,17 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
 
                later_rq = cpu_rq(cpu);
 
+               if (!dl_time_before(task->dl.deadline,
+                                       later_rq->dl.earliest_dl.curr)) {
+                       /*
+                        * Target rq has tasks of equal or earlier deadline,
+                        * retrying does not release any lock and is unlikely
+                        * to yield a different result.
+                        */
+                       later_rq = NULL;
+                       break;
+               }
+
                /* Retry if something changed. */
                if (double_lock_balance(rq, later_rq)) {
                        if (unlikely(task_rq(task) != rq ||
@@ -1514,7 +1545,7 @@ static int pull_dl_task(struct rq *this_rq)
                if (src_rq->dl.dl_nr_running <= 1)
                        goto skip;
 
-               p = pick_next_earliest_dl_task(src_rq, this_cpu);
+               p = pick_earliest_pushable_dl_task(src_rq, this_cpu);
 
                /*
                 * We found a task to be pulled if:
@@ -1659,7 +1690,7 @@ static void rq_offline_dl(struct rq *rq)
        cpudl_clear_freecpu(&rq->rd->cpudl, rq->cpu);
 }
 
-void init_sched_dl_class(void)
+void __init init_sched_dl_class(void)
 {
        unsigned int i;
 
index a245c1fc6f0a610f17e2d13635306d681e2ef821..315c68e015d955d6227a83b6b951482cffd8a68e 100644 (file)
@@ -132,12 +132,14 @@ print_task(struct seq_file *m, struct rq *rq, struct task_struct *p)
                p->prio);
 #ifdef CONFIG_SCHEDSTATS
        SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
-               SPLIT_NS(p->se.vruntime),
+               SPLIT_NS(p->se.statistics.wait_sum),
                SPLIT_NS(p->se.sum_exec_runtime),
                SPLIT_NS(p->se.statistics.sum_sleep_runtime));
 #else
-       SEQ_printf(m, "%15Ld %15Ld %15Ld.%06ld %15Ld.%06ld %15Ld.%06ld",
-               0LL, 0LL, 0LL, 0L, 0LL, 0L, 0LL, 0L);
+       SEQ_printf(m, "%9Ld.%06ld %9Ld.%06ld %9Ld.%06ld",
+               0LL, 0L,
+               SPLIT_NS(p->se.sum_exec_runtime),
+               0LL, 0L);
 #endif
 #ifdef CONFIG_NUMA_BALANCING
        SEQ_printf(m, " %d", task_node(p));
@@ -156,7 +158,7 @@ static void print_rq(struct seq_file *m, struct rq *rq, int rq_cpu)
        SEQ_printf(m,
        "\nrunnable tasks:\n"
        "            task   PID         tree-key  switches  prio"
-       "     exec-runtime         sum-exec        sum-sleep\n"
+       "     wait-time             sum-exec        sum-sleep\n"
        "------------------------------------------------------"
        "----------------------------------------------------\n");
 
@@ -230,8 +232,6 @@ void print_cfs_rq(struct seq_file *m, int cpu, struct cfs_rq *cfs_rq)
 #endif
 #endif
 #ifdef CONFIG_CFS_BANDWIDTH
-       SEQ_printf(m, "  .%-30s: %d\n", "tg->cfs_bandwidth.timer_active",
-                       cfs_rq->tg->cfs_bandwidth.timer_active);
        SEQ_printf(m, "  .%-30s: %d\n", "throttled",
                        cfs_rq->throttled);
        SEQ_printf(m, "  .%-30s: %d\n", "throttle_count",
@@ -582,6 +582,7 @@ void proc_sched_show_task(struct task_struct *p, struct seq_file *m)
        nr_switches = p->nvcsw + p->nivcsw;
 
 #ifdef CONFIG_SCHEDSTATS
+       PN(se.statistics.sum_sleep_runtime);
        PN(se.statistics.wait_start);
        PN(se.statistics.sleep_start);
        PN(se.statistics.block_start);
index ffeaa4105e48a36105ecaea8967082e1e7a7af98..40a7fcbf491eb7d1f5735d0e9efd25c23d4d60a9 100644 (file)
@@ -141,9 +141,9 @@ static inline void update_load_set(struct load_weight *lw, unsigned long w)
  *
  * This idea comes from the SD scheduler of Con Kolivas:
  */
-static int get_update_sysctl_factor(void)
+static unsigned int get_update_sysctl_factor(void)
 {
-       unsigned int cpus = min_t(int, num_online_cpus(), 8);
+       unsigned int cpus = min_t(unsigned int, num_online_cpus(), 8);
        unsigned int factor;
 
        switch (sysctl_sched_tunable_scaling) {
@@ -576,7 +576,7 @@ int sched_proc_update_handler(struct ctl_table *table, int write,
                loff_t *ppos)
 {
        int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);
-       int factor = get_update_sysctl_factor();
+       unsigned int factor = get_update_sysctl_factor();
 
        if (ret || !write)
                return ret;
@@ -834,7 +834,7 @@ static unsigned int task_nr_scan_windows(struct task_struct *p)
 
 static unsigned int task_scan_min(struct task_struct *p)
 {
-       unsigned int scan_size = ACCESS_ONCE(sysctl_numa_balancing_scan_size);
+       unsigned int scan_size = READ_ONCE(sysctl_numa_balancing_scan_size);
        unsigned int scan, floor;
        unsigned int windows = 1;
 
@@ -1198,11 +1198,9 @@ static void task_numa_assign(struct task_numa_env *env,
 static bool load_too_imbalanced(long src_load, long dst_load,
                                struct task_numa_env *env)
 {
+       long imb, old_imb;
+       long orig_src_load, orig_dst_load;
        long src_capacity, dst_capacity;
-       long orig_src_load;
-       long load_a, load_b;
-       long moved_load;
-       long imb;
 
        /*
         * The load is corrected for the CPU capacity available on each node.
@@ -1215,39 +1213,30 @@ static bool load_too_imbalanced(long src_load, long dst_load,
        dst_capacity = env->dst_stats.compute_capacity;
 
        /* We care about the slope of the imbalance, not the direction. */
-       load_a = dst_load;
-       load_b = src_load;
-       if (load_a < load_b)
-               swap(load_a, load_b);
+       if (dst_load < src_load)
+               swap(dst_load, src_load);
 
        /* Is the difference below the threshold? */
-       imb = load_a * src_capacity * 100 -
-               load_b * dst_capacity * env->imbalance_pct;
+       imb = dst_load * src_capacity * 100 -
+             src_load * dst_capacity * env->imbalance_pct;
        if (imb <= 0)
                return false;
 
        /*
         * The imbalance is above the allowed threshold.
-        * Allow a move that brings us closer to a balanced situation,
-        * without moving things past the point of balance.
+        * Compare it with the old imbalance.
         */
        orig_src_load = env->src_stats.load;
+       orig_dst_load = env->dst_stats.load;
 
-       /*
-        * In a task swap, there will be one load moving from src to dst,
-        * and another moving back. This is the net sum of both moves.
-        * A simple task move will always have a positive value.
-        * Allow the move if it brings the system closer to a balanced
-        * situation, without crossing over the balance point.
-        */
-       moved_load = orig_src_load - src_load;
+       if (orig_dst_load < orig_src_load)
+               swap(orig_dst_load, orig_src_load);
 
-       if (moved_load > 0)
-               /* Moving src -> dst. Did we overshoot balance? */
-               return src_load * dst_capacity < dst_load * src_capacity;
-       else
-               /* Moving dst -> src. Did we overshoot balance? */
-               return dst_load * src_capacity < src_load * dst_capacity;
+       old_imb = orig_dst_load * src_capacity * 100 -
+                 orig_src_load * dst_capacity * env->imbalance_pct;
+
+       /* Would this change make things worse? */
+       return (imb > old_imb);
 }
 
 /*
@@ -1409,6 +1398,30 @@ static void task_numa_find_cpu(struct task_numa_env *env,
        }
 }
 
+/* Only move tasks to a NUMA node less busy than the current node. */
+static bool numa_has_capacity(struct task_numa_env *env)
+{
+       struct numa_stats *src = &env->src_stats;
+       struct numa_stats *dst = &env->dst_stats;
+
+       if (src->has_free_capacity && !dst->has_free_capacity)
+               return false;
+
+       /*
+        * Only consider a task move if the source has a higher load
+        * than the destination, corrected for CPU capacity on each node.
+        *
+        *      src->load                dst->load
+        * --------------------- vs ---------------------
+        * src->compute_capacity    dst->compute_capacity
+        */
+       if (src->load * dst->compute_capacity >
+           dst->load * src->compute_capacity)
+               return true;
+
+       return false;
+}
+
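The test above is plain cross-multiplication to avoid a division. With
hypothetical numbers src->load = 2048, dst->load = 1536 and equal
compute capacities of 1024, it reads 2048 * 1024 > 1536 * 1024, i.e.
2.0 vs 1.5 tasks' worth of load per unit of capacity, so the source
node is busier and the move is allowed.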
 static int task_numa_migrate(struct task_struct *p)
 {
        struct task_numa_env env = {
@@ -1463,7 +1476,8 @@ static int task_numa_migrate(struct task_struct *p)
        update_numa_stats(&env.dst_stats, env.dst_nid);
 
        /* Try to find a spot on the preferred nid. */
-       task_numa_find_cpu(&env, taskimp, groupimp);
+       if (numa_has_capacity(&env))
+               task_numa_find_cpu(&env, taskimp, groupimp);
 
        /*
         * Look at other nodes in these cases:
@@ -1494,7 +1508,8 @@ static int task_numa_migrate(struct task_struct *p)
                        env.dist = dist;
                        env.dst_nid = nid;
                        update_numa_stats(&env.dst_stats, env.dst_nid);
-                       task_numa_find_cpu(&env, taskimp, groupimp);
+                       if (numa_has_capacity(&env))
+                               task_numa_find_cpu(&env, taskimp, groupimp);
                }
        }
 
@@ -1794,7 +1809,12 @@ static void task_numa_placement(struct task_struct *p)
        u64 runtime, period;
        spinlock_t *group_lock = NULL;
 
-       seq = ACCESS_ONCE(p->mm->numa_scan_seq);
+       /*
+        * The p->mm->numa_scan_seq field gets updated without
+        * exclusive access. Use READ_ONCE() here to ensure
+        * that the field is read in a single access:
+        */
+       seq = READ_ONCE(p->mm->numa_scan_seq);
        if (p->numa_scan_seq == seq)
                return;
        p->numa_scan_seq = seq;
@@ -1938,7 +1958,7 @@ static void task_numa_group(struct task_struct *p, int cpupid, int flags,
        }
 
        rcu_read_lock();
-       tsk = ACCESS_ONCE(cpu_rq(cpu)->curr);
+       tsk = READ_ONCE(cpu_rq(cpu)->curr);
 
        if (!cpupid_match_pid(tsk, cpupid))
                goto no_join;
@@ -2107,7 +2127,15 @@ void task_numa_fault(int last_cpupid, int mem_node, int pages, int flags)
 
 static void reset_ptenuma_scan(struct task_struct *p)
 {
-       ACCESS_ONCE(p->mm->numa_scan_seq)++;
+       /*
+        * We only did a read acquisition of the mmap sem, so
+        * p->mm->numa_scan_seq is written to without exclusive access
+        * and the update is not guaranteed to be atomic. That's not
+        * much of an issue though, since this is just used for
+        * statistical sampling. Use READ_ONCE()/WRITE_ONCE(), which are
+        * cheap, to keep the compiler from tearing or fusing the accesses:
+        */
+       WRITE_ONCE(p->mm->numa_scan_seq, READ_ONCE(p->mm->numa_scan_seq) + 1);
        p->mm->numa_scan_offset = 0;
 }
 
@@ -2181,7 +2209,7 @@ void task_numa_work(struct callback_head *work)
        }
        for (; vma; vma = vma->vm_next) {
                if (!vma_migratable(vma) || !vma_policy_mof(vma) ||
-                       is_vm_hugetlb_page(vma)) {
+                       is_vm_hugetlb_page(vma) || (vma->vm_flags & VM_MIXEDMAP)) {
                        continue;
                }
 
@@ -3476,16 +3504,7 @@ static int assign_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        if (cfs_b->quota == RUNTIME_INF)
                amount = min_amount;
        else {
-               /*
-                * If the bandwidth pool has become inactive, then at least one
-                * period must have elapsed since the last consumption.
-                * Refresh the global state and ensure bandwidth timer becomes
-                * active.
-                */
-               if (!cfs_b->timer_active) {
-                       __refill_cfs_bandwidth_runtime(cfs_b);
-                       __start_cfs_bandwidth(cfs_b, false);
-               }
+               start_cfs_bandwidth(cfs_b);
 
                if (cfs_b->runtime > 0) {
                        amount = min(cfs_b->runtime, min_amount);
@@ -3634,6 +3653,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
        struct sched_entity *se;
        long task_delta, dequeue = 1;
+       bool empty;
 
        se = cfs_rq->tg->se[cpu_of(rq_of(cfs_rq))];
 
@@ -3663,13 +3683,21 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
        cfs_rq->throttled = 1;
        cfs_rq->throttled_clock = rq_clock(rq);
        raw_spin_lock(&cfs_b->lock);
+       empty = list_empty(&cfs_rq->throttled_list);
+
        /*
         * Add to the _head_ of the list, so that an already-started
         * distribute_cfs_runtime will not see us
         */
        list_add_rcu(&cfs_rq->throttled_list, &cfs_b->throttled_cfs_rq);
-       if (!cfs_b->timer_active)
-               __start_cfs_bandwidth(cfs_b, false);
+
+       /*
+        * If we're the first throttled task, make sure the bandwidth
+        * timer is running.
+        */
+       if (empty)
+               start_cfs_bandwidth(cfs_b);
+
        raw_spin_unlock(&cfs_b->lock);
 }
 
@@ -3784,13 +3812,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        if (cfs_b->idle && !throttled)
                goto out_deactivate;
 
-       /*
-        * if we have relooped after returning idle once, we need to update our
-        * status as actually running, so that other cpus doing
-        * __start_cfs_bandwidth will stop trying to cancel us.
-        */
-       cfs_b->timer_active = 1;
-
        __refill_cfs_bandwidth_runtime(cfs_b);
 
        if (!throttled) {
@@ -3835,7 +3856,6 @@ static int do_sched_cfs_period_timer(struct cfs_bandwidth *cfs_b, int overrun)
        return 0;
 
 out_deactivate:
-       cfs_b->timer_active = 0;
        return 1;
 }
 
@@ -3850,7 +3870,7 @@ static const u64 cfs_bandwidth_slack_period = 5 * NSEC_PER_MSEC;
  * Are we near the end of the current quota period?
  *
  * Requires cfs_b->lock for hrtimer_expires_remaining to be safe against the
- * hrtimer base being cleared by __hrtimer_start_range_ns. In the case of
+ * hrtimer base being cleared by hrtimer_start. In the case of
  * migrate_hrtimers, base is never cleared, so we are fine.
  */
 static int runtime_refresh_within(struct cfs_bandwidth *cfs_b, u64 min_expire)
@@ -3878,8 +3898,9 @@ static void start_cfs_slack_bandwidth(struct cfs_bandwidth *cfs_b)
        if (runtime_refresh_within(cfs_b, min_left))
                return;
 
-       start_bandwidth_timer(&cfs_b->slack_timer,
-                               ns_to_ktime(cfs_bandwidth_slack_period));
+       hrtimer_start(&cfs_b->slack_timer,
+                       ns_to_ktime(cfs_bandwidth_slack_period),
+                       HRTIMER_MODE_REL);
 }
 
 /* we know any runtime found here is valid as update_curr() precedes return */
@@ -3999,6 +4020,7 @@ static enum hrtimer_restart sched_cfs_slack_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, slack_timer);
+
        do_sched_cfs_slack_timer(cfs_b);
 
        return HRTIMER_NORESTART;
@@ -4008,20 +4030,19 @@ static enum hrtimer_restart sched_cfs_period_timer(struct hrtimer *timer)
 {
        struct cfs_bandwidth *cfs_b =
                container_of(timer, struct cfs_bandwidth, period_timer);
-       ktime_t now;
        int overrun;
        int idle = 0;
 
        raw_spin_lock(&cfs_b->lock);
        for (;;) {
-               now = hrtimer_cb_get_time(timer);
-               overrun = hrtimer_forward(timer, now, cfs_b->period);
-
+               overrun = hrtimer_forward_now(timer, cfs_b->period);
                if (!overrun)
                        break;
 
                idle = do_sched_cfs_period_timer(cfs_b, overrun);
        }
+       if (idle)
+               cfs_b->period_active = 0;
        raw_spin_unlock(&cfs_b->lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
@@ -4035,7 +4056,7 @@ void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
        cfs_b->period = ns_to_ktime(default_cfs_period());
 
        INIT_LIST_HEAD(&cfs_b->throttled_cfs_rq);
-       hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+       hrtimer_init(&cfs_b->period_timer, CLOCK_MONOTONIC, HRTIMER_MODE_ABS_PINNED);
        cfs_b->period_timer.function = sched_cfs_period_timer;
        hrtimer_init(&cfs_b->slack_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        cfs_b->slack_timer.function = sched_cfs_slack_timer;
@@ -4047,28 +4068,15 @@ static void init_cfs_rq_runtime(struct cfs_rq *cfs_rq)
        INIT_LIST_HEAD(&cfs_rq->throttled_list);
 }
 
-/* requires cfs_b->lock, may release to reprogram timer */
-void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force)
+void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
 {
-       /*
-        * The timer may be active because we're trying to set a new bandwidth
-        * period or because we're racing with the tear-down path
-        * (timer_active==0 becomes visible before the hrtimer call-back
-        * terminates).  In either case we ensure that it's re-programmed
-        */
-       while (unlikely(hrtimer_active(&cfs_b->period_timer)) &&
-              hrtimer_try_to_cancel(&cfs_b->period_timer) < 0) {
-               /* bounce the lock to allow do_sched_cfs_period_timer to run */
-               raw_spin_unlock(&cfs_b->lock);
-               cpu_relax();
-               raw_spin_lock(&cfs_b->lock);
-               /* if someone else restarted the timer then we're done */
-               if (!force && cfs_b->timer_active)
-                       return;
-       }
+       lockdep_assert_held(&cfs_b->lock);
 
-       cfs_b->timer_active = 1;
-       start_bandwidth_timer(&cfs_b->period_timer, cfs_b->period);
+       if (!cfs_b->period_active) {
+               cfs_b->period_active = 1;
+               hrtimer_forward_now(&cfs_b->period_timer, cfs_b->period);
+               hrtimer_start_expires(&cfs_b->period_timer, HRTIMER_MODE_ABS_PINNED);
+       }
 }
 
 static void destroy_cfs_bandwidth(struct cfs_bandwidth *cfs_b)
@@ -4323,6 +4331,189 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 }
 
 #ifdef CONFIG_SMP
+
+/*
+ * per rq 'load' array crap; XXX kill this.
+ */
+
+/*
+ * The exact cpuload at various idx values, calculated at every tick would be
+ * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
+ *
+ * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
+ * on nth tick when cpu may be busy, then we have:
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
+ *
+ * decay_load_missed() below does efficient calculation of
+ * load = ((2^idx - 1) / 2^idx)^(n-1) * load
+ * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
+ *
+ * The calculation is approximated on a 128 point scale.
+ * degrade_zero_ticks is the number of ticks after which load at any
+ * particular idx is approximated to be zero.
+ * degrade_factor is a precomputed table, a row for each load idx.
+ * Each column corresponds to degradation factor for a power of two ticks,
+ * based on 128 point scale.
+ * Example:
+ * row 2, col 3 (=12) says that the degradation at load idx 2 after
+ * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
+ *
+ * With this power of 2 load factors, we can degrade the load n times
+ * by looking at 1 bits in n and doing as many mult/shift instead of
+ * n mult/shifts needed by the exact degradation.
+ */
+#define DEGRADE_SHIFT          7
+static const unsigned char
+               degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
+static const unsigned char
+               degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
+                                       {0, 0, 0, 0, 0, 0, 0, 0},
+                                       {64, 32, 8, 0, 0, 0, 0, 0},
+                                       {96, 72, 40, 12, 1, 0, 0},
+                                       {112, 98, 75, 43, 15, 1, 0},
+                                       {120, 112, 98, 76, 45, 16, 2} };
+
+/*
+ * Update cpu_load for any ticks missed due to tickless idle. The backlog
+ * covers ticks when the CPU was idle, so we just decay the old load
+ * without adding any new load.
+ */
+static unsigned long
+decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
+{
+       int j = 0;
+
+       if (!missed_updates)
+               return load;
+
+       if (missed_updates >= degrade_zero_ticks[idx])
+               return 0;
+
+       if (idx == 1)
+               return load >> missed_updates;
+
+       while (missed_updates) {
+               if (missed_updates % 2)
+                       load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
+
+               missed_updates >>= 1;
+               j++;
+       }
+       return load;
+}
+
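A worked pass through decay_load_missed(), assuming load = 1000,
idx = 2 and missed_updates = 8: binary 1000 has only bit 3 set, so the
loop performs a single multiply,

	load = (1000 * degrade_factor[2][3]) >> DEGRADE_SHIFT
	     = (1000 * 12) >> 7 = 93,

which approximates the exact 1000 * (3/4)^8 ~= 100 on the 128-point
scale.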
+/*
+ * Update rq->cpu_load[] statistics. This function is usually called every
+ * scheduler tick (TICK_NSEC). With tickless idle this will not be called
+ * every tick. We fix it up based on jiffies.
+ */
+static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
+                             unsigned long pending_updates)
+{
+       int i, scale;
+
+       this_rq->nr_load_updates++;
+
+       /* Update our load: */
+       this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
+       for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
+               unsigned long old_load, new_load;
+
+               /* scale is effectively 1 << i now, and >> i divides by scale */
+
+               old_load = this_rq->cpu_load[i];
+               old_load = decay_load_missed(old_load, pending_updates - 1, i);
+               new_load = this_load;
+               /*
+                * Round up the averaging division if load is increasing. This
+                * prevents us from getting stuck on 9 if the load is 10, for
+                * example.
+                */
+               if (new_load > old_load)
+                       new_load += scale - 1;
+
+               this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
+       }
+
+       sched_avg_update(this_rq);
+}
+
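The round-up in __update_cpu_load() matters for convergence. Take, for
example, idx = 1 (scale = 2), old_load = 9 and a current load of 10:
without the adjustment, (9 * 1 + 10) >> 1 == 9 and the average sticks
at 9 forever; with new_load bumped to 11, (9 * 1 + 11) >> 1 == 10 and
cpu_load[1] reaches the true load.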
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * There is no sane way to deal with nohz on smp when using jiffies because the
+ * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
+ * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
+ *
+ * Therefore we cannot use the delta approach from the regular tick since that
+ * would seriously skew the load calculation. However we'll make do for those
+ * updates happening while idle (nohz_idle_balance) or coming out of idle
+ * (tick_nohz_idle_exit).
+ *
+ * This means we might still be one tick off for nohz periods.
+ */
+
+/*
+ * Called from nohz_idle_balance() to update the load ratings before doing the
+ * idle balance.
+ */
+static void update_idle_cpu_load(struct rq *this_rq)
+{
+       unsigned long curr_jiffies = READ_ONCE(jiffies);
+       unsigned long load = this_rq->cfs.runnable_load_avg;
+       unsigned long pending_updates;
+
+       /*
+        * bail if there's load or we're actually up-to-date.
+        */
+       if (load || curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       this_rq->last_load_update_tick = curr_jiffies;
+
+       __update_cpu_load(this_rq, load, pending_updates);
+}
+
+/*
+ * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
+ */
+void update_cpu_load_nohz(void)
+{
+       struct rq *this_rq = this_rq();
+       unsigned long curr_jiffies = READ_ONCE(jiffies);
+       unsigned long pending_updates;
+
+       if (curr_jiffies == this_rq->last_load_update_tick)
+               return;
+
+       raw_spin_lock(&this_rq->lock);
+       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
+       if (pending_updates) {
+               this_rq->last_load_update_tick = curr_jiffies;
+               /*
+                * We were idle, this means load 0, the current load might be
+                * !0 due to remote wakeups and the sort.
+                */
+               __update_cpu_load(this_rq, 0, pending_updates);
+       }
+       raw_spin_unlock(&this_rq->lock);
+}
+#endif /* CONFIG_NO_HZ_COMMON */
+
+/*
+ * Called from scheduler_tick()
+ */
+void update_cpu_load_active(struct rq *this_rq)
+{
+       unsigned long load = this_rq->cfs.runnable_load_avg;
+       /*
+        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
+        */
+       this_rq->last_load_update_tick = jiffies;
+       __update_cpu_load(this_rq, load, 1);
+}
+
 /* Used instead of source_load when we know the type == 0 */
 static unsigned long weighted_cpuload(const int cpu)
 {
@@ -4375,7 +4566,7 @@ static unsigned long capacity_orig_of(int cpu)
 static unsigned long cpu_avg_load_per_task(int cpu)
 {
        struct rq *rq = cpu_rq(cpu);
-       unsigned long nr_running = ACCESS_ONCE(rq->cfs.h_nr_running);
+       unsigned long nr_running = READ_ONCE(rq->cfs.h_nr_running);
        unsigned long load_avg = rq->cfs.runnable_load_avg;
 
        if (nr_running)
@@ -5126,18 +5317,21 @@ again:
                 * entity, update_curr() will update its vruntime, otherwise
                 * forget we've ever seen it.
                 */
-               if (curr && curr->on_rq)
-                       update_curr(cfs_rq);
-               else
-                       curr = NULL;
+               if (curr) {
+                       if (curr->on_rq)
+                               update_curr(cfs_rq);
+                       else
+                               curr = NULL;
 
-               /*
-                * This call to check_cfs_rq_runtime() will do the throttle and
-                * dequeue its entity in the parent(s). Therefore the 'simple'
-                * nr_running test will indeed be correct.
-                */
-               if (unlikely(check_cfs_rq_runtime(cfs_rq)))
-                       goto simple;
+                       /*
+                        * This call to check_cfs_rq_runtime() will do the
+                        * throttle and dequeue its entity in the parent(s).
+                        * Therefore the 'simple' nr_running test will indeed
+                        * be correct.
+                        */
+                       if (unlikely(check_cfs_rq_runtime(cfs_rq)))
+                               goto simple;
+               }
 
                se = pick_next_entity(cfs_rq, curr);
                cfs_rq = group_cfs_rq(se);
@@ -5467,10 +5661,15 @@ static int task_hot(struct task_struct *p, struct lb_env *env)
 }
 
 #ifdef CONFIG_NUMA_BALANCING
-/* Returns true if the destination node has incurred more faults */
+/*
+ * Returns true if the destination node is the preferred node.
+ * Needs to match fbq_classify_rq(): if there is a runnable task
+ * that is not on its preferred node, we should identify it.
+ */
 static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
 {
        struct numa_group *numa_group = rcu_dereference(p->numa_group);
+       unsigned long src_faults, dst_faults;
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA_FAVOUR_HIGHER) || !p->numa_faults ||
@@ -5484,29 +5683,30 @@ static bool migrate_improves_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
-       if (numa_group) {
-               /* Task is already in the group's interleave set. */
-               if (node_isset(src_nid, numa_group->active_nodes))
-                       return false;
-
-               /* Task is moving into the group's interleave set. */
-               if (node_isset(dst_nid, numa_group->active_nodes))
-                       return true;
-
-               return group_faults(p, dst_nid) > group_faults(p, src_nid);
-       }
-
        /* Encourage migration to the preferred node. */
        if (dst_nid == p->numa_preferred_nid)
                return true;
 
-       return task_faults(p, dst_nid) > task_faults(p, src_nid);
+       /* Migrating away from the preferred node is bad. */
+       if (src_nid == p->numa_preferred_nid)
+               return false;
+
+       if (numa_group) {
+               src_faults = group_faults(p, src_nid);
+               dst_faults = group_faults(p, dst_nid);
+       } else {
+               src_faults = task_faults(p, src_nid);
+               dst_faults = task_faults(p, dst_nid);
+       }
+
+       return dst_faults > src_faults;
 }
 
 
 static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
 {
        struct numa_group *numa_group = rcu_dereference(p->numa_group);
+       unsigned long src_faults, dst_faults;
        int src_nid, dst_nid;
 
        if (!sched_feat(NUMA) || !sched_feat(NUMA_RESIST_LOWER))
@@ -5521,23 +5721,23 @@ static bool migrate_degrades_locality(struct task_struct *p, struct lb_env *env)
        if (src_nid == dst_nid)
                return false;
 
-       if (numa_group) {
-               /* Task is moving within/into the group's interleave set. */
-               if (node_isset(dst_nid, numa_group->active_nodes))
-                       return false;
+       /* Migrating away from the preferred node is bad. */
+       if (src_nid == p->numa_preferred_nid)
+               return true;
 
-               /* Task is moving out of the group's interleave set. */
-               if (node_isset(src_nid, numa_group->active_nodes))
-                       return true;
+       /* Encourage migration to the preferred node. */
+       if (dst_nid == p->numa_preferred_nid)
+               return false;
 
-               return group_faults(p, dst_nid) < group_faults(p, src_nid);
+       if (numa_group) {
+               src_faults = group_faults(p, src_nid);
+               dst_faults = group_faults(p, dst_nid);
+       } else {
+               src_faults = task_faults(p, src_nid);
+               dst_faults = task_faults(p, dst_nid);
        }
 
-       /* Migrating away from the preferred node is always bad. */
-       if (src_nid == p->numa_preferred_nid)
-               return true;
-
-       return task_faults(p, dst_nid) < task_faults(p, src_nid);
+       return dst_faults < src_faults;
 }
 
 #else
@@ -6037,8 +6237,8 @@ static unsigned long scale_rt_capacity(int cpu)
         * Since we're reading these variables without serialization make sure
         * we read them once before doing sanity checks on them.
         */
-       age_stamp = ACCESS_ONCE(rq->age_stamp);
-       avg = ACCESS_ONCE(rq->rt_avg);
+       age_stamp = READ_ONCE(rq->age_stamp);
+       avg = READ_ONCE(rq->rt_avg);
        delta = __rq_clock_broken(rq) - age_stamp;
 
        if (unlikely(delta < 0))
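
The NUMA hunks above collapse both predicates into one ordered test: prefer the
preferred node, refuse to leave it, and only then compare per-node fault counts.
A minimal userspace sketch of that decision order (hypothetical types and data,
not the kernel API); migrate_degrades_locality() is the same test with the
senses flipped:

    #include <stdbool.h>
    #include <stdio.h>

    /* Hypothetical stand-in for the task_faults()/group_faults() data. */
    struct task_numa {
            int preferred_nid;
            unsigned long faults[4];        /* per-node fault counts */
    };

    /* Mirrors the decision order of the rewritten migrate_improves_locality(). */
    static bool improves_locality(const struct task_numa *t, int src, int dst)
    {
            if (src == dst)
                    return false;
            if (dst == t->preferred_nid)    /* moving onto the preferred node */
                    return true;
            if (src == t->preferred_nid)    /* moving off it is always bad */
                    return false;
            return t->faults[dst] > t->faults[src];
    }

    int main(void)
    {
            struct task_numa t = { .preferred_nid = 1, .faults = { 10, 50, 30, 5 } };

            printf("%d\n", improves_locality(&t, 0, 2));    /* 1: 30 > 10 faults */
            printf("%d\n", improves_locality(&t, 1, 2));    /* 0: leaving preferred */
            return 0;
    }
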
diff --git a/kernel/sched/loadavg.c b/kernel/sched/loadavg.c
new file mode 100644 (file)
index 0000000..ef71590
--- /dev/null
@@ -0,0 +1,394 @@
+/*
+ * kernel/sched/loadavg.c
+ *
+ * This file contains the magic bits required to compute the global loadavg
+ * figure. Its a silly number but people think its important. We go through
+ * great pains to make it work on big machines and tickless kernels.
+ */
+
+#include <linux/export.h>
+
+#include "sched.h"
+
+/*
+ * Global load-average calculations
+ *
+ * We take a distributed and async approach to calculating the global load-avg
+ * in order to minimize overhead.
+ *
+ * The global load average is an exponentially decaying average of nr_running +
+ * nr_uninterruptible.
+ *
+ * Once every LOAD_FREQ:
+ *
+ *   nr_active = 0;
+ *   for_each_possible_cpu(cpu)
+ *     nr_active += cpu_rq(cpu)->nr_running + cpu_rq(cpu)->nr_uninterruptible;
+ *
+ *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
+ *
+ * For a number of reasons the above turns into the mess below:
+ *
+ *  - for_each_possible_cpu() is prohibitively expensive on machines with
+ *    a serious number of cpus, therefore we need to take a distributed approach
+ *    to calculating nr_active.
+ *
+ *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
+ *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
+ *
+ *    So assuming nr_active := 0 when we start out -- true by definition, we
+ *    can simply take per-cpu deltas and fold those into a global accumulate
+ *    to obtain the same result. See calc_load_fold_active().
+ *
+ *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
+ *    across the machine, we assume 10 ticks is sufficient time for every
+ *    cpu to have completed this task.
+ *
+ *    This places an upper-bound on the IRQ-off latency of the machine. Then
+ *    again, being late doesn't lose the delta, just wrecks the sample.
+ *
+ *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
+ *    this would add another cross-cpu cacheline miss and atomic operation
+ *    to the wakeup path. Instead we increment on whatever cpu the task ran
+ *    when it went into uninterruptible state and decrement on whatever cpu
+ *    did the wakeup. This means that only the sum of nr_uninterruptible over
+ *    all cpus yields the correct result.
+ *
+ *  This covers the NO_HZ=n code; for extra headaches, see the comment below.
+ */
+
+/* Variables and functions for calc_load */
+atomic_long_t calc_load_tasks;
+unsigned long calc_load_update;
+unsigned long avenrun[3];
+EXPORT_SYMBOL(avenrun); /* should be removed */
+
+/**
+ * get_avenrun - get the load average array
+ * @loads:     pointer to dest load array
+ * @offset:    offset to add
+ * @shift:     shift count to shift the result left
+ *
+ * These values are estimates at best, so no need for locking.
+ */
+void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
+{
+       loads[0] = (avenrun[0] + offset) << shift;
+       loads[1] = (avenrun[1] + offset) << shift;
+       loads[2] = (avenrun[2] + offset) << shift;
+}
+
+long calc_load_fold_active(struct rq *this_rq)
+{
+       long nr_active, delta = 0;
+
+       nr_active = this_rq->nr_running;
+       nr_active += (long)this_rq->nr_uninterruptible;
+
+       if (nr_active != this_rq->calc_load_active) {
+               delta = nr_active - this_rq->calc_load_active;
+               this_rq->calc_load_active = nr_active;
+       }
+
+       return delta;
+}
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ */
+static unsigned long
+calc_load(unsigned long load, unsigned long exp, unsigned long active)
+{
+       load *= exp;
+       load += active * (FIXED_1 - exp);
+       load += 1UL << (FSHIFT - 1);
+       return load >> FSHIFT;
+}
+
+#ifdef CONFIG_NO_HZ_COMMON
+/*
+ * Handle NO_HZ for the global load-average.
+ *
+ * Since the above described distributed algorithm to compute the global
+ * load-average relies on per-cpu sampling from the tick, it is affected by
+ * NO_HZ.
+ *
+ * The basic idea is to fold the nr_active delta into a global idle-delta upon
+ * entering NO_HZ state such that we can include this as an 'extra' cpu delta
+ * when we read the global state.
+ *
+ * Obviously reality has to ruin such a delightfully simple scheme:
+ *
+ *  - When we go NO_HZ idle during the window, we can negate our sample
+ *    contribution, causing under-accounting.
+ *
+ *    We avoid this by keeping two idle-delta counters and flipping them
+ *    when the window starts, thus separating old and new NO_HZ load.
+ *
+ *    The only trick is the slight shift in index flip for read vs write.
+ *
+ *        0s            5s            10s           15s
+ *          +10           +10           +10           +10
+ *        |-|-----------|-|-----------|-|-----------|-|
+ *    r:0 0 1           1 0           0 1           1 0
+ *    w:0 1 1           0 0           1 1           0 0
+ *
+ *    This ensures we'll fold the old idle contribution in this window while
+ *    accumulating the new one.
+ *
+ *  - When we wake up from NO_HZ idle during the window, we push up our
+ *    contribution, since we effectively move our sample point to a known
+ *    busy state.
+ *
+ *    This is solved by pushing the window forward, and thus skipping the
+ *    sample, for this cpu (effectively using the idle-delta for this cpu which
+ *    was in effect at the time the window opened). This also solves the issue
+ *    of having to deal with a cpu having been in NOHZ idle for multiple
+ *    LOAD_FREQ intervals.
+ *
+ * When making the ILB scale, we should try to pull this in as well.
+ */
+static atomic_long_t calc_load_idle[2];
+static int calc_load_idx;
+
+static inline int calc_load_write_idx(void)
+{
+       int idx = calc_load_idx;
+
+       /*
+        * See calc_global_nohz(), if we observe the new index, we also
+        * need to observe the new update time.
+        */
+       smp_rmb();
+
+       /*
+        * If the folding window started, make sure we start writing in the
+        * next idle-delta.
+        */
+       if (!time_before(jiffies, calc_load_update))
+               idx++;
+
+       return idx & 1;
+}
+
+static inline int calc_load_read_idx(void)
+{
+       return calc_load_idx & 1;
+}
+
+void calc_load_enter_idle(void)
+{
+       struct rq *this_rq = this_rq();
+       long delta;
+
+       /*
+        * We're going into NOHZ mode, if there's any pending delta, fold it
+        * into the pending idle delta.
+        */
+       delta = calc_load_fold_active(this_rq);
+       if (delta) {
+               int idx = calc_load_write_idx();
+
+               atomic_long_add(delta, &calc_load_idle[idx]);
+       }
+}
+
+void calc_load_exit_idle(void)
+{
+       struct rq *this_rq = this_rq();
+
+       /*
+        * If we're still before the sample window, we're done.
+        */
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
+
+       /*
+        * We woke inside or after the sample window, this means we're already
+        * accounted through the nohz accounting, so skip the entire deal and
+        * sync up for the next window.
+        */
+       this_rq->calc_load_update = calc_load_update;
+       if (time_before(jiffies, this_rq->calc_load_update + 10))
+               this_rq->calc_load_update += LOAD_FREQ;
+}
+
+static long calc_load_fold_idle(void)
+{
+       int idx = calc_load_read_idx();
+       long delta = 0;
+
+       if (atomic_long_read(&calc_load_idle[idx]))
+               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
+
+       return delta;
+}
+
+/**
+ * fixed_power_int - compute: x^n, in O(log n) time
+ *
+ * @x:         base of the power
+ * @frac_bits: fractional bits of @x
+ * @n:         power to raise @x to.
+ *
+ * By exploiting the relation between the definition of the natural power
+ * function: x^n := x*x*...*x (x multiplied by itself for n times), and
+ * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
+ * (where: n_i \elem {0, 1}, the binary vector representing n),
+ * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
+ * of course trivially computable in O(log_2 n), the length of our binary
+ * vector.
+ */
+static unsigned long
+fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
+{
+       unsigned long result = 1UL << frac_bits;
+
+       if (n) {
+               for (;;) {
+                       if (n & 1) {
+                               result *= x;
+                               result += 1UL << (frac_bits - 1);
+                               result >>= frac_bits;
+                       }
+                       n >>= 1;
+                       if (!n)
+                               break;
+                       x *= x;
+                       x += 1UL << (frac_bits - 1);
+                       x >>= frac_bits;
+               }
+       }
+
+       return result;
+}
+
+/*
+ * a1 = a0 * e + a * (1 - e)
+ *
+ * a2 = a1 * e + a * (1 - e)
+ *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
+ *    = a0 * e^2 + a * (1 - e) * (1 + e)
+ *
+ * a3 = a2 * e + a * (1 - e)
+ *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
+ *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
+ *
+ *  ...
+ *
+ * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
+ *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
+ *    = a0 * e^n + a * (1 - e^n)
+ *
+ * [1] application of the geometric series:
+ *
+ *              n         1 - x^(n+1)
+ *     S_n := \Sum x^i = -------------
+ *             i=0          1 - x
+ */
+static unsigned long
+calc_load_n(unsigned long load, unsigned long exp,
+           unsigned long active, unsigned int n)
+{
+       return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
+}
+
+/*
+ * NO_HZ can leave us missing all per-cpu ticks calling
+ * calc_global_load_tick(), but since an idle CPU folds its delta into
+ * calc_load_idle[] via calc_load_enter_idle(), all we need to do is fold
+ * in the pending idle delta if our idle period crossed a load cycle boundary.
+ *
+ * Once we've updated the global active value, we need to apply the exponential
+ * weights adjusted to the number of cycles missed.
+ */
+static void calc_global_nohz(void)
+{
+       long delta, active, n;
+
+       if (!time_before(jiffies, calc_load_update + 10)) {
+               /*
+                * Catch-up, fold however many we are behind still
+                */
+               delta = jiffies - calc_load_update - 10;
+               n = 1 + (delta / LOAD_FREQ);
+
+               active = atomic_long_read(&calc_load_tasks);
+               active = active > 0 ? active * FIXED_1 : 0;
+
+               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
+               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
+               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
+
+               calc_load_update += n * LOAD_FREQ;
+       }
+
+       /*
+        * Flip the idle index...
+        *
+        * Make sure we first write the new time then flip the index, so that
+        * calc_load_write_idx() will see the new time when it reads the new
+        * index, this avoids a double flip messing things up.
+        */
+       smp_wmb();
+       calc_load_idx++;
+}
+#else /* !CONFIG_NO_HZ_COMMON */
+
+static inline long calc_load_fold_idle(void) { return 0; }
+static inline void calc_global_nohz(void) { }
+
+#endif /* CONFIG_NO_HZ_COMMON */
+
+/*
+ * calc_global_load - update the avenrun load estimates 10 ticks after the
+ * CPUs have updated calc_load_tasks.
+ *
+ * Called from the global timer code.
+ */
+void calc_global_load(unsigned long ticks)
+{
+       long active, delta;
+
+       if (time_before(jiffies, calc_load_update + 10))
+               return;
+
+       /*
+        * Fold the 'old' idle-delta to include all NO_HZ cpus.
+        */
+       delta = calc_load_fold_idle();
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
+       active = atomic_long_read(&calc_load_tasks);
+       active = active > 0 ? active * FIXED_1 : 0;
+
+       avenrun[0] = calc_load(avenrun[0], EXP_1, active);
+       avenrun[1] = calc_load(avenrun[1], EXP_5, active);
+       avenrun[2] = calc_load(avenrun[2], EXP_15, active);
+
+       calc_load_update += LOAD_FREQ;
+
+       /*
+        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
+        */
+       calc_global_nohz();
+}
+
+/*
+ * Called from scheduler_tick() to periodically update this CPU's
+ * active count.
+ */
+void calc_global_load_tick(struct rq *this_rq)
+{
+       long delta;
+
+       if (time_before(jiffies, this_rq->calc_load_update))
+               return;
+
+       delta  = calc_load_fold_active(this_rq);
+       if (delta)
+               atomic_long_add(delta, &calc_load_tasks);
+
+       this_rq->calc_load_update += LOAD_FREQ;
+}
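
To see the fixed-point machinery above in action, the arithmetic can be lifted
out and run in userspace. A self-contained sketch, assuming the usual constants
from <linux/sched.h> (FSHIFT = 11, so FIXED_1 = 2048 and EXP_1 = 1884, i.e.
FIXED_1 * exp(-5s/1min)); calc_load() is copied from the file above, and
fixed_power_int() is only restructured, not changed:

    #include <stdio.h>

    #define FSHIFT  11                      /* bits of fixed-point precision */
    #define FIXED_1 (1UL << FSHIFT)         /* 1.0 */
    #define EXP_1   1884                    /* FIXED_1 * exp(-5/60) */

    static unsigned long
    calc_load(unsigned long load, unsigned long exp, unsigned long active)
    {
            load *= exp;
            load += active * (FIXED_1 - exp);
            load += 1UL << (FSHIFT - 1);    /* round to nearest */
            return load >> FSHIFT;
    }

    static unsigned long
    fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
    {
            unsigned long result = 1UL << frac_bits;

            while (n) {
                    if (n & 1) {
                            result *= x;
                            result += 1UL << (frac_bits - 1);
                            result >>= frac_bits;
                    }
                    n >>= 1;
                    if (!n)
                            break;
                    x *= x;
                    x += 1UL << (frac_bits - 1);
                    x >>= frac_bits;
            }
            return result;
    }

    int main(void)
    {
            unsigned long avg = 0;                  /* avenrun[0], fixed-point */
            unsigned long active = 2 * FIXED_1;     /* two runnable tasks */
            int i;

            /* Ten 5-second samples: climbs from 0.00 towards 2.00. */
            for (i = 0; i < 10; i++)
                    avg = calc_load(avg, EXP_1, active);
            printf("after 50s, stepwise: %lu.%02lu\n",            /* 1.13 */
                   avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);

            /* calc_load_n() equivalent: the ten steps folded into one. */
            avg = calc_load(0, fixed_power_int(EXP_1, FSHIFT, 10), active);
            printf("after 50s, one shot: %lu.%02lu\n",            /* 1.13 */
                   avg >> FSHIFT, ((avg & (FIXED_1 - 1)) * 100) >> FSHIFT);
            return 0;
    }

The two answers agree to the printed precision; per-step rounding makes the raw
fixed-point values differ by one unit here (2319 vs 2320).
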
diff --git a/kernel/sched/proc.c b/kernel/sched/proc.c
deleted file mode 100644 (file)
index 8ecd552..0000000
+++ /dev/null
@@ -1,584 +0,0 @@
-/*
- *  kernel/sched/proc.c
- *
- *  Kernel load calculations, forked from sched/core.c
- */
-
-#include <linux/export.h>
-
-#include "sched.h"
-
-/*
- * Global load-average calculations
- *
- * We take a distributed and async approach to calculating the global load-avg
- * in order to minimize overhead.
- *
- * The global load average is an exponentially decaying average of nr_running +
- * nr_uninterruptible.
- *
- * Once every LOAD_FREQ:
- *
- *   nr_active = 0;
- *   for_each_possible_cpu(cpu)
- *     nr_active += cpu_of(cpu)->nr_running + cpu_of(cpu)->nr_uninterruptible;
- *
- *   avenrun[n] = avenrun[0] * exp_n + nr_active * (1 - exp_n)
- *
- * Due to a number of reasons the above turns in the mess below:
- *
- *  - for_each_possible_cpu() is prohibitively expensive on machines with
- *    serious number of cpus, therefore we need to take a distributed approach
- *    to calculating nr_active.
- *
- *        \Sum_i x_i(t) = \Sum_i x_i(t) - x_i(t_0) | x_i(t_0) := 0
- *                      = \Sum_i { \Sum_j=1 x_i(t_j) - x_i(t_j-1) }
- *
- *    So assuming nr_active := 0 when we start out -- true per definition, we
- *    can simply take per-cpu deltas and fold those into a global accumulate
- *    to obtain the same result. See calc_load_fold_active().
- *
- *    Furthermore, in order to avoid synchronizing all per-cpu delta folding
- *    across the machine, we assume 10 ticks is sufficient time for every
- *    cpu to have completed this task.
- *
- *    This places an upper-bound on the IRQ-off latency of the machine. Then
- *    again, being late doesn't loose the delta, just wrecks the sample.
- *
- *  - cpu_rq()->nr_uninterruptible isn't accurately tracked per-cpu because
- *    this would add another cross-cpu cacheline miss and atomic operation
- *    to the wakeup path. Instead we increment on whatever cpu the task ran
- *    when it went into uninterruptible state and decrement on whatever cpu
- *    did the wakeup. This means that only the sum of nr_uninterruptible over
- *    all cpus yields the correct result.
- *
- *  This covers the NO_HZ=n code, for extra head-aches, see the comment below.
- */
-
-/* Variables and functions for calc_load */
-atomic_long_t calc_load_tasks;
-unsigned long calc_load_update;
-unsigned long avenrun[3];
-EXPORT_SYMBOL(avenrun); /* should be removed */
-
-/**
- * get_avenrun - get the load average array
- * @loads:     pointer to dest load array
- * @offset:    offset to add
- * @shift:     shift count to shift the result left
- *
- * These values are estimates at best, so no need for locking.
- */
-void get_avenrun(unsigned long *loads, unsigned long offset, int shift)
-{
-       loads[0] = (avenrun[0] + offset) << shift;
-       loads[1] = (avenrun[1] + offset) << shift;
-       loads[2] = (avenrun[2] + offset) << shift;
-}
-
-long calc_load_fold_active(struct rq *this_rq)
-{
-       long nr_active, delta = 0;
-
-       nr_active = this_rq->nr_running;
-       nr_active += (long) this_rq->nr_uninterruptible;
-
-       if (nr_active != this_rq->calc_load_active) {
-               delta = nr_active - this_rq->calc_load_active;
-               this_rq->calc_load_active = nr_active;
-       }
-
-       return delta;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- */
-static unsigned long
-calc_load(unsigned long load, unsigned long exp, unsigned long active)
-{
-       load *= exp;
-       load += active * (FIXED_1 - exp);
-       load += 1UL << (FSHIFT - 1);
-       return load >> FSHIFT;
-}
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * Handle NO_HZ for the global load-average.
- *
- * Since the above described distributed algorithm to compute the global
- * load-average relies on per-cpu sampling from the tick, it is affected by
- * NO_HZ.
- *
- * The basic idea is to fold the nr_active delta into a global idle-delta upon
- * entering NO_HZ state such that we can include this as an 'extra' cpu delta
- * when we read the global state.
- *
- * Obviously reality has to ruin such a delightfully simple scheme:
- *
- *  - When we go NO_HZ idle during the window, we can negate our sample
- *    contribution, causing under-accounting.
- *
- *    We avoid this by keeping two idle-delta counters and flipping them
- *    when the window starts, thus separating old and new NO_HZ load.
- *
- *    The only trick is the slight shift in index flip for read vs write.
- *
- *        0s            5s            10s           15s
- *          +10           +10           +10           +10
- *        |-|-----------|-|-----------|-|-----------|-|
- *    r:0 0 1           1 0           0 1           1 0
- *    w:0 1 1           0 0           1 1           0 0
- *
- *    This ensures we'll fold the old idle contribution in this window while
- *    accumlating the new one.
- *
- *  - When we wake up from NO_HZ idle during the window, we push up our
- *    contribution, since we effectively move our sample point to a known
- *    busy state.
- *
- *    This is solved by pushing the window forward, and thus skipping the
- *    sample, for this cpu (effectively using the idle-delta for this cpu which
- *    was in effect at the time the window opened). This also solves the issue
- *    of having to deal with a cpu having been in NOHZ idle for multiple
- *    LOAD_FREQ intervals.
- *
- * When making the ILB scale, we should try to pull this in as well.
- */
-static atomic_long_t calc_load_idle[2];
-static int calc_load_idx;
-
-static inline int calc_load_write_idx(void)
-{
-       int idx = calc_load_idx;
-
-       /*
-        * See calc_global_nohz(), if we observe the new index, we also
-        * need to observe the new update time.
-        */
-       smp_rmb();
-
-       /*
-        * If the folding window started, make sure we start writing in the
-        * next idle-delta.
-        */
-       if (!time_before(jiffies, calc_load_update))
-               idx++;
-
-       return idx & 1;
-}
-
-static inline int calc_load_read_idx(void)
-{
-       return calc_load_idx & 1;
-}
-
-void calc_load_enter_idle(void)
-{
-       struct rq *this_rq = this_rq();
-       long delta;
-
-       /*
-        * We're going into NOHZ mode, if there's any pending delta, fold it
-        * into the pending idle delta.
-        */
-       delta = calc_load_fold_active(this_rq);
-       if (delta) {
-               int idx = calc_load_write_idx();
-               atomic_long_add(delta, &calc_load_idle[idx]);
-       }
-}
-
-void calc_load_exit_idle(void)
-{
-       struct rq *this_rq = this_rq();
-
-       /*
-        * If we're still before the sample window, we're done.
-        */
-       if (time_before(jiffies, this_rq->calc_load_update))
-               return;
-
-       /*
-        * We woke inside or after the sample window, this means we're already
-        * accounted through the nohz accounting, so skip the entire deal and
-        * sync up for the next window.
-        */
-       this_rq->calc_load_update = calc_load_update;
-       if (time_before(jiffies, this_rq->calc_load_update + 10))
-               this_rq->calc_load_update += LOAD_FREQ;
-}
-
-static long calc_load_fold_idle(void)
-{
-       int idx = calc_load_read_idx();
-       long delta = 0;
-
-       if (atomic_long_read(&calc_load_idle[idx]))
-               delta = atomic_long_xchg(&calc_load_idle[idx], 0);
-
-       return delta;
-}
-
-/**
- * fixed_power_int - compute: x^n, in O(log n) time
- *
- * @x:         base of the power
- * @frac_bits: fractional bits of @x
- * @n:         power to raise @x to.
- *
- * By exploiting the relation between the definition of the natural power
- * function: x^n := x*x*...*x (x multiplied by itself for n times), and
- * the binary encoding of numbers used by computers: n := \Sum n_i * 2^i,
- * (where: n_i \elem {0, 1}, the binary vector representing n),
- * we find: x^n := x^(\Sum n_i * 2^i) := \Prod x^(n_i * 2^i), which is
- * of course trivially computable in O(log_2 n), the length of our binary
- * vector.
- */
-static unsigned long
-fixed_power_int(unsigned long x, unsigned int frac_bits, unsigned int n)
-{
-       unsigned long result = 1UL << frac_bits;
-
-       if (n) for (;;) {
-               if (n & 1) {
-                       result *= x;
-                       result += 1UL << (frac_bits - 1);
-                       result >>= frac_bits;
-               }
-               n >>= 1;
-               if (!n)
-                       break;
-               x *= x;
-               x += 1UL << (frac_bits - 1);
-               x >>= frac_bits;
-       }
-
-       return result;
-}
-
-/*
- * a1 = a0 * e + a * (1 - e)
- *
- * a2 = a1 * e + a * (1 - e)
- *    = (a0 * e + a * (1 - e)) * e + a * (1 - e)
- *    = a0 * e^2 + a * (1 - e) * (1 + e)
- *
- * a3 = a2 * e + a * (1 - e)
- *    = (a0 * e^2 + a * (1 - e) * (1 + e)) * e + a * (1 - e)
- *    = a0 * e^3 + a * (1 - e) * (1 + e + e^2)
- *
- *  ...
- *
- * an = a0 * e^n + a * (1 - e) * (1 + e + ... + e^n-1) [1]
- *    = a0 * e^n + a * (1 - e) * (1 - e^n)/(1 - e)
- *    = a0 * e^n + a * (1 - e^n)
- *
- * [1] application of the geometric series:
- *
- *              n         1 - x^(n+1)
- *     S_n := \Sum x^i = -------------
- *             i=0          1 - x
- */
-static unsigned long
-calc_load_n(unsigned long load, unsigned long exp,
-           unsigned long active, unsigned int n)
-{
-
-       return calc_load(load, fixed_power_int(exp, FSHIFT, n), active);
-}
-
-/*
- * NO_HZ can leave us missing all per-cpu ticks calling
- * calc_load_account_active(), but since an idle CPU folds its delta into
- * calc_load_tasks_idle per calc_load_account_idle(), all we need to do is fold
- * in the pending idle delta if our idle period crossed a load cycle boundary.
- *
- * Once we've updated the global active value, we need to apply the exponential
- * weights adjusted to the number of cycles missed.
- */
-static void calc_global_nohz(void)
-{
-       long delta, active, n;
-
-       if (!time_before(jiffies, calc_load_update + 10)) {
-               /*
-                * Catch-up, fold however many we are behind still
-                */
-               delta = jiffies - calc_load_update - 10;
-               n = 1 + (delta / LOAD_FREQ);
-
-               active = atomic_long_read(&calc_load_tasks);
-               active = active > 0 ? active * FIXED_1 : 0;
-
-               avenrun[0] = calc_load_n(avenrun[0], EXP_1, active, n);
-               avenrun[1] = calc_load_n(avenrun[1], EXP_5, active, n);
-               avenrun[2] = calc_load_n(avenrun[2], EXP_15, active, n);
-
-               calc_load_update += n * LOAD_FREQ;
-       }
-
-       /*
-        * Flip the idle index...
-        *
-        * Make sure we first write the new time then flip the index, so that
-        * calc_load_write_idx() will see the new time when it reads the new
-        * index, this avoids a double flip messing things up.
-        */
-       smp_wmb();
-       calc_load_idx++;
-}
-#else /* !CONFIG_NO_HZ_COMMON */
-
-static inline long calc_load_fold_idle(void) { return 0; }
-static inline void calc_global_nohz(void) { }
-
-#endif /* CONFIG_NO_HZ_COMMON */
-
-/*
- * calc_load - update the avenrun load estimates 10 ticks after the
- * CPUs have updated calc_load_tasks.
- */
-void calc_global_load(unsigned long ticks)
-{
-       long active, delta;
-
-       if (time_before(jiffies, calc_load_update + 10))
-               return;
-
-       /*
-        * Fold the 'old' idle-delta to include all NO_HZ cpus.
-        */
-       delta = calc_load_fold_idle();
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       active = atomic_long_read(&calc_load_tasks);
-       active = active > 0 ? active * FIXED_1 : 0;
-
-       avenrun[0] = calc_load(avenrun[0], EXP_1, active);
-       avenrun[1] = calc_load(avenrun[1], EXP_5, active);
-       avenrun[2] = calc_load(avenrun[2], EXP_15, active);
-
-       calc_load_update += LOAD_FREQ;
-
-       /*
-        * In case we idled for multiple LOAD_FREQ intervals, catch up in bulk.
-        */
-       calc_global_nohz();
-}
-
-/*
- * Called from update_cpu_load() to periodically update this CPU's
- * active count.
- */
-static void calc_load_account_active(struct rq *this_rq)
-{
-       long delta;
-
-       if (time_before(jiffies, this_rq->calc_load_update))
-               return;
-
-       delta  = calc_load_fold_active(this_rq);
-       if (delta)
-               atomic_long_add(delta, &calc_load_tasks);
-
-       this_rq->calc_load_update += LOAD_FREQ;
-}
-
-/*
- * End of global load-average stuff
- */
-
-/*
- * The exact cpuload at various idx values, calculated at every tick would be
- * load = (2^idx - 1) / 2^idx * load + 1 / 2^idx * cur_load
- *
- * If a cpu misses updates for n-1 ticks (as it was idle) and update gets called
- * on nth tick when cpu may be busy, then we have:
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * load = (2^idx - 1) / 2^idx) * load + 1 / 2^idx * cur_load
- *
- * decay_load_missed() below does efficient calculation of
- * load = ((2^idx - 1) / 2^idx)^(n-1) * load
- * avoiding 0..n-1 loop doing load = ((2^idx - 1) / 2^idx) * load
- *
- * The calculation is approximated on a 128 point scale.
- * degrade_zero_ticks is the number of ticks after which load at any
- * particular idx is approximated to be zero.
- * degrade_factor is a precomputed table, a row for each load idx.
- * Each column corresponds to degradation factor for a power of two ticks,
- * based on 128 point scale.
- * Example:
- * row 2, col 3 (=12) says that the degradation at load idx 2 after
- * 8 ticks is 12/128 (which is an approximation of exact factor 3^8/4^8).
- *
- * With this power of 2 load factors, we can degrade the load n times
- * by looking at 1 bits in n and doing as many mult/shift instead of
- * n mult/shifts needed by the exact degradation.
- */
-#define DEGRADE_SHIFT          7
-static const unsigned char
-               degrade_zero_ticks[CPU_LOAD_IDX_MAX] = {0, 8, 32, 64, 128};
-static const unsigned char
-               degrade_factor[CPU_LOAD_IDX_MAX][DEGRADE_SHIFT + 1] = {
-                                       {0, 0, 0, 0, 0, 0, 0, 0},
-                                       {64, 32, 8, 0, 0, 0, 0, 0},
-                                       {96, 72, 40, 12, 1, 0, 0},
-                                       {112, 98, 75, 43, 15, 1, 0},
-                                       {120, 112, 98, 76, 45, 16, 2} };
-
-/*
- * Update cpu_load for any missed ticks, due to tickless idle. The backlog
- * would be when CPU is idle and so we just decay the old load without
- * adding any new load.
- */
-static unsigned long
-decay_load_missed(unsigned long load, unsigned long missed_updates, int idx)
-{
-       int j = 0;
-
-       if (!missed_updates)
-               return load;
-
-       if (missed_updates >= degrade_zero_ticks[idx])
-               return 0;
-
-       if (idx == 1)
-               return load >> missed_updates;
-
-       while (missed_updates) {
-               if (missed_updates % 2)
-                       load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
-
-               missed_updates >>= 1;
-               j++;
-       }
-       return load;
-}
-
-/*
- * Update rq->cpu_load[] statistics. This function is usually called every
- * scheduler tick (TICK_NSEC). With tickless idle this will not be called
- * every tick. We fix it up based on jiffies.
- */
-static void __update_cpu_load(struct rq *this_rq, unsigned long this_load,
-                             unsigned long pending_updates)
-{
-       int i, scale;
-
-       this_rq->nr_load_updates++;
-
-       /* Update our load: */
-       this_rq->cpu_load[0] = this_load; /* Fasttrack for idx 0 */
-       for (i = 1, scale = 2; i < CPU_LOAD_IDX_MAX; i++, scale += scale) {
-               unsigned long old_load, new_load;
-
-               /* scale is effectively 1 << i now, and >> i divides by scale */
-
-               old_load = this_rq->cpu_load[i];
-               old_load = decay_load_missed(old_load, pending_updates - 1, i);
-               new_load = this_load;
-               /*
-                * Round up the averaging division if load is increasing. This
-                * prevents us from getting stuck on 9 if the load is 10, for
-                * example.
-                */
-               if (new_load > old_load)
-                       new_load += scale - 1;
-
-               this_rq->cpu_load[i] = (old_load * (scale - 1) + new_load) >> i;
-       }
-
-       sched_avg_update(this_rq);
-}
-
-#ifdef CONFIG_SMP
-static inline unsigned long get_rq_runnable_load(struct rq *rq)
-{
-       return rq->cfs.runnable_load_avg;
-}
-#else
-static inline unsigned long get_rq_runnable_load(struct rq *rq)
-{
-       return rq->load.weight;
-}
-#endif
-
-#ifdef CONFIG_NO_HZ_COMMON
-/*
- * There is no sane way to deal with nohz on smp when using jiffies because the
- * cpu doing the jiffies update might drift wrt the cpu doing the jiffy reading
- * causing off-by-one errors in observed deltas; {0,2} instead of {1,1}.
- *
- * Therefore we cannot use the delta approach from the regular tick since that
- * would seriously skew the load calculation. However we'll make do for those
- * updates happening while idle (nohz_idle_balance) or coming out of idle
- * (tick_nohz_idle_exit).
- *
- * This means we might still be one tick off for nohz periods.
- */
-
-/*
- * Called from nohz_idle_balance() to update the load ratings before doing the
- * idle balance.
- */
-void update_idle_cpu_load(struct rq *this_rq)
-{
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-       unsigned long load = get_rq_runnable_load(this_rq);
-       unsigned long pending_updates;
-
-       /*
-        * bail if there's load or we're actually up-to-date.
-        */
-       if (load || curr_jiffies == this_rq->last_load_update_tick)
-               return;
-
-       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-       this_rq->last_load_update_tick = curr_jiffies;
-
-       __update_cpu_load(this_rq, load, pending_updates);
-}
-
-/*
- * Called from tick_nohz_idle_exit() -- try and fix up the ticks we missed.
- */
-void update_cpu_load_nohz(void)
-{
-       struct rq *this_rq = this_rq();
-       unsigned long curr_jiffies = ACCESS_ONCE(jiffies);
-       unsigned long pending_updates;
-
-       if (curr_jiffies == this_rq->last_load_update_tick)
-               return;
-
-       raw_spin_lock(&this_rq->lock);
-       pending_updates = curr_jiffies - this_rq->last_load_update_tick;
-       if (pending_updates) {
-               this_rq->last_load_update_tick = curr_jiffies;
-               /*
-                * We were idle, this means load 0, the current load might be
-                * !0 due to remote wakeups and the sort.
-                */
-               __update_cpu_load(this_rq, 0, pending_updates);
-       }
-       raw_spin_unlock(&this_rq->lock);
-}
-#endif /* CONFIG_NO_HZ */
-
-/*
- * Called from scheduler_tick()
- */
-void update_cpu_load_active(struct rq *this_rq)
-{
-       unsigned long load = get_rq_runnable_load(this_rq);
-       /*
-        * See the mess around update_idle_cpu_load() / update_cpu_load_nohz().
-        */
-       this_rq->last_load_update_tick = jiffies;
-       __update_cpu_load(this_rq, load, 1);
-
-       calc_load_account_active(this_rq);
-}
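
The degrade_factor table in the deleted block (the cpu_load decay machinery is
relocated rather than dropped) admits a quick sanity check: decaying at idx 2
across 8 missed ticks uses row 2, column 3 (= 12), and should land on the same
value as eight explicit multiplications by 96/128. A hedged userspace sketch,
table copied verbatim, the idx==1 and degrade_zero_ticks shortcuts omitted:

    #include <stdio.h>

    #define DEGRADE_SHIFT 7
    static const unsigned char degrade_factor[5][DEGRADE_SHIFT + 1] = {
            {0, 0, 0, 0, 0, 0, 0, 0},
            {64, 32, 8, 0, 0, 0, 0, 0},
            {96, 72, 40, 12, 1, 0, 0},
            {112, 98, 75, 43, 15, 1, 0},
            {120, 112, 98, 76, 45, 16, 2},
    };

    static unsigned long decay(unsigned long load, unsigned long missed, int idx)
    {
            int j = 0;

            while (missed) {                /* one mult/shift per set bit */
                    if (missed % 2)
                            load = (load * degrade_factor[idx][j]) >> DEGRADE_SHIFT;
                    missed >>= 1;
                    j++;
            }
            return load;
    }

    int main(void)
    {
            unsigned long exact = 100;
            int i;

            for (i = 0; i < 8; i++)         /* exact: * (96/128) per tick */
                    exact = (exact * 96) >> DEGRADE_SHIFT;

            /* both print 9 */
            printf("table: %lu, loop: %lu\n", decay(100, 8, 2), exact);
            return 0;
    }
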
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 575da76a3874a8c1b2ddd0f518e5ecea7a805262..7d7093c51f8d169cea027c78a9ca0321c8f15932 100644 (file)
@@ -18,19 +18,22 @@ static enum hrtimer_restart sched_rt_period_timer(struct hrtimer *timer)
 {
        struct rt_bandwidth *rt_b =
                container_of(timer, struct rt_bandwidth, rt_period_timer);
-       ktime_t now;
-       int overrun;
        int idle = 0;
+       int overrun;
 
+       raw_spin_lock(&rt_b->rt_runtime_lock);
        for (;;) {
-               now = hrtimer_cb_get_time(timer);
-               overrun = hrtimer_forward(timer, now, rt_b->rt_period);
-
+               overrun = hrtimer_forward_now(timer, rt_b->rt_period);
                if (!overrun)
                        break;
 
+               raw_spin_unlock(&rt_b->rt_runtime_lock);
                idle = do_sched_rt_period_timer(rt_b, overrun);
+               raw_spin_lock(&rt_b->rt_runtime_lock);
        }
+       if (idle)
+               rt_b->rt_period_active = 0;
+       raw_spin_unlock(&rt_b->rt_runtime_lock);
 
        return idle ? HRTIMER_NORESTART : HRTIMER_RESTART;
 }
@@ -52,11 +55,12 @@ static void start_rt_bandwidth(struct rt_bandwidth *rt_b)
        if (!rt_bandwidth_enabled() || rt_b->rt_runtime == RUNTIME_INF)
                return;
 
-       if (hrtimer_active(&rt_b->rt_period_timer))
-               return;
-
        raw_spin_lock(&rt_b->rt_runtime_lock);
-       start_bandwidth_timer(&rt_b->rt_period_timer, rt_b->rt_period);
+       if (!rt_b->rt_period_active) {
+               rt_b->rt_period_active = 1;
+               hrtimer_forward_now(&rt_b->rt_period_timer, rt_b->rt_period);
+               hrtimer_start_expires(&rt_b->rt_period_timer, HRTIMER_MODE_ABS_PINNED);
+       }
        raw_spin_unlock(&rt_b->rt_runtime_lock);
 }
 
@@ -1323,7 +1327,7 @@ select_task_rq_rt(struct task_struct *p, int cpu, int sd_flag, int flags)
        rq = cpu_rq(cpu);
 
        rcu_read_lock();
-       curr = ACCESS_ONCE(rq->curr); /* unlocked access */
+       curr = READ_ONCE(rq->curr); /* unlocked access */
 
        /*
         * If the current task on @p's runqueue is an RT task, then
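
The rt.c hunks above trade the unlocked hrtimer_active() test for an
rt_period_active flag that is only touched under rt_runtime_lock, closing the
window where two CPUs could both see the timer as inactive and race to start
it. A hedged sketch of the same start-once pattern with generic names (pthread
locks, not the kernel's hrtimer API):

    #include <pthread.h>
    #include <stdbool.h>

    /* Hypothetical periodic-timer state, all guarded by one lock. */
    struct period_timer {
            pthread_mutex_t lock;
            bool            active;
            /* the real struct also carries the hrtimer and the period */
    };

    /* Arm the timer exactly once; further calls are no-ops until the
     * callback observes an idle period and clears 'active'. */
    static void start_period(struct period_timer *pt)
    {
            pthread_mutex_lock(&pt->lock);
            if (!pt->active) {
                    pt->active = true;
                    /* arm the timer here, still holding the lock, so a
                     * concurrent caller cannot arm it a second time */
            }
            pthread_mutex_unlock(&pt->lock);
    }

    /* Timer callback: after the periodic work, go idle under the lock. */
    static void period_fired(struct period_timer *pt, bool idle)
    {
            pthread_mutex_lock(&pt->lock);
            if (idle)
                    pt->active = false;     /* next start_period() re-arms */
            pthread_mutex_unlock(&pt->lock);
    }

    int main(void)
    {
            struct period_timer pt = { PTHREAD_MUTEX_INITIALIZER, false };

            start_period(&pt);              /* arms */
            start_period(&pt);              /* no-op: already active */
            period_fired(&pt, true);        /* period went idle */
            return 0;
    }
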
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index e0e1299939588ac47f08b13b45f1a6e2e9cf4d7f..aea7c1f393cb3c983b3fd01e7df53155e4859ef3 100644 (file)
@@ -26,8 +26,14 @@ extern __read_mostly int scheduler_running;
 extern unsigned long calc_load_update;
 extern atomic_long_t calc_load_tasks;
 
+extern void calc_global_load_tick(struct rq *this_rq);
 extern long calc_load_fold_active(struct rq *this_rq);
+
+#ifdef CONFIG_SMP
 extern void update_cpu_load_active(struct rq *this_rq);
+#else
+static inline void update_cpu_load_active(struct rq *this_rq) { }
+#endif
 
 /*
  * Helpers for converting nanosecond timing to jiffy resolution
@@ -131,6 +137,7 @@ struct rt_bandwidth {
        ktime_t                 rt_period;
        u64                     rt_runtime;
        struct hrtimer          rt_period_timer;
+       unsigned int            rt_period_active;
 };
 
 void __dl_clear_params(struct task_struct *p);
@@ -215,7 +222,7 @@ struct cfs_bandwidth {
        s64 hierarchical_quota;
        u64 runtime_expires;
 
-       int idle, timer_active;
+       int idle, period_active;
        struct hrtimer period_timer, slack_timer;
        struct list_head throttled_cfs_rq;
 
@@ -306,7 +313,7 @@ extern void init_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern int sched_group_set_shares(struct task_group *tg, unsigned long shares);
 
 extern void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b);
-extern void __start_cfs_bandwidth(struct cfs_bandwidth *cfs_b, bool force);
+extern void start_cfs_bandwidth(struct cfs_bandwidth *cfs_b);
 extern void unthrottle_cfs_rq(struct cfs_rq *cfs_rq);
 
 extern void free_rt_sched_group(struct task_group *tg);
@@ -707,7 +714,7 @@ DECLARE_PER_CPU_SHARED_ALIGNED(struct rq, runqueues);
 
 static inline u64 __rq_clock_broken(struct rq *rq)
 {
-       return ACCESS_ONCE(rq->clock);
+       return READ_ONCE(rq->clock);
 }
 
 static inline u64 rq_clock(struct rq *rq)
@@ -1284,7 +1291,6 @@ extern void update_max_interval(void);
 extern void init_sched_dl_class(void);
 extern void init_sched_rt_class(void);
 extern void init_sched_fair_class(void);
-extern void init_sched_dl_class(void);
 
 extern void resched_curr(struct rq *rq);
 extern void resched_cpu(int cpu);
@@ -1298,8 +1304,6 @@ extern void init_dl_task_timer(struct sched_dl_entity *dl_se);
 
 unsigned long to_ratio(u64 period, u64 runtime);
 
-extern void update_idle_cpu_load(struct rq *this_rq);
-
 extern void init_task_runnable_average(struct task_struct *p);
 
 static inline void add_nr_running(struct rq *rq, unsigned count)
@@ -1406,8 +1410,6 @@ static inline void sched_rt_avg_update(struct rq *rq, u64 rt_delta) { }
 static inline void sched_avg_update(struct rq *rq) { }
 #endif
 
-extern void start_bandwidth_timer(struct hrtimer *period_timer, ktime_t period);
-
 /*
  * __task_rq_lock - lock the rq @p resides on.
  */
diff --git a/kernel/sched/stats.h b/kernel/sched/stats.h
index 4ab7043396569f201cd6dde0a997733a71a4bb7a..077ebbd5e10f14dc646148aae9231acf007e8a4d 100644 (file)
@@ -174,7 +174,8 @@ static inline bool cputimer_running(struct task_struct *tsk)
 {
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
 
-       if (!cputimer->running)
+       /* Check if cputimer isn't running. This is accessed without locking. */
+       if (!READ_ONCE(cputimer->running))
                return false;
 
        /*
@@ -215,9 +216,7 @@ static inline void account_group_user_time(struct task_struct *tsk,
        if (!cputimer_running(tsk))
                return;
 
-       raw_spin_lock(&cputimer->lock);
-       cputimer->cputime.utime += cputime;
-       raw_spin_unlock(&cputimer->lock);
+       atomic64_add(cputime, &cputimer->cputime_atomic.utime);
 }
 
 /**
@@ -238,9 +237,7 @@ static inline void account_group_system_time(struct task_struct *tsk,
        if (!cputimer_running(tsk))
                return;
 
-       raw_spin_lock(&cputimer->lock);
-       cputimer->cputime.stime += cputime;
-       raw_spin_unlock(&cputimer->lock);
+       atomic64_add(cputime, &cputimer->cputime_atomic.stime);
 }
 
 /**
@@ -261,7 +258,5 @@ static inline void account_group_exec_runtime(struct task_struct *tsk,
        if (!cputimer_running(tsk))
                return;
 
-       raw_spin_lock(&cputimer->lock);
-       cputimer->cputime.sum_exec_runtime += ns;
-       raw_spin_unlock(&cputimer->lock);
+       atomic64_add(ns, &cputimer->cputime_atomic.sum_exec_runtime);
 }
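
The stats.h hunks replace a lock/add/unlock sequence on every accounting call
with a single atomic 64-bit add, so concurrent threads in one thread group no
longer serialize on cputimer->lock. A rough userspace analogue using C11
atomics (struct and function names hypothetical):

    #include <stdatomic.h>
    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for struct task_cputime_atomic: each field is updated
     * lock-free; readers get a consistent value per field, not a
     * consistent snapshot across fields -- the same trade-off as above. */
    struct cputime_atomic {
            atomic_uint_least64_t utime;
            atomic_uint_least64_t stime;
            atomic_uint_least64_t sum_exec_runtime;
    };

    static void account_user_time(struct cputime_atomic *ct, uint64_t delta)
    {
            /* morally atomic64_add(): atomicity only, no extra ordering */
            atomic_fetch_add_explicit(&ct->utime, delta,
                                      memory_order_relaxed);
    }

    int main(void)
    {
            struct cputime_atomic ct = {0};

            account_user_time(&ct, 1000);
            account_user_time(&ct, 234);
            printf("utime = %llu\n", (unsigned long long)atomic_load(&ct.utime));
            return 0;
    }
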
diff --git a/kernel/sched/wait.c b/kernel/sched/wait.c
index 852143a79f367fb75fd73f6c60e2dcaf1ae77895..052e02672d12428ce1e9e1f7266c7cd754ace5af 100644 (file)
@@ -341,7 +341,7 @@ long wait_woken(wait_queue_t *wait, unsigned mode, long timeout)
         * condition being true _OR_ WQ_FLAG_WOKEN such that we will not miss
         * an event.
         */
-       set_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
+       smp_store_mb(wait->flags, wait->flags & ~WQ_FLAG_WOKEN); /* B */
 
        return timeout;
 }
@@ -354,7 +354,7 @@ int woken_wake_function(wait_queue_t *wait, unsigned mode, int sync, void *key)
         * doesn't imply write barrier and the users expects write
         * barrier semantics on wakeup functions.  The following
         * smp_wmb() is equivalent to smp_wmb() in try_to_wake_up()
-        * and is paired with set_mb() in wait_woken().
+        * and is paired with smp_store_mb() in wait_woken().
         */
        smp_wmb(); /* C */
        wait->flags |= WQ_FLAG_WOKEN;
@@ -601,7 +601,7 @@ EXPORT_SYMBOL(bit_wait_io);
 
 __sched int bit_wait_timeout(struct wait_bit_key *word)
 {
-       unsigned long now = ACCESS_ONCE(jiffies);
+       unsigned long now = READ_ONCE(jiffies);
        if (signal_pending_state(current->state, current))
                return 1;
        if (time_after_eq(now, word->timeout))
@@ -613,7 +613,7 @@ EXPORT_SYMBOL_GPL(bit_wait_timeout);
 
 __sched int bit_wait_io_timeout(struct wait_bit_key *word)
 {
-       unsigned long now = ACCESS_ONCE(jiffies);
+       unsigned long now = READ_ONCE(jiffies);
        if (signal_pending_state(current->state, current))
                return 1;
        if (time_after_eq(now, word->timeout))
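
On the set_mb() -> smp_store_mb() rename above: the semantics are unchanged, a
plain store followed by a full memory barrier, so the flag write cannot be
reordered past the later condition checks. Paraphrasing the generic definition
(per-arch forms differ; see asm-generic/barrier.h for the authoritative ones):

    /* store the value, then order it before all later loads and stores */
    #define smp_store_mb(var, value) \
            do { WRITE_ONCE(var, value); smp_mb(); } while (0)
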
diff --git a/kernel/signal.c b/kernel/signal.c
index d51c5ddd855c84b9b65d4a7ef22eedcdff2eeafa..f19833b5db3c9121b540127004191a443c0a7fec 100644 (file)
@@ -245,7 +245,7 @@ static inline void print_dropped_signal(int sig)
  * RETURNS:
  * %true if @mask is set, %false if made noop because @task was dying.
  */
-bool task_set_jobctl_pending(struct task_struct *task, unsigned int mask)
+bool task_set_jobctl_pending(struct task_struct *task, unsigned long mask)
 {
        BUG_ON(mask & ~(JOBCTL_PENDING_MASK | JOBCTL_STOP_CONSUME |
                        JOBCTL_STOP_SIGMASK | JOBCTL_TRAPPING));
@@ -297,7 +297,7 @@ void task_clear_jobctl_trapping(struct task_struct *task)
  * CONTEXT:
  * Must be called with @task->sighand->siglock held.
  */
-void task_clear_jobctl_pending(struct task_struct *task, unsigned int mask)
+void task_clear_jobctl_pending(struct task_struct *task, unsigned long mask)
 {
        BUG_ON(mask & ~JOBCTL_PENDING_MASK);
 
@@ -2000,7 +2000,7 @@ static bool do_signal_stop(int signr)
        struct signal_struct *sig = current->signal;
 
        if (!(current->jobctl & JOBCTL_STOP_PENDING)) {
-               unsigned int gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
+               unsigned long gstop = JOBCTL_STOP_PENDING | JOBCTL_STOP_CONSUME;
                struct task_struct *t;
 
                /* signr will be recorded in task->jobctl for retries */
diff --git a/kernel/stop_machine.c b/kernel/stop_machine.c
index 695f0c6cd169a307de1f216cc67b4330a83363a4..fd643d8c4b424f858e3c6d7e0996bbb919a555e7 100644 (file)
@@ -211,25 +211,6 @@ static int multi_cpu_stop(void *data)
        return err;
 }
 
-struct irq_cpu_stop_queue_work_info {
-       int cpu1;
-       int cpu2;
-       struct cpu_stop_work *work1;
-       struct cpu_stop_work *work2;
-};
-
-/*
- * This function is always run with irqs and preemption disabled.
- * This guarantees that both work1 and work2 get queued, before
- * our local migrate thread gets the chance to preempt us.
- */
-static void irq_cpu_stop_queue_work(void *arg)
-{
-       struct irq_cpu_stop_queue_work_info *info = arg;
-       cpu_stop_queue_work(info->cpu1, info->work1);
-       cpu_stop_queue_work(info->cpu2, info->work2);
-}
-
 /**
  * stop_two_cpus - stops two cpus
  * @cpu1: the cpu to stop
@@ -245,7 +226,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
 {
        struct cpu_stop_done done;
        struct cpu_stop_work work1, work2;
-       struct irq_cpu_stop_queue_work_info call_args;
        struct multi_stop_data msdata;
 
        preempt_disable();
@@ -262,13 +242,6 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
                .done = &done
        };
 
-       call_args = (struct irq_cpu_stop_queue_work_info){
-               .cpu1 = cpu1,
-               .cpu2 = cpu2,
-               .work1 = &work1,
-               .work2 = &work2,
-       };
-
        cpu_stop_init_done(&done, 2);
        set_state(&msdata, MULTI_STOP_PREPARE);
 
@@ -285,16 +258,11 @@ int stop_two_cpus(unsigned int cpu1, unsigned int cpu2, cpu_stop_fn_t fn, void *
                return -ENOENT;
        }
 
-       lg_local_lock(&stop_cpus_lock);
-       /*
-        * Queuing needs to be done by the lowest numbered CPU, to ensure
-        * that works are always queued in the same order on every CPU.
-        * This prevents deadlocks.
-        */
-       smp_call_function_single(min(cpu1, cpu2),
-                                &irq_cpu_stop_queue_work,
-                                &call_args, 1);
-       lg_local_unlock(&stop_cpus_lock);
+       lg_double_lock(&stop_cpus_lock, cpu1, cpu2);
+       cpu_stop_queue_work(cpu1, &work1);
+       cpu_stop_queue_work(cpu2, &work2);
+       lg_double_unlock(&stop_cpus_lock, cpu1, cpu2);
+
        preempt_enable();
 
        wait_for_completion(&done.completion);
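
The stop_machine change drops the "queue from the lowest-numbered CPU via IPI"
trick: lg_double_lock() gives the same deadlock-freedom directly by always
taking the two per-CPU locks in a fixed order. A minimal sketch of that
ordering discipline (plain pthread mutexes, not the kernel's lglock API):

    #include <pthread.h>

    #define NR_LOCKS 4
    static pthread_mutex_t locks[NR_LOCKS];

    /* Always lock the lower index first: threads locking the pairs (1,3)
     * and (3,1) acquire them in the same order, so no ABBA deadlock. */
    static void double_lock(int a, int b)
    {
            if (a > b) { int t = a; a = b; b = t; }
            pthread_mutex_lock(&locks[a]);
            pthread_mutex_lock(&locks[b]);
    }

    static void double_unlock(int a, int b)
    {
            pthread_mutex_unlock(&locks[a]);
            pthread_mutex_unlock(&locks[b]);
    }

    int main(void)
    {
            int i;

            for (i = 0; i < NR_LOCKS; i++)
                    pthread_mutex_init(&locks[i], NULL);

            double_lock(3, 1);              /* acquires 1, then 3 */
            /* ...queue work on both CPUs here... */
            double_unlock(3, 1);
            return 0;
    }
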
diff --git a/kernel/sys.c b/kernel/sys.c
index a4e372b798a5f29535f9120b32fea70b0489f603..8571296b7ddb9b5efffed3f6fc30c29b34c33099 100644 (file)
 # define SET_TSC_CTL(a)                (-EINVAL)
 #endif
 #ifndef MPX_ENABLE_MANAGEMENT
-# define MPX_ENABLE_MANAGEMENT(a)      (-EINVAL)
+# define MPX_ENABLE_MANAGEMENT()      (-EINVAL)
 #endif
 #ifndef MPX_DISABLE_MANAGEMENT
-# define MPX_DISABLE_MANAGEMENT(a)     (-EINVAL)
+# define MPX_DISABLE_MANAGEMENT()     (-EINVAL)
 #endif
 #ifndef GET_FP_MODE
 # define GET_FP_MODE(a)                (-EINVAL)
@@ -2230,12 +2230,12 @@ SYSCALL_DEFINE5(prctl, int, option, unsigned long, arg2, unsigned long, arg3,
        case PR_MPX_ENABLE_MANAGEMENT:
                if (arg2 || arg3 || arg4 || arg5)
                        return -EINVAL;
-               error = MPX_ENABLE_MANAGEMENT(me);
+               error = MPX_ENABLE_MANAGEMENT();
                break;
        case PR_MPX_DISABLE_MANAGEMENT:
                if (arg2 || arg3 || arg4 || arg5)
                        return -EINVAL;
-               error = MPX_DISABLE_MANAGEMENT(me);
+               error = MPX_DISABLE_MANAGEMENT();
                break;
        case PR_SET_FP_MODE:
                error = SET_FP_MODE(me, arg2);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 2082b1a88fb9a451a00a759379bec8d786c3bab7..b13e9d2de302411438ba62898ca27697130d38b0 100644 (file)
@@ -349,15 +349,6 @@ static struct ctl_table kern_table[] = {
                .mode           = 0644,
                .proc_handler   = proc_dointvec,
        },
-       {
-               .procname       = "timer_migration",
-               .data           = &sysctl_timer_migration,
-               .maxlen         = sizeof(unsigned int),
-               .mode           = 0644,
-               .proc_handler   = proc_dointvec_minmax,
-               .extra1         = &zero,
-               .extra2         = &one,
-       },
 #endif /* CONFIG_SMP */
 #ifdef CONFIG_NUMA_BALANCING
        {
@@ -1132,6 +1123,15 @@ static struct ctl_table kern_table[] = {
                .extra1         = &zero,
                .extra2         = &one,
        },
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+       {
+               .procname       = "timer_migration",
+               .data           = &sysctl_timer_migration,
+               .maxlen         = sizeof(unsigned int),
+               .mode           = 0644,
+               .proc_handler   = timer_migration_handler,
+       },
+#endif
        { }
 };
 
diff --git a/kernel/time/Makefile b/kernel/time/Makefile
index 01f0312419b3cb44d8fa455d8cfaa2ad14d5ef0d..ffc4cc3dcd47b4e277df56735e11d51abdd87f6d 100644 (file)
@@ -13,19 +13,4 @@ obj-$(CONFIG_TIMER_STATS)                    += timer_stats.o
 obj-$(CONFIG_DEBUG_FS)                         += timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)                      += test_udelay.o
 
-$(obj)/time.o: $(obj)/timeconst.h
-
-quiet_cmd_hzfile = HZFILE  $@
-      cmd_hzfile = echo "hz=$(CONFIG_HZ)" > $@
-
-targets += hz.bc
-$(obj)/hz.bc: $(objtree)/include/config/hz.h FORCE
-       $(call if_changed,hzfile)
-
-quiet_cmd_bc  = BC      $@
-      cmd_bc  = bc -q $(filter-out FORCE,$^) > $@
-
-targets += timeconst.h
-$(obj)/timeconst.h: $(obj)/hz.bc $(src)/timeconst.bc FORCE
-       $(call if_changed,bc)
-
+$(obj)/time.o: $(objtree)/include/config/
diff --git a/kernel/time/alarmtimer.c b/kernel/time/alarmtimer.c
index 1b001ed1edb945cd5a95238fc563f93e8971041e..7fbba635a5499805c316c36e99910f1d96eb6fb6 100644 (file)
@@ -317,19 +317,16 @@ EXPORT_SYMBOL_GPL(alarm_init);
  * @alarm: ptr to alarm to set
  * @start: time to run the alarm
  */
-int alarm_start(struct alarm *alarm, ktime_t start)
+void alarm_start(struct alarm *alarm, ktime_t start)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
        unsigned long flags;
-       int ret;
 
        spin_lock_irqsave(&base->lock, flags);
        alarm->node.expires = start;
        alarmtimer_enqueue(base, alarm);
-       ret = hrtimer_start(&alarm->timer, alarm->node.expires,
-                               HRTIMER_MODE_ABS);
+       hrtimer_start(&alarm->timer, alarm->node.expires, HRTIMER_MODE_ABS);
        spin_unlock_irqrestore(&base->lock, flags);
-       return ret;
 }
 EXPORT_SYMBOL_GPL(alarm_start);
 
@@ -338,12 +335,12 @@ EXPORT_SYMBOL_GPL(alarm_start);
  * @alarm: ptr to alarm to set
  * @start: time relative to now to run the alarm
  */
-int alarm_start_relative(struct alarm *alarm, ktime_t start)
+void alarm_start_relative(struct alarm *alarm, ktime_t start)
 {
        struct alarm_base *base = &alarm_bases[alarm->type];
 
        start = ktime_add(start, base->gettime());
-       return alarm_start(alarm, start);
+       alarm_start(alarm, start);
 }
 EXPORT_SYMBOL_GPL(alarm_start_relative);
 
@@ -495,12 +492,12 @@ static enum alarmtimer_restart alarm_handle_timer(struct alarm *alarm,
  */
 static int alarm_clock_getres(const clockid_t which_clock, struct timespec *tp)
 {
-       clockid_t baseid = alarm_bases[clock2alarm(which_clock)].base_clockid;
-
        if (!alarmtimer_get_rtcdev())
                return -EINVAL;
 
-       return hrtimer_get_res(baseid, tp);
+       tp->tv_sec = 0;
+       tp->tv_nsec = hrtimer_resolution;
+       return 0;
 }
 
 /**
diff --git a/kernel/time/clockevents.c b/kernel/time/clockevents.c
index 11dc22a6983b55da3c2c356cb375fa27dbad0ed8..08ccc3da3ca086366eadaae89f46e0604099d661 100644 (file)
@@ -94,8 +94,8 @@ u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *evt)
 }
 EXPORT_SYMBOL_GPL(clockevent_delta2ns);
 
-static int __clockevents_set_state(struct clock_event_device *dev,
-                                  enum clock_event_state state)
+static int __clockevents_switch_state(struct clock_event_device *dev,
+                                     enum clock_event_state state)
 {
        /* Transition with legacy set_mode() callback */
        if (dev->set_mode) {
@@ -117,11 +117,7 @@ static int __clockevents_set_state(struct clock_event_device *dev,
        /* Transition with new state-specific callbacks */
        switch (state) {
        case CLOCK_EVT_STATE_DETACHED:
-               /*
-                * This is an internal state, which is guaranteed to go from
-                * SHUTDOWN to DETACHED. No driver interaction required.
-                */
-               return 0;
+               /* The clockevent device is getting replaced. Shut it down. */
 
        case CLOCK_EVT_STATE_SHUTDOWN:
                return dev->set_state_shutdown(dev);
@@ -138,32 +134,44 @@ static int __clockevents_set_state(struct clock_event_device *dev,
                        return -ENOSYS;
                return dev->set_state_oneshot(dev);
 
+       case CLOCK_EVT_STATE_ONESHOT_STOPPED:
+               /* Core internal bug */
+               if (WARN_ONCE(!clockevent_state_oneshot(dev),
+                             "Current state: %d\n",
+                             clockevent_get_state(dev)))
+                       return -EINVAL;
+
+               if (dev->set_state_oneshot_stopped)
+                       return dev->set_state_oneshot_stopped(dev);
+               else
+                       return -ENOSYS;
+
        default:
                return -ENOSYS;
        }
 }
 
 /**
- * clockevents_set_state - set the operating state of a clock event device
+ * clockevents_switch_state - set the operating state of a clock event device
  * @dev:       device to modify
  * @state:     new state
  *
  * Must be called with interrupts disabled !
  */
-void clockevents_set_state(struct clock_event_device *dev,
-                          enum clock_event_state state)
+void clockevents_switch_state(struct clock_event_device *dev,
+                             enum clock_event_state state)
 {
-       if (dev->state != state) {
-               if (__clockevents_set_state(dev, state))
+       if (clockevent_get_state(dev) != state) {
+               if (__clockevents_switch_state(dev, state))
                        return;
 
-               dev->state = state;
+               clockevent_set_state(dev, state);
 
                /*
                 * A nsec2cyc multiplicator of 0 is invalid and we'd crash
                 * on it, so fix it up and emit a warning:
                 */
-               if (state == CLOCK_EVT_STATE_ONESHOT) {
+               if (clockevent_state_oneshot(dev)) {
                        if (unlikely(!dev->mult)) {
                                dev->mult = 1;
                                WARN_ON(1);
@@ -178,7 +186,7 @@ void clockevents_set_state(struct clock_event_device *dev,
  */
 void clockevents_shutdown(struct clock_event_device *dev)
 {
-       clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
+       clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
        dev->next_event.tv64 = KTIME_MAX;
 }
 
@@ -252,7 +260,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
                delta = dev->min_delta_ns;
                dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-               if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+               if (clockevent_state_shutdown(dev))
                        return 0;
 
                dev->retries++;
@@ -289,7 +297,7 @@ static int clockevents_program_min_delta(struct clock_event_device *dev)
        delta = dev->min_delta_ns;
        dev->next_event = ktime_add_ns(ktime_get(), delta);
 
-       if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+       if (clockevent_state_shutdown(dev))
                return 0;
 
        dev->retries++;
@@ -321,9 +329,13 @@ int clockevents_program_event(struct clock_event_device *dev, ktime_t expires,
 
        dev->next_event = expires;
 
-       if (dev->state == CLOCK_EVT_STATE_SHUTDOWN)
+       if (clockevent_state_shutdown(dev))
                return 0;
 
+       /* We must be in ONESHOT state here */
+       WARN_ONCE(!clockevent_state_oneshot(dev), "Current state: %d\n",
+                 clockevent_get_state(dev));
+
        /* Shortcut for clockevent devices that can deal with ktime. */
        if (dev->features & CLOCK_EVT_FEAT_KTIME)
                return dev->set_next_ktime(expires, dev);
@@ -366,7 +378,7 @@ static int clockevents_replace(struct clock_event_device *ced)
        struct clock_event_device *dev, *newdev = NULL;
 
        list_for_each_entry(dev, &clockevent_devices, list) {
-               if (dev == ced || dev->state != CLOCK_EVT_STATE_DETACHED)
+               if (dev == ced || !clockevent_state_detached(dev))
                        continue;
 
                if (!tick_check_replacement(newdev, dev))
@@ -392,7 +404,7 @@ static int clockevents_replace(struct clock_event_device *ced)
 static int __clockevents_try_unbind(struct clock_event_device *ced, int cpu)
 {
        /* Fast track. Device is unused */
-       if (ced->state == CLOCK_EVT_STATE_DETACHED) {
+       if (clockevent_state_detached(ced)) {
                list_del_init(&ced->list);
                return 0;
        }
@@ -449,7 +461,8 @@ static int clockevents_sanity_check(struct clock_event_device *dev)
        if (dev->set_mode) {
                /* We shouldn't be supporting new modes now */
                WARN_ON(dev->set_state_periodic || dev->set_state_oneshot ||
-                       dev->set_state_shutdown || dev->tick_resume);
+                       dev->set_state_shutdown || dev->tick_resume ||
+                       dev->set_state_oneshot_stopped);
 
                BUG_ON(dev->mode != CLOCK_EVT_MODE_UNUSED);
                return 0;
@@ -484,7 +497,7 @@ void clockevents_register_device(struct clock_event_device *dev)
        BUG_ON(clockevents_sanity_check(dev));
 
        /* Initialize state to DETACHED */
-       dev->state = CLOCK_EVT_STATE_DETACHED;
+       clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
 
        if (!dev->cpumask) {
                WARN_ON(num_possible_cpus() > 1);
@@ -549,11 +562,11 @@ int __clockevents_update_freq(struct clock_event_device *dev, u32 freq)
 {
        clockevents_config(dev, freq);
 
-       if (dev->state == CLOCK_EVT_STATE_ONESHOT)
+       if (clockevent_state_oneshot(dev))
                return clockevents_program_event(dev, dev->next_event, false);
 
-       if (dev->state == CLOCK_EVT_STATE_PERIODIC)
-               return __clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+       if (clockevent_state_periodic(dev))
+               return __clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
 
        return 0;
 }
@@ -607,13 +620,13 @@ void clockevents_exchange_device(struct clock_event_device *old,
         */
        if (old) {
                module_put(old->owner);
-               clockevents_set_state(old, CLOCK_EVT_STATE_DETACHED);
+               clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
                list_del(&old->list);
                list_add(&old->list, &clockevents_released);
        }
 
        if (new) {
-               BUG_ON(new->state != CLOCK_EVT_STATE_DETACHED);
+               BUG_ON(!clockevent_state_detached(new));
                clockevents_shutdown(new);
        }
 }
@@ -626,7 +639,7 @@ void clockevents_suspend(void)
        struct clock_event_device *dev;
 
        list_for_each_entry_reverse(dev, &clockevent_devices, list)
-               if (dev->suspend)
+               if (dev->suspend && !clockevent_state_detached(dev))
                        dev->suspend(dev);
 }
 
@@ -638,7 +651,7 @@ void clockevents_resume(void)
        struct clock_event_device *dev;
 
        list_for_each_entry(dev, &clockevent_devices, list)
-               if (dev->resume)
+               if (dev->resume && !clockevent_state_detached(dev))
                        dev->resume(dev);
 }
 
@@ -669,7 +682,7 @@ void tick_cleanup_dead_cpu(int cpu)
                if (cpumask_test_cpu(cpu, dev->cpumask) &&
                    cpumask_weight(dev->cpumask) == 1 &&
                    !tick_is_broadcast_device(dev)) {
-                       BUG_ON(dev->state != CLOCK_EVT_STATE_DETACHED);
+                       BUG_ON(!clockevent_state_detached(dev));
                        list_del(&dev->list);
                }
        }
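
The conversions throughout this file replace open-coded dev->state comparisons with predicate helpers. Expressed via clockevent_get_state(), which the hunks above already use, the helpers presumably reduce to something like this sketch:

    /* Sketch of the state predicates this diff converts to. */
    static inline bool clockevent_state_detached(struct clock_event_device *dev)
    {
            return clockevent_get_state(dev) == CLOCK_EVT_STATE_DETACHED;
    }

    static inline bool clockevent_state_oneshot(struct clock_event_device *dev)
    {
            return clockevent_get_state(dev) == CLOCK_EVT_STATE_ONESHOT;
    }

Centralizing the accessors keeps the state representation private to the core, so later reshuffles of the state field need not touch every caller again.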
index 15facb1b9c606c7a5fa5ea3500ea7dd4bf523477..841b72f720e88041a99ded8852381555c307fb43 100644 (file)
@@ -23,6 +23,8 @@
  *   o Allow clocksource drivers to be unregistered
  */
 
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
 #include <linux/device.h>
 #include <linux/clocksource.h>
 #include <linux/init.h>
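
With pr_fmt() defined before the includes, every pr_warn()/pr_info() in this file is automatically prefixed with the module name, so the messages converted below need no hand-rolled prefix. A sketch of the effect (the printed prefix is illustrative):

    static void example(struct clocksource *cs)
    {
            /* pr_warn() expands to printk(KERN_WARNING pr_fmt(fmt), ...),
             * so this prints e.g. "clocksource: Override clocksource tsc ..." */
            pr_warn("Override clocksource %s is not HRT compatible\n", cs->name);
    }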
@@ -216,10 +218,11 @@ static void clocksource_watchdog(unsigned long data)
 
                /* Check the deviation from the watchdog clocksource. */
                if ((abs(cs_nsec - wd_nsec) > WATCHDOG_THRESHOLD)) {
-                       pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable, because the skew is too large:\n", cs->name);
-                       pr_warn("       '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
+                       pr_warn("timekeeping watchdog: Marking clocksource '%s' as unstable because the skew is too large:\n",
+                               cs->name);
+                       pr_warn("                      '%s' wd_now: %llx wd_last: %llx mask: %llx\n",
                                watchdog->name, wdnow, wdlast, watchdog->mask);
-                       pr_warn("       '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
+                       pr_warn("                      '%s' cs_now: %llx cs_last: %llx mask: %llx\n",
                                cs->name, csnow, cslast, cs->mask);
                        __clocksource_unstable(cs);
                        continue;
@@ -567,9 +570,8 @@ static void __clocksource_select(bool skipcur)
                 */
                if (!(cs->flags & CLOCK_SOURCE_VALID_FOR_HRES) && oneshot) {
                        /* Override clocksource cannot be used. */
-                       printk(KERN_WARNING "Override clocksource %s is not "
-                              "HRT compatible. Cannot switch while in "
-                              "HRT/NOHZ mode\n", cs->name);
+                       pr_warn("Override clocksource %s is not HRT compatible - cannot switch while in HRT/NOHZ mode\n",
+                               cs->name);
                        override_name[0] = 0;
                } else
                        /* Override clocksource can be used. */
@@ -708,8 +710,8 @@ void __clocksource_update_freq_scale(struct clocksource *cs, u32 scale, u32 freq
 
        clocksource_update_max_deferment(cs);
 
-       pr_info("clocksource %s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
-                       cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
+       pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
+               cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
 }
 EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
 
@@ -1008,12 +1010,10 @@ __setup("clocksource=", boot_override_clocksource);
 static int __init boot_override_clock(char* str)
 {
        if (!strcmp(str, "pmtmr")) {
-               printk("Warning: clock=pmtmr is deprecated. "
-                       "Use clocksource=acpi_pm.\n");
+               pr_warn("clock=pmtmr is deprecated - use clocksource=acpi_pm\n");
                return boot_override_clocksource("acpi_pm");
        }
-       printk("Warning! clock= boot option is deprecated. "
-               "Use clocksource=xyz\n");
+       pr_warn("clock= boot option is deprecated - use clocksource=xyz\n");
        return boot_override_clocksource(str);
 }
 
index 76d4bd962b19b3bab345460676954ef6f7c14568..5c7ae4b641c44aca69393a704507630a652381bf 100644 (file)
  */
 DEFINE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases) =
 {
-
        .lock = __RAW_SPIN_LOCK_UNLOCKED(hrtimer_bases.lock),
+       .seq = SEQCNT_ZERO(hrtimer_bases.seq),
        .clock_base =
        {
                {
                        .index = HRTIMER_BASE_MONOTONIC,
                        .clockid = CLOCK_MONOTONIC,
                        .get_time = &ktime_get,
-                       .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_REALTIME,
                        .clockid = CLOCK_REALTIME,
                        .get_time = &ktime_get_real,
-                       .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_BOOTTIME,
                        .clockid = CLOCK_BOOTTIME,
                        .get_time = &ktime_get_boottime,
-                       .resolution = KTIME_LOW_RES,
                },
                {
                        .index = HRTIMER_BASE_TAI,
                        .clockid = CLOCK_TAI,
                        .get_time = &ktime_get_clocktai,
-                       .resolution = KTIME_LOW_RES,
                },
        }
 };
@@ -109,33 +105,24 @@ static inline int hrtimer_clockid_to_base(clockid_t clock_id)
        return hrtimer_clock_to_base_table[clock_id];
 }
 
-
-/*
- * Get the coarse grained time at the softirq based on xtime and
- * wall_to_monotonic.
- */
-static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
-{
-       ktime_t xtim, mono, boot, tai;
-       ktime_t off_real, off_boot, off_tai;
-
-       mono = ktime_get_update_offsets_tick(&off_real, &off_boot, &off_tai);
-       boot = ktime_add(mono, off_boot);
-       xtim = ktime_add(mono, off_real);
-       tai = ktime_add(mono, off_tai);
-
-       base->clock_base[HRTIMER_BASE_REALTIME].softirq_time = xtim;
-       base->clock_base[HRTIMER_BASE_MONOTONIC].softirq_time = mono;
-       base->clock_base[HRTIMER_BASE_BOOTTIME].softirq_time = boot;
-       base->clock_base[HRTIMER_BASE_TAI].softirq_time = tai;
-}
-
 /*
  * Functions and macros which are different for UP/SMP systems are kept in a
  * single place
  */
 #ifdef CONFIG_SMP
 
+/*
+ * We require the migration_base for lock_hrtimer_base()/switch_hrtimer_base()
+ * such that hrtimer_callback_running() can unconditionally dereference
+ * timer->base->cpu_base
+ */
+static struct hrtimer_cpu_base migration_cpu_base = {
+       .seq = SEQCNT_ZERO(migration_cpu_base),
+       .clock_base = { { .cpu_base = &migration_cpu_base, }, },
+};
+
+#define migration_base migration_cpu_base.clock_base[0]
+
 /*
  * We are using hashed locking: holding per_cpu(hrtimer_bases)[n].lock
  * means that all timers which are tied to this base via timer->base are
@@ -145,8 +132,8 @@ static void hrtimer_get_softirq_time(struct hrtimer_cpu_base *base)
  * be found on the lists/queues.
  *
  * When the timer's base is locked, and the timer removed from list, it is
- * possible to set timer->base = NULL and drop the lock: the timer remains
- * locked.
+ * possible to set timer->base = &migration_base and drop the lock: the timer
+ * remains locked.
  */
 static
 struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
@@ -156,7 +143,7 @@ struct hrtimer_clock_base *lock_hrtimer_base(const struct hrtimer *timer,
 
        for (;;) {
                base = timer->base;
-               if (likely(base != NULL)) {
+               if (likely(base != &migration_base)) {
                        raw_spin_lock_irqsave(&base->cpu_base->lock, *flags);
                        if (likely(base == timer->base))
                                return base;
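
Retrying against &migration_base instead of NULL is the point of the comment above: timer->base now always refers to a structurally valid clock base, so helpers may dereference timer->base->cpu_base unconditionally. A sketch of the kind of check this enables (not the exact header change; cpu_base->running is the field this series introduces further down in hrtimer.c):

    /* Safe even while the timer is in flight between CPUs: timer->base
     * is never NULL, at worst it points at migration_base. */
    static inline bool hrtimer_callback_running(struct hrtimer *timer)
    {
            return timer->base->cpu_base->running == timer;
    }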
@@ -190,6 +177,24 @@ hrtimer_check_target(struct hrtimer *timer, struct hrtimer_clock_base *new_base)
 #endif
 }
 
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+static inline
+struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
+                                        int pinned)
+{
+       if (pinned || !base->migration_enabled)
+               return this_cpu_ptr(&hrtimer_bases);
+       return &per_cpu(hrtimer_bases, get_nohz_timer_target());
+}
+#else
+static inline
+struct hrtimer_cpu_base *get_target_base(struct hrtimer_cpu_base *base,
+                                        int pinned)
+{
+       return this_cpu_ptr(&hrtimer_bases);
+}
+#endif
+
 /*
  * Switch the timer base to the current CPU when possible.
  */
@@ -197,14 +202,13 @@ static inline struct hrtimer_clock_base *
 switch_hrtimer_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
                    int pinned)
 {
+       struct hrtimer_cpu_base *new_cpu_base, *this_base;
        struct hrtimer_clock_base *new_base;
-       struct hrtimer_cpu_base *new_cpu_base;
-       int this_cpu = smp_processor_id();
-       int cpu = get_nohz_timer_target(pinned);
        int basenum = base->index;
 
+       this_base = this_cpu_ptr(&hrtimer_bases);
+       new_cpu_base = get_target_base(this_base, pinned);
 again:
-       new_cpu_base = &per_cpu(hrtimer_bases, cpu);
        new_base = &new_cpu_base->clock_base[basenum];
 
        if (base != new_base) {
@@ -220,22 +224,24 @@ again:
                if (unlikely(hrtimer_callback_running(timer)))
                        return base;
 
-               /* See the comment in lock_timer_base() */
-               timer->base = NULL;
+               /* See the comment in lock_hrtimer_base() */
+               timer->base = &migration_base;
                raw_spin_unlock(&base->cpu_base->lock);
                raw_spin_lock(&new_base->cpu_base->lock);
 
-               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
-                       cpu = this_cpu;
+               if (new_cpu_base != this_base &&
+                   hrtimer_check_target(timer, new_base)) {
                        raw_spin_unlock(&new_base->cpu_base->lock);
                        raw_spin_lock(&base->cpu_base->lock);
+                       new_cpu_base = this_base;
                        timer->base = base;
                        goto again;
                }
                timer->base = new_base;
        } else {
-               if (cpu != this_cpu && hrtimer_check_target(timer, new_base)) {
-                       cpu = this_cpu;
+               if (new_cpu_base != this_base &&
+                   hrtimer_check_target(timer, new_base)) {
+                       new_cpu_base = this_base;
                        goto again;
                }
        }
@@ -266,21 +272,23 @@ lock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
 /*
  * Divide a ktime value by a nanosecond value
  */
-u64 __ktime_divns(const ktime_t kt, s64 div)
+s64 __ktime_divns(const ktime_t kt, s64 div)
 {
-       u64 dclc;
        int sft = 0;
+       s64 dclc;
+       u64 tmp;
 
        dclc = ktime_to_ns(kt);
+       tmp = dclc < 0 ? -dclc : dclc;
+
        /* Make sure the divisor is less than 2^32: */
        while (div >> 32) {
                sft++;
                div >>= 1;
        }
-       dclc >>= sft;
-       do_div(dclc, (unsigned long) div);
-
-       return dclc;
+       tmp >>= sft;
+       do_div(tmp, (unsigned long) div);
+       return dclc < 0 ? -tmp : tmp;
 }
 EXPORT_SYMBOL_GPL(__ktime_divns);
 #endif /* BITS_PER_LONG >= 64 */
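
The rewrite above makes __ktime_divns() correct for negative ktime values: do_div() performs only unsigned 64-by-32 division, so the magnitude is divided and the sign restored afterwards. A condensed sketch of that idea, assuming the divisor already fits in 32 bits (the shift loop above handles the case where it does not):

    static s64 signed_div_ns(s64 ns, u32 div)
    {
            u64 tmp = ns < 0 ? -ns : ns;    /* magnitude, safe for do_div() */

            do_div(tmp, div);               /* e.g. -3000 / 1000: tmp = 3 */
            return ns < 0 ? -(s64)tmp : (s64)tmp;   /* -> -3 */
    }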
@@ -441,24 +449,35 @@ static inline void debug_deactivate(struct hrtimer *timer)
 }
 
 #if defined(CONFIG_NO_HZ_COMMON) || defined(CONFIG_HIGH_RES_TIMERS)
+static inline void hrtimer_update_next_timer(struct hrtimer_cpu_base *cpu_base,
+                                            struct hrtimer *timer)
+{
+#ifdef CONFIG_HIGH_RES_TIMERS
+       cpu_base->next_timer = timer;
+#endif
+}
+
 static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 {
        struct hrtimer_clock_base *base = cpu_base->clock_base;
        ktime_t expires, expires_next = { .tv64 = KTIME_MAX };
-       int i;
+       unsigned int active = cpu_base->active_bases;
 
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++, base++) {
+       hrtimer_update_next_timer(cpu_base, NULL);
+       for (; active; base++, active >>= 1) {
                struct timerqueue_node *next;
                struct hrtimer *timer;
 
-               next = timerqueue_getnext(&base->active);
-               if (!next)
+               if (!(active & 0x01))
                        continue;
 
+               next = timerqueue_getnext(&base->active);
                timer = container_of(next, struct hrtimer, node);
                expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-               if (expires.tv64 < expires_next.tv64)
+               if (expires.tv64 < expires_next.tv64) {
                        expires_next = expires;
+                       hrtimer_update_next_timer(cpu_base, timer);
+               }
        }
        /*
         * clock_was_set() might have changed base->offset of any of
@@ -471,6 +490,16 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
 }
 #endif
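
The loop above is the new idiom for walking only the clock bases that actually have timers queued: cpu_base->active_bases is a bitmask with one bit per base, and shifting it right while advancing the base pointer both skips empty bases and terminates as soon as no bits remain. A standalone sketch of the walk:

    static void walk_active_bases(struct hrtimer_cpu_base *cpu_base)
    {
            struct hrtimer_clock_base *base = cpu_base->clock_base;
            unsigned int active = cpu_base->active_bases;  /* e.g. 0b0101 */

            for (; active; base++, active >>= 1) {
                    if (!(active & 0x01))
                            continue;       /* no queued timers on this base */
                    /* base points at a clock base with queued timers here */
            }
    }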
 
+static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
+{
+       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
+       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
+       ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
+
+       return ktime_get_update_offsets_now(&base->clock_was_set_seq,
+                                           offs_real, offs_boot, offs_tai);
+}
+
 /* High resolution timer related functions */
 #ifdef CONFIG_HIGH_RES_TIMERS
 
@@ -478,6 +507,8 @@ static ktime_t __hrtimer_get_next_event(struct hrtimer_cpu_base *cpu_base)
  * High resolution timer enabled ?
  */
 static int hrtimer_hres_enabled __read_mostly  = 1;
+unsigned int hrtimer_resolution __read_mostly = LOW_RES_NSEC;
+EXPORT_SYMBOL_GPL(hrtimer_resolution);
 
 /*
  * Enable / Disable high resolution mode
@@ -506,9 +537,14 @@ static inline int hrtimer_is_hres_enabled(void)
 /*
  * Is the high resolution mode active ?
  */
+static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *cpu_base)
+{
+       return cpu_base->hres_active;
+}
+
 static inline int hrtimer_hres_active(void)
 {
-       return __this_cpu_read(hrtimer_bases.hres_active);
+       return __hrtimer_hres_active(this_cpu_ptr(&hrtimer_bases));
 }
 
 /*
@@ -519,7 +555,12 @@ static inline int hrtimer_hres_active(void)
 static void
 hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
 {
-       ktime_t expires_next = __hrtimer_get_next_event(cpu_base);
+       ktime_t expires_next;
+
+       if (!cpu_base->hres_active)
+               return;
+
+       expires_next = __hrtimer_get_next_event(cpu_base);
 
        if (skip_equal && expires_next.tv64 == cpu_base->expires_next.tv64)
                return;
@@ -543,63 +584,53 @@ hrtimer_force_reprogram(struct hrtimer_cpu_base *cpu_base, int skip_equal)
        if (cpu_base->hang_detected)
                return;
 
-       if (cpu_base->expires_next.tv64 != KTIME_MAX)
-               tick_program_event(cpu_base->expires_next, 1);
+       tick_program_event(cpu_base->expires_next, 1);
 }
 
 /*
- * Shared reprogramming for clock_realtime and clock_monotonic
- *
  * When a timer is enqueued and expires earlier than the already enqueued
  * timers, we have to check, whether it expires earlier than the timer for
  * which the clock event device was armed.
  *
- * Note, that in case the state has HRTIMER_STATE_CALLBACK set, no reprogramming
- * and no expiry check happens. The timer gets enqueued into the rbtree. The
- * reprogramming and expiry check is done in the hrtimer_interrupt or in the
- * softirq.
- *
  * Called with interrupts disabled and base->cpu_base.lock held
  */
-static int hrtimer_reprogram(struct hrtimer *timer,
-                            struct hrtimer_clock_base *base)
+static void hrtimer_reprogram(struct hrtimer *timer,
+                             struct hrtimer_clock_base *base)
 {
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
        ktime_t expires = ktime_sub(hrtimer_get_expires(timer), base->offset);
-       int res;
 
        WARN_ON_ONCE(hrtimer_get_expires_tv64(timer) < 0);
 
        /*
-        * When the callback is running, we do not reprogram the clock event
-        * device. The timer callback is either running on a different CPU or
-        * the callback is executed in the hrtimer_interrupt context. The
-        * reprogramming is handled either by the softirq, which called the
-        * callback or at the end of the hrtimer_interrupt.
+        * If the timer is not on the current cpu, we cannot reprogram
+        * the other cpu's clock event device.
         */
-       if (hrtimer_callback_running(timer))
-               return 0;
+       if (base->cpu_base != cpu_base)
+               return;
+
+       /*
+        * If the hrtimer interrupt is running, then it will
+        * reevaluate the clock bases and reprogram the clock event
+        * device. The callbacks are always executed in hard interrupt
+        * context so we don't need an extra check for a running
+        * callback.
+        */
+       if (cpu_base->in_hrtirq)
+               return;
 
        /*
         * CLOCK_REALTIME timer might be requested with an absolute
-        * expiry time which is less than base->offset. Nothing wrong
-        * about that, just avoid to call into the tick code, which
-        * has now objections against negative expiry values.
+        * expiry time which is less than base->offset. Set it to 0.
         */
        if (expires.tv64 < 0)
-               return -ETIME;
+               expires.tv64 = 0;
 
        if (expires.tv64 >= cpu_base->expires_next.tv64)
-               return 0;
+               return;
 
-       /*
-        * When the target cpu of the timer is currently executing
-        * hrtimer_interrupt(), then we do not touch the clock event
-        * device. hrtimer_interrupt() will reevaluate all clock bases
-        * before reprogramming the device.
-        */
-       if (cpu_base->in_hrtirq)
-               return 0;
+       /* Update the pointer to the next expiring timer */
+       cpu_base->next_timer = timer;
 
        /*
         * If a hang was detected in the last timer interrupt then we
@@ -608,15 +639,14 @@ static int hrtimer_reprogram(struct hrtimer *timer,
         * to make progress.
         */
        if (cpu_base->hang_detected)
-               return 0;
+               return;
 
        /*
-        * Clockevents returns -ETIME, when the event was in the past.
+        * Program the timer hardware. We enforce the expiry for
+        * events which are already in the past.
         */
-       res = tick_program_event(expires, 0);
-       if (!IS_ERR_VALUE(res))
-               cpu_base->expires_next = expires;
-       return res;
+       cpu_base->expires_next = expires;
+       tick_program_event(expires, 1);
 }
 
 /*
@@ -628,15 +658,6 @@ static inline void hrtimer_init_hres(struct hrtimer_cpu_base *base)
        base->hres_active = 0;
 }
 
-static inline ktime_t hrtimer_update_base(struct hrtimer_cpu_base *base)
-{
-       ktime_t *offs_real = &base->clock_base[HRTIMER_BASE_REALTIME].offset;
-       ktime_t *offs_boot = &base->clock_base[HRTIMER_BASE_BOOTTIME].offset;
-       ktime_t *offs_tai = &base->clock_base[HRTIMER_BASE_TAI].offset;
-
-       return ktime_get_update_offsets_now(offs_real, offs_boot, offs_tai);
-}
-
 /*
  * Retrigger next event is called after clock was set
  *
@@ -646,7 +667,7 @@ static void retrigger_next_event(void *arg)
 {
        struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
-       if (!hrtimer_hres_active())
+       if (!base->hres_active)
                return;
 
        raw_spin_lock(&base->lock);
@@ -660,29 +681,19 @@ static void retrigger_next_event(void *arg)
  */
 static int hrtimer_switch_to_hres(void)
 {
-       int i, cpu = smp_processor_id();
-       struct hrtimer_cpu_base *base = &per_cpu(hrtimer_bases, cpu);
-       unsigned long flags;
-
-       if (base->hres_active)
-               return 1;
-
-       local_irq_save(flags);
+       struct hrtimer_cpu_base *base = this_cpu_ptr(&hrtimer_bases);
 
        if (tick_init_highres()) {
-               local_irq_restore(flags);
                printk(KERN_WARNING "Could not switch to high resolution "
-                                   "mode on CPU %d\n", cpu);
+                                   "mode on CPU %d\n", base->cpu);
                return 0;
        }
        base->hres_active = 1;
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++)
-               base->clock_base[i].resolution = KTIME_HIGH_RES;
+       hrtimer_resolution = HIGH_RES_NSEC;
 
        tick_setup_sched_timer();
        /* "Retrigger" the interrupt to get things going */
        retrigger_next_event(NULL);
-       local_irq_restore(flags);
        return 1;
 }
 
@@ -704,6 +715,7 @@ void clock_was_set_delayed(void)
 
 #else
 
+static inline int __hrtimer_hres_active(struct hrtimer_cpu_base *b) { return 0; }
 static inline int hrtimer_hres_active(void) { return 0; }
 static inline int hrtimer_is_hres_enabled(void) { return 0; }
 static inline int hrtimer_switch_to_hres(void) { return 0; }
@@ -801,6 +813,14 @@ void unlock_hrtimer_base(const struct hrtimer *timer, unsigned long *flags)
  *
  * Forward the timer expiry so it will expire in the future.
  * Returns the number of overruns.
+ *
+ * Can be safely called from the callback function of @timer. If
+ * called from other contexts, @timer must neither be enqueued nor
+ * running the callback, and the caller needs to take care of
+ * serialization.
+ *
+ * Note: This only updates the timer expiry value and does not requeue
+ * the timer.
  */
 u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
 {
@@ -812,8 +832,11 @@ u64 hrtimer_forward(struct hrtimer *timer, ktime_t now, ktime_t interval)
        if (delta.tv64 < 0)
                return 0;
 
-       if (interval.tv64 < timer->base->resolution.tv64)
-               interval.tv64 = timer->base->resolution.tv64;
+       if (WARN_ON(timer->state & HRTIMER_STATE_ENQUEUED))
+               return 0;
+
+       if (interval.tv64 < hrtimer_resolution)
+               interval.tv64 = hrtimer_resolution;
 
        if (unlikely(delta.tv64 >= interval.tv64)) {
                s64 incr = ktime_to_ns(interval);
@@ -847,16 +870,11 @@ static int enqueue_hrtimer(struct hrtimer *timer,
 {
        debug_activate(timer);
 
-       timerqueue_add(&base->active, &timer->node);
        base->cpu_base->active_bases |= 1 << base->index;
 
-       /*
-        * HRTIMER_STATE_ENQUEUED is or'ed to the current state to preserve the
-        * state of a possibly running callback.
-        */
-       timer->state |= HRTIMER_STATE_ENQUEUED;
+       timer->state = HRTIMER_STATE_ENQUEUED;
 
-       return (&timer->node == base->active.next);
+       return timerqueue_add(&base->active, &timer->node);
 }
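
The simplification above works because timerqueue_add() already reports whether the inserted node became the new head of the queue, i.e. the earliest-expiring timer; that is exactly the "leftmost" answer enqueue_hrtimer() previously computed by comparing against base->active.next. A usage sketch of why only that case matters:

    static void start_timer(struct hrtimer *timer,
                            struct hrtimer_clock_base *new_base)
    {
            /* Only a timer that became the queue head can require the
             * clock event hardware to be reprogrammed to an earlier expiry. */
            if (enqueue_hrtimer(timer, new_base))
                    hrtimer_reprogram(timer, new_base);
    }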
 
 /*
@@ -873,39 +891,38 @@ static void __remove_hrtimer(struct hrtimer *timer,
                             struct hrtimer_clock_base *base,
                             unsigned long newstate, int reprogram)
 {
-       struct timerqueue_node *next_timer;
-       if (!(timer->state & HRTIMER_STATE_ENQUEUED))
-               goto out;
+       struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+       unsigned int state = timer->state;
+
+       timer->state = newstate;
+       if (!(state & HRTIMER_STATE_ENQUEUED))
+               return;
+
+       if (!timerqueue_del(&base->active, &timer->node))
+               cpu_base->active_bases &= ~(1 << base->index);
 
-       next_timer = timerqueue_getnext(&base->active);
-       timerqueue_del(&base->active, &timer->node);
-       if (&timer->node == next_timer) {
 #ifdef CONFIG_HIGH_RES_TIMERS
-               /* Reprogram the clock event device. if enabled */
-               if (reprogram && hrtimer_hres_active()) {
-                       ktime_t expires;
-
-                       expires = ktime_sub(hrtimer_get_expires(timer),
-                                           base->offset);
-                       if (base->cpu_base->expires_next.tv64 == expires.tv64)
-                               hrtimer_force_reprogram(base->cpu_base, 1);
-               }
+       /*
+        * Note: If reprogram is false we do not update
+        * cpu_base->next_timer. This happens when we remove the first
+        * timer on a remote cpu. No harm as we never dereference
+        * cpu_base->next_timer. So the worst thing that can happen is
+        * a superfluous call to hrtimer_force_reprogram() on the
+        * remote cpu later on if the same timer gets enqueued again.
+        */
+       if (reprogram && timer == cpu_base->next_timer)
+               hrtimer_force_reprogram(cpu_base, 1);
 #endif
-       }
-       if (!timerqueue_getnext(&base->active))
-               base->cpu_base->active_bases &= ~(1 << base->index);
-out:
-       timer->state = newstate;
 }
 
 /*
  * remove hrtimer, called with base lock held
  */
 static inline int
-remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
+remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base, bool restart)
 {
        if (hrtimer_is_queued(timer)) {
-               unsigned long state;
+               unsigned long state = timer->state;
                int reprogram;
 
                /*
@@ -919,30 +936,35 @@ remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *base)
                debug_deactivate(timer);
                timer_stats_hrtimer_clear_start_info(timer);
                reprogram = base->cpu_base == this_cpu_ptr(&hrtimer_bases);
-               /*
-                * We must preserve the CALLBACK state flag here,
-                * otherwise we could move the timer base in
-                * switch_hrtimer_base.
-                */
-               state = timer->state & HRTIMER_STATE_CALLBACK;
+
+               if (!restart)
+                       state = HRTIMER_STATE_INACTIVE;
+
                __remove_hrtimer(timer, base, state, reprogram);
                return 1;
        }
        return 0;
 }
 
-int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-               unsigned long delta_ns, const enum hrtimer_mode mode,
-               int wakeup)
+/**
+ * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
+ * @timer:     the timer to be added
+ * @tim:       expiry time
+ * @delta_ns:  "slack" range for the timer
+ * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
+ *             relative (HRTIMER_MODE_REL)
+ */
+void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
+                           unsigned long delta_ns, const enum hrtimer_mode mode)
 {
        struct hrtimer_clock_base *base, *new_base;
        unsigned long flags;
-       int ret, leftmost;
+       int leftmost;
 
        base = lock_hrtimer_base(timer, &flags);
 
        /* Remove an active timer from the queue: */
-       ret = remove_hrtimer(timer, base);
+       remove_hrtimer(timer, base, true);
 
        if (mode & HRTIMER_MODE_REL) {
                tim = ktime_add_safe(tim, base->get_time());
@@ -954,7 +976,7 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
                 * timeouts. This will go away with the GTOD framework.
                 */
 #ifdef CONFIG_TIME_LOW_RES
-               tim = ktime_add_safe(tim, base->resolution);
+               tim = ktime_add_safe(tim, ktime_set(0, hrtimer_resolution));
 #endif
        }
 
@@ -966,84 +988,24 @@ int __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
        timer_stats_hrtimer_set_start_info(timer);
 
        leftmost = enqueue_hrtimer(timer, new_base);
-
-       if (!leftmost) {
-               unlock_hrtimer_base(timer, &flags);
-               return ret;
-       }
+       if (!leftmost)
+               goto unlock;
 
        if (!hrtimer_is_hres_active(timer)) {
                /*
                 * Kick to reschedule the next tick to handle the new timer
                 * on dynticks target.
                 */
-               wake_up_nohz_cpu(new_base->cpu_base->cpu);
-       } else if (new_base->cpu_base == this_cpu_ptr(&hrtimer_bases) &&
-                       hrtimer_reprogram(timer, new_base)) {
-               /*
-                * Only allow reprogramming if the new base is on this CPU.
-                * (it might still be on another CPU if the timer was pending)
-                *
-                * XXX send_remote_softirq() ?
-                */
-               if (wakeup) {
-                       /*
-                        * We need to drop cpu_base->lock to avoid a
-                        * lock ordering issue vs. rq->lock.
-                        */
-                       raw_spin_unlock(&new_base->cpu_base->lock);
-                       raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-                       local_irq_restore(flags);
-                       return ret;
-               } else {
-                       __raise_softirq_irqoff(HRTIMER_SOFTIRQ);
-               }
+               if (new_base->cpu_base->nohz_active)
+                       wake_up_nohz_cpu(new_base->cpu_base->cpu);
+       } else {
+               hrtimer_reprogram(timer, new_base);
        }
-
+unlock:
        unlock_hrtimer_base(timer, &flags);
-
-       return ret;
-}
-EXPORT_SYMBOL_GPL(__hrtimer_start_range_ns);
-
-/**
- * hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
- * @timer:     the timer to be added
- * @tim:       expiry time
- * @delta_ns:  "slack" range for the timer
- * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
- *             relative (HRTIMER_MODE_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,
-               unsigned long delta_ns, const enum hrtimer_mode mode)
-{
-       return __hrtimer_start_range_ns(timer, tim, delta_ns, mode, 1);
 }
 EXPORT_SYMBOL_GPL(hrtimer_start_range_ns);
 
-/**
- * hrtimer_start - (re)start an hrtimer on the current CPU
- * @timer:     the timer to be added
- * @tim:       expiry time
- * @mode:      expiry mode: absolute (HRTIMER_MODE_ABS) or
- *             relative (HRTIMER_MODE_REL)
- *
- * Returns:
- *  0 on success
- *  1 when the timer was active
- */
-int
-hrtimer_start(struct hrtimer *timer, ktime_t tim, const enum hrtimer_mode mode)
-{
-       return __hrtimer_start_range_ns(timer, tim, 0, mode, 1);
-}
-EXPORT_SYMBOL_GPL(hrtimer_start);
-
-
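
hrtimer_start() and its kernel-doc disappear from this file; with the return value gone, callers presumably get a trivial wrapper in the header along these lines (a sketch; the actual header hunk is not shown here):

    /* (Re)start the timer with zero slack. */
    static inline void hrtimer_start(struct hrtimer *timer, ktime_t tim,
                                     const enum hrtimer_mode mode)
    {
            hrtimer_start_range_ns(timer, tim, 0, mode);
    }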
 /**
  * hrtimer_try_to_cancel - try to deactivate a timer
  * @timer:     hrtimer to stop
@@ -1060,10 +1022,19 @@ int hrtimer_try_to_cancel(struct hrtimer *timer)
        unsigned long flags;
        int ret = -1;
 
+       /*
+        * Check lockless first. If the timer is not active (neither
+        * enqueued nor running the callback), there is nothing to do
+        * here. The
+        * base lock does not serialize against a concurrent enqueue,
+        * so we can avoid taking it.
+        */
+       if (!hrtimer_active(timer))
+               return 0;
+
        base = lock_hrtimer_base(timer, &flags);
 
        if (!hrtimer_callback_running(timer))
-               ret = remove_hrtimer(timer, base);
+               ret = remove_hrtimer(timer, base, false);
 
        unlock_hrtimer_base(timer, &flags);
 
@@ -1113,26 +1084,22 @@ EXPORT_SYMBOL_GPL(hrtimer_get_remaining);
 /**
  * hrtimer_get_next_event - get the time until next expiry event
  *
- * Returns the delta to the next expiry event or KTIME_MAX if no timer
- * is pending.
+ * Returns the next expiry time or KTIME_MAX if no timer is pending.
  */
-ktime_t hrtimer_get_next_event(void)
+u64 hrtimer_get_next_event(void)
 {
        struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       ktime_t mindelta = { .tv64 = KTIME_MAX };
+       u64 expires = KTIME_MAX;
        unsigned long flags;
 
        raw_spin_lock_irqsave(&cpu_base->lock, flags);
 
-       if (!hrtimer_hres_active())
-               mindelta = ktime_sub(__hrtimer_get_next_event(cpu_base),
-                                    ktime_get());
+       if (!__hrtimer_hres_active(cpu_base))
+               expires = __hrtimer_get_next_event(cpu_base).tv64;
 
        raw_spin_unlock_irqrestore(&cpu_base->lock, flags);
 
-       if (mindelta.tv64 < 0)
-               mindelta.tv64 = 0;
-       return mindelta;
+       return expires;
 }
 #endif
 
@@ -1174,37 +1141,73 @@ void hrtimer_init(struct hrtimer *timer, clockid_t clock_id,
 }
 EXPORT_SYMBOL_GPL(hrtimer_init);
 
-/**
- * hrtimer_get_res - get the timer resolution for a clock
- * @which_clock: which clock to query
- * @tp:                 pointer to timespec variable to store the resolution
+/*
+ * A timer is active, when it is enqueued into the rbtree or the
+ * callback function is running or it's in the state of being migrated
+ * to another cpu.
  *
- * Store the resolution of the clock selected by @which_clock in the
- * variable pointed to by @tp.
+ * It is important for this function to not return a false negative.
  */
-int hrtimer_get_res(const clockid_t which_clock, struct timespec *tp)
+bool hrtimer_active(const struct hrtimer *timer)
 {
        struct hrtimer_cpu_base *cpu_base;
-       int base = hrtimer_clockid_to_base(which_clock);
+       unsigned int seq;
 
-       cpu_base = raw_cpu_ptr(&hrtimer_bases);
-       *tp = ktime_to_timespec(cpu_base->clock_base[base].resolution);
+       do {
+               cpu_base = READ_ONCE(timer->base->cpu_base);
+               seq = raw_read_seqcount_begin(&cpu_base->seq);
 
-       return 0;
+               if (timer->state != HRTIMER_STATE_INACTIVE ||
+                   cpu_base->running == timer)
+                       return true;
+
+       } while (read_seqcount_retry(&cpu_base->seq, seq) ||
+                cpu_base != READ_ONCE(timer->base->cpu_base));
+
+       return false;
 }
-EXPORT_SYMBOL_GPL(hrtimer_get_res);
+EXPORT_SYMBOL_GPL(hrtimer_active);
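
hrtimer_active() is the new lockless answer to a timer's liveness, built on the seqcount written by __run_hrtimer() below. The general shape of the read side, independent of hrtimers, is the classic seqcount retry loop (a sketch):

    /* Read two values that a writer updates under write-seqcount
     * protection; retry until both come from the same write-side epoch. */
    static u64 read_consistent_pair(seqcount_t *sc, const u64 *a, const u64 *b)
    {
            unsigned int seq;
            u64 x, y;

            do {
                    seq = raw_read_seqcount_begin(sc);
                    x = READ_ONCE(*a);
                    y = READ_ONCE(*b);
            } while (read_seqcount_retry(sc, seq));

            return x + y;
    }

hrtimer_active() adds one twist on top of this: because timer->base itself can change under the reader, the loop also re-checks that the cpu_base it sampled is still the timer's cpu_base.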
 
-static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
+/*
+ * The write_seqcount_barrier()s in __run_hrtimer() split the thing into 3
+ * distinct sections:
+ *
+ *  - queued:  the timer is queued
+ *  - callback:        the timer is being run
+ *  - post:    the timer is inactive or (re)queued
+ *
+ * On the read side we ensure we observe timer->state and cpu_base->running
+ * from the same section; if anything changed while we looked at it, we retry.
+ * This includes timer->base changing because sequence numbers alone are
+ * insufficient for that.
+ *
+ * The sequence numbers are required because otherwise we could still observe
+ * a false negative if the read side got smeared over multiple consecutive
+ * __run_hrtimer() invocations.
+ */
+
+static void __run_hrtimer(struct hrtimer_cpu_base *cpu_base,
+                         struct hrtimer_clock_base *base,
+                         struct hrtimer *timer, ktime_t *now)
 {
-       struct hrtimer_clock_base *base = timer->base;
-       struct hrtimer_cpu_base *cpu_base = base->cpu_base;
        enum hrtimer_restart (*fn)(struct hrtimer *);
        int restart;
 
-       WARN_ON(!irqs_disabled());
+       lockdep_assert_held(&cpu_base->lock);
 
        debug_deactivate(timer);
-       __remove_hrtimer(timer, base, HRTIMER_STATE_CALLBACK, 0);
+       cpu_base->running = timer;
+
+       /*
+        * Separate the ->running assignment from the ->state assignment.
+        *
+        * As with a regular write barrier, this ensures the read side in
+        * hrtimer_active() cannot observe cpu_base->running == NULL &&
+        * timer->state == INACTIVE.
+        */
+       raw_write_seqcount_barrier(&cpu_base->seq);
+
+       __remove_hrtimer(timer, base, HRTIMER_STATE_INACTIVE, 0);
        timer_stats_account_hrtimer(timer);
        fn = timer->function;
 
@@ -1220,58 +1223,43 @@ static void __run_hrtimer(struct hrtimer *timer, ktime_t *now)
        raw_spin_lock(&cpu_base->lock);
 
        /*
-        * Note: We clear the CALLBACK bit after enqueue_hrtimer and
+        * Note: We clear the running state after enqueue_hrtimer and
         * we do not reprogram the event hardware. Happens either in
         * hrtimer_start_range_ns() or in hrtimer_interrupt()
+        *
+        * Note: Because we dropped the cpu_base->lock above,
+        * hrtimer_start_range_ns() can have popped in and enqueued the timer
+        * for us already.
         */
-       if (restart != HRTIMER_NORESTART) {
-               BUG_ON(timer->state != HRTIMER_STATE_CALLBACK);
+       if (restart != HRTIMER_NORESTART &&
+           !(timer->state & HRTIMER_STATE_ENQUEUED))
                enqueue_hrtimer(timer, base);
-       }
 
-       WARN_ON_ONCE(!(timer->state & HRTIMER_STATE_CALLBACK));
+       /*
+        * Separate the ->running assignment from the ->state assignment.
+        *
+        * As with a regular write barrier, this ensures the read side in
+        * hrtimer_active() cannot observe cpu_base->running == NULL &&
+        * timer->state == INACTIVE.
+        */
+       raw_write_seqcount_barrier(&cpu_base->seq);
 
-       timer->state &= ~HRTIMER_STATE_CALLBACK;
+       WARN_ON_ONCE(cpu_base->running != timer);
+       cpu_base->running = NULL;
 }
 
-#ifdef CONFIG_HIGH_RES_TIMERS
-
-/*
- * High resolution timer interrupt
- * Called with interrupts disabled
- */
-void hrtimer_interrupt(struct clock_event_device *dev)
+static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)
 {
-       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       ktime_t expires_next, now, entry_time, delta;
-       int i, retries = 0;
-
-       BUG_ON(!cpu_base->hres_active);
-       cpu_base->nr_events++;
-       dev->next_event.tv64 = KTIME_MAX;
-
-       raw_spin_lock(&cpu_base->lock);
-       entry_time = now = hrtimer_update_base(cpu_base);
-retry:
-       cpu_base->in_hrtirq = 1;
-       /*
-        * We set expires_next to KTIME_MAX here with cpu_base->lock
-        * held to prevent that a timer is enqueued in our queue via
-        * the migration code. This does not affect enqueueing of
-        * timers which run their callback and need to be requeued on
-        * this CPU.
-        */
-       cpu_base->expires_next.tv64 = KTIME_MAX;
+       struct hrtimer_clock_base *base = cpu_base->clock_base;
+       unsigned int active = cpu_base->active_bases;
 
-       for (i = 0; i < HRTIMER_MAX_CLOCK_BASES; i++) {
-               struct hrtimer_clock_base *base;
+       for (; active; base++, active >>= 1) {
                struct timerqueue_node *node;
                ktime_t basenow;
 
-               if (!(cpu_base->active_bases & (1 << i)))
+               if (!(active & 0x01))
                        continue;
 
-               base = cpu_base->clock_base + i;
                basenow = ktime_add(now, base->offset);
 
                while ((node = timerqueue_getnext(&base->active))) {
@@ -1294,9 +1282,42 @@ retry:
                        if (basenow.tv64 < hrtimer_get_softexpires_tv64(timer))
                                break;
 
-                       __run_hrtimer(timer, &basenow);
+                       __run_hrtimer(cpu_base, base, timer, &basenow);
                }
        }
+}
+
+#ifdef CONFIG_HIGH_RES_TIMERS
+
+/*
+ * High resolution timer interrupt
+ * Called with interrupts disabled
+ */
+void hrtimer_interrupt(struct clock_event_device *dev)
+{
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+       ktime_t expires_next, now, entry_time, delta;
+       int retries = 0;
+
+       BUG_ON(!cpu_base->hres_active);
+       cpu_base->nr_events++;
+       dev->next_event.tv64 = KTIME_MAX;
+
+       raw_spin_lock(&cpu_base->lock);
+       entry_time = now = hrtimer_update_base(cpu_base);
+retry:
+       cpu_base->in_hrtirq = 1;
+       /*
+        * We set expires_next to KTIME_MAX here with cpu_base->lock
+        * held to prevent that a timer is enqueued in our queue via
+        * the migration code. This does not affect enqueueing of
+        * timers which run their callback and need to be requeued on
+        * this CPU.
+        */
+       cpu_base->expires_next.tv64 = KTIME_MAX;
+
+       __hrtimer_run_queues(cpu_base, now);
+
        /* Reevaluate the clock bases for the next expiry */
        expires_next = __hrtimer_get_next_event(cpu_base);
        /*
@@ -1308,8 +1329,7 @@ retry:
        raw_spin_unlock(&cpu_base->lock);
 
        /* Reprogramming necessary ? */
-       if (expires_next.tv64 == KTIME_MAX ||
-           !tick_program_event(expires_next, 0)) {
+       if (!tick_program_event(expires_next, 0)) {
                cpu_base->hang_detected = 0;
                return;
        }
@@ -1342,8 +1362,8 @@ retry:
        cpu_base->hang_detected = 1;
        raw_spin_unlock(&cpu_base->lock);
        delta = ktime_sub(now, entry_time);
-       if (delta.tv64 > cpu_base->max_hang_time.tv64)
-               cpu_base->max_hang_time = delta;
+       if ((unsigned int)delta.tv64 > cpu_base->max_hang_time)
+               cpu_base->max_hang_time = (unsigned int) delta.tv64;
        /*
         * Limit it to a sensible value as we enforce a longer
         * delay. Give the CPU at least 100ms to catch up.
@@ -1361,7 +1381,7 @@ retry:
  * local version of hrtimer_peek_ahead_timers() called with interrupts
  * disabled.
  */
-static void __hrtimer_peek_ahead_timers(void)
+static inline void __hrtimer_peek_ahead_timers(void)
 {
        struct tick_device *td;
 
@@ -1373,29 +1393,6 @@ static void __hrtimer_peek_ahead_timers(void)
                hrtimer_interrupt(td->evtdev);
 }
 
-/**
- * hrtimer_peek_ahead_timers -- run soft-expired timers now
- *
- * hrtimer_peek_ahead_timers will peek at the timer queue of
- * the current cpu and check if there are any timers for which
- * the soft expires time has passed. If any such timers exist,
- * they are run immediately and then removed from the timer queue.
- *
- */
-void hrtimer_peek_ahead_timers(void)
-{
-       unsigned long flags;
-
-       local_irq_save(flags);
-       __hrtimer_peek_ahead_timers();
-       local_irq_restore(flags);
-}
-
-static void run_hrtimer_softirq(struct softirq_action *h)
-{
-       hrtimer_peek_ahead_timers();
-}
-
 #else /* CONFIG_HIGH_RES_TIMERS */
 
 static inline void __hrtimer_peek_ahead_timers(void) { }
@@ -1403,66 +1400,32 @@ static inline void __hrtimer_peek_ahead_timers(void) { }
 #endif /* !CONFIG_HIGH_RES_TIMERS */
 
 /*
- * Called from timer softirq every jiffy, expire hrtimers:
- *
- * For HRT its the fall back code to run the softirq in the timer
- * softirq context in case the hrtimer initialization failed or has
- * not been done yet.
+ * Called from run_local_timers in hardirq context every jiffy
  */
-void hrtimer_run_pending(void)
+void hrtimer_run_queues(void)
 {
-       if (hrtimer_hres_active())
+       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
+       ktime_t now;
+
+       if (__hrtimer_hres_active(cpu_base))
                return;
 
        /*
-        * This _is_ ugly: We have to check in the softirq context,
-        * whether we can switch to highres and / or nohz mode. The
-        * clocksource switch happens in the timer interrupt with
-        * xtime_lock held. Notification from there only sets the
-        * check bit in the tick_oneshot code, otherwise we might
-        * deadlock vs. xtime_lock.
+        * This _is_ ugly: We have to check periodically whether we
+        * can switch to highres and/or nohz mode. The clocksource
+        * switch happens with xtime_lock held. Notification from
+        * there only sets the check bit in the tick_oneshot code,
+        * otherwise we might deadlock vs. xtime_lock.
         */
-       if (tick_check_oneshot_change(!hrtimer_is_hres_enabled()))
+       if (tick_check_oneshot_change(!hrtimer_is_hres_enabled())) {
                hrtimer_switch_to_hres();
-}
-
-/*
- * Called from hardirq context every jiffy
- */
-void hrtimer_run_queues(void)
-{
-       struct timerqueue_node *node;
-       struct hrtimer_cpu_base *cpu_base = this_cpu_ptr(&hrtimer_bases);
-       struct hrtimer_clock_base *base;
-       int index, gettime = 1;
-
-       if (hrtimer_hres_active())
                return;
-
-       for (index = 0; index < HRTIMER_MAX_CLOCK_BASES; index++) {
-               base = &cpu_base->clock_base[index];
-               if (!timerqueue_getnext(&base->active))
-                       continue;
-
-               if (gettime) {
-                       hrtimer_get_softirq_time(cpu_base);
-                       gettime = 0;
-               }
-
-               raw_spin_lock(&cpu_base->lock);
-
-               while ((node = timerqueue_getnext(&base->active))) {
-                       struct hrtimer *timer;
-
-                       timer = container_of(node, struct hrtimer, node);
-                       if (base->softirq_time.tv64 <=
-                                       hrtimer_get_expires_tv64(timer))
-                               break;
-
-                       __run_hrtimer(timer, &base->softirq_time);
-               }
-               raw_spin_unlock(&cpu_base->lock);
        }
+
+       raw_spin_lock(&cpu_base->lock);
+       now = hrtimer_update_base(cpu_base);
+       __hrtimer_run_queues(cpu_base, now);
+       raw_spin_unlock(&cpu_base->lock);
 }
 
 /*
@@ -1495,8 +1458,6 @@ static int __sched do_nanosleep(struct hrtimer_sleeper *t, enum hrtimer_mode mod
        do {
                set_current_state(TASK_INTERRUPTIBLE);
                hrtimer_start_expires(&t->timer, mode);
-               if (!hrtimer_active(&t->timer))
-                       t->task = NULL;
 
                if (likely(t->task))
                        freezable_schedule();
@@ -1640,11 +1601,11 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                debug_deactivate(timer);
 
                /*
-                * Mark it as STATE_MIGRATE not INACTIVE otherwise the
+                * Mark it as ENQUEUED not INACTIVE otherwise the
                 * timer could be seen as !active and just vanish away
                 * under us on another CPU
                 */
-               __remove_hrtimer(timer, old_base, HRTIMER_STATE_MIGRATE, 0);
+               __remove_hrtimer(timer, old_base, HRTIMER_STATE_ENQUEUED, 0);
                timer->base = new_base;
                /*
                 * Enqueue the timers on the new cpu. This does not
@@ -1655,9 +1616,6 @@ static void migrate_hrtimer_list(struct hrtimer_clock_base *old_base,
                 * event device.
                 */
                enqueue_hrtimer(timer, new_base);
-
-               /* Clear the migration state bit */
-               timer->state &= ~HRTIMER_STATE_MIGRATE;
        }
 }
 
@@ -1729,9 +1687,6 @@ void __init hrtimers_init(void)
        hrtimer_cpu_notify(&hrtimers_nb, (unsigned long)CPU_UP_PREPARE,
                          (void *)(long)smp_processor_id());
        register_cpu_notifier(&hrtimers_nb);
-#ifdef CONFIG_HIGH_RES_TIMERS
-       open_softirq(HRTIMER_SOFTIRQ, run_hrtimer_softirq);
-#endif
 }
 
 /**
@@ -1770,8 +1725,6 @@ schedule_hrtimeout_range_clock(ktime_t *expires, unsigned long delta,
        hrtimer_init_sleeper(&t, current);
 
        hrtimer_start_expires(&t.timer, mode);
-       if (!hrtimer_active(&t.timer))
-               t.task = NULL;
 
        if (likely(t.task))
                schedule();
index 7a681003001c0ee75631e2c5c56e528ec0ea98df..fb4d98c7fd43e715d795d8df2c522c80b2204361 100644 (file)
@@ -35,6 +35,7 @@ unsigned long                 tick_nsec;
 static u64                     tick_length;
 static u64                     tick_length_base;
 
+#define SECS_PER_DAY           86400
 #define MAX_TICKADJ            500LL           /* usecs */
 #define MAX_TICKADJ_SCALED \
        (((MAX_TICKADJ * NSEC_PER_USEC) << NTP_SCALE_SHIFT) / NTP_INTERVAL_FREQ)
@@ -76,6 +77,9 @@ static long                   time_adjust;
 /* constant (boot-param configurable) NTP tick adjustment (upscaled)   */
 static s64                     ntp_tick_adj;
 
+/* second value of the next pending leapsecond, or TIME64_MAX if no leap */
+static time64_t                        ntp_next_leap_sec = TIME64_MAX;
+
 #ifdef CONFIG_NTP_PPS
 
 /*
@@ -349,6 +353,7 @@ void ntp_clear(void)
        tick_length     = tick_length_base;
        time_offset     = 0;
 
+       ntp_next_leap_sec = TIME64_MAX;
        /* Clear PPS state variables */
        pps_clear();
 }
@@ -359,6 +364,21 @@ u64 ntp_tick_length(void)
        return tick_length;
 }
 
+/**
+ * ntp_get_next_leap - Returns the next leapsecond in CLOCK_REALTIME ktime_t
+ *
+ * Provides the time of the next leapsecond against CLOCK_REALTIME in
+ * a ktime_t format. Returns KTIME_MAX if no leapsecond is pending.
+ */
+ktime_t ntp_get_next_leap(void)
+{
+       ktime_t ret;
+
+       if ((time_state == TIME_INS) && (time_status & STA_INS))
+               return ktime_set(ntp_next_leap_sec, 0);
+       ret.tv64 = KTIME_MAX;
+       return ret;
+}
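
ntp_next_leap_sec, maintained below, always holds the start of a UTC day, because leap seconds are inserted or deleted at midnight. For the insertion case, the rounding arithmetic used in second_overflow() reduces to this sketch (the deletion case below offsets by one second):

    /* Round secs up to the next UTC midnight (the leap boundary). */
    static time64_t next_leap_boundary(time64_t secs)
    {
            return secs + SECS_PER_DAY - (secs % SECS_PER_DAY);
    }
    /* e.g. secs = 86500 (00:01:40 on day 2) -> 172800 (start of day 3) */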
 
 /*
  * this routine handles the overflow of the microsecond field
@@ -382,15 +402,21 @@ int second_overflow(unsigned long secs)
         */
        switch (time_state) {
        case TIME_OK:
-               if (time_status & STA_INS)
+               if (time_status & STA_INS) {
                        time_state = TIME_INS;
-               else if (time_status & STA_DEL)
+                       ntp_next_leap_sec = secs + SECS_PER_DAY -
+                                               (secs % SECS_PER_DAY);
+               } else if (time_status & STA_DEL) {
                        time_state = TIME_DEL;
+                       ntp_next_leap_sec = secs + SECS_PER_DAY -
+                                                ((secs+1) % SECS_PER_DAY);
+               }
                break;
        case TIME_INS:
-               if (!(time_status & STA_INS))
+               if (!(time_status & STA_INS)) {
+                       ntp_next_leap_sec = TIME64_MAX;
                        time_state = TIME_OK;
-               else if (secs % 86400 == 0) {
+               } else if (secs % SECS_PER_DAY == 0) {
                        leap = -1;
                        time_state = TIME_OOP;
                        printk(KERN_NOTICE
@@ -398,19 +424,21 @@ int second_overflow(unsigned long secs)
                }
                break;
        case TIME_DEL:
-               if (!(time_status & STA_DEL))
+               if (!(time_status & STA_DEL)) {
+                       ntp_next_leap_sec = TIME64_MAX;
                        time_state = TIME_OK;
-               else if ((secs + 1) % 86400 == 0) {
+               } else if ((secs + 1) % SECS_PER_DAY == 0) {
                        leap = 1;
+                       ntp_next_leap_sec = TIME64_MAX;
                        time_state = TIME_WAIT;
                        printk(KERN_NOTICE
                                "Clock: deleting leap second 23:59:59 UTC\n");
                }
                break;
        case TIME_OOP:
+               ntp_next_leap_sec = TIME64_MAX;
                time_state = TIME_WAIT;
                break;
-
        case TIME_WAIT:
                if (!(time_status & (STA_INS | STA_DEL)))
                        time_state = TIME_OK;
@@ -547,6 +575,7 @@ static inline void process_adj_status(struct timex *txc, struct timespec64 *ts)
        if ((time_status & STA_PLL) && !(txc->status & STA_PLL)) {
                time_state = TIME_OK;
                time_status = STA_UNSYNC;
+               ntp_next_leap_sec = TIME64_MAX;
                /* restart PPS frequency calibration */
                pps_reset_freq_interval();
        }
@@ -711,6 +740,24 @@ int __do_adjtimex(struct timex *txc, struct timespec64 *ts, s32 *time_tai)
        if (!(time_status & STA_NANO))
                txc->time.tv_usec /= NSEC_PER_USEC;
 
+       /* Handle leapsec adjustments */
+       if (unlikely(ts->tv_sec >= ntp_next_leap_sec)) {
+               if ((time_state == TIME_INS) && (time_status & STA_INS)) {
+                       result = TIME_OOP;
+                       txc->tai++;
+                       txc->time.tv_sec--;
+               }
+               if ((time_state == TIME_DEL) && (time_status & STA_DEL)) {
+                       result = TIME_WAIT;
+                       txc->tai--;
+                       txc->time.tv_sec++;
+               }
+               if ((time_state == TIME_OOP) &&
+                                       (ts->tv_sec == ntp_next_leap_sec)) {
+                       result = TIME_WAIT;
+               }
+       }
+
        return result;
 }
 
index bbd102ad9df7c8fcc5df253faf6970d078ba9db9..65430504ca2630c31d185429e6a2573c817b43ae 100644 (file)
@@ -5,6 +5,7 @@ extern void ntp_init(void);
 extern void ntp_clear(void);
 /* Returns how long ticks are at present, in ns / 2^NTP_SCALE_SHIFT. */
 extern u64 ntp_tick_length(void);
+extern ktime_t ntp_get_next_leap(void);
 extern int second_overflow(unsigned long secs);
 extern int ntp_validate_timex(struct timex *);
 extern int __do_adjtimex(struct timex *, struct timespec64 *, s32 *);
index 0075da74abf0c5f55c823f393e96b99d79b05e13..892e3dae0aac41199e9ebbbdef8b73f6b2d57afd 100644 (file)
@@ -196,39 +196,62 @@ static int cpu_clock_sample(const clockid_t which_clock, struct task_struct *p,
        return 0;
 }
 
-static void update_gt_cputime(struct task_cputime *a, struct task_cputime *b)
+/*
+ * Set cputime to sum_cputime if sum_cputime > cputime. Use cmpxchg
+ * to avoid race conditions with concurrent updates to cputime.
+ */
+static inline void __update_gt_cputime(atomic64_t *cputime, u64 sum_cputime)
 {
-       if (b->utime > a->utime)
-               a->utime = b->utime;
+       u64 curr_cputime;
+retry:
+       curr_cputime = atomic64_read(cputime);
+       if (sum_cputime > curr_cputime) {
+               if (atomic64_cmpxchg(cputime, curr_cputime, sum_cputime) != curr_cputime)
+                       goto retry;
+       }
+}
 
-       if (b->stime > a->stime)
-               a->stime = b->stime;
+static void update_gt_cputime(struct task_cputime_atomic *cputime_atomic, struct task_cputime *sum)
+{
+       __update_gt_cputime(&cputime_atomic->utime, sum->utime);
+       __update_gt_cputime(&cputime_atomic->stime, sum->stime);
+       __update_gt_cputime(&cputime_atomic->sum_exec_runtime, sum->sum_exec_runtime);
+}
 
-       if (b->sum_exec_runtime > a->sum_exec_runtime)
-               a->sum_exec_runtime = b->sum_exec_runtime;
+/* Sample task_cputime_atomic values in "atomic_times", store results in "times". */
+static inline void sample_cputime_atomic(struct task_cputime *times,
+                                        struct task_cputime_atomic *atomic_times)
+{
+       times->utime = atomic64_read(&atomic_times->utime);
+       times->stime = atomic64_read(&atomic_times->stime);
+       times->sum_exec_runtime = atomic64_read(&atomic_times->sum_exec_runtime);
 }
 
 void thread_group_cputimer(struct task_struct *tsk, struct task_cputime *times)
 {
        struct thread_group_cputimer *cputimer = &tsk->signal->cputimer;
        struct task_cputime sum;
-       unsigned long flags;
 
-       if (!cputimer->running) {
+       /* Check if cputimer isn't running. This is accessed without locking. */
+       if (!READ_ONCE(cputimer->running)) {
                /*
                 * The POSIX timer interface allows for absolute time expiry
                 * values through the TIMER_ABSTIME flag, therefore we have
-                * to synchronize the timer to the clock every time we start
-                * it.
+                * to synchronize the timer to the clock every time we start it.
                 */
                thread_group_cputime(tsk, &sum);
-               raw_spin_lock_irqsave(&cputimer->lock, flags);
-               cputimer->running = 1;
-               update_gt_cputime(&cputimer->cputime, &sum);
-       } else
-               raw_spin_lock_irqsave(&cputimer->lock, flags);
-       *times = cputimer->cputime;
-       raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+               update_gt_cputime(&cputimer->cputime_atomic, &sum);
+
+               /*
+                * We're setting cputimer->running without a lock. Ensure
+                * this only gets written to in one operation. We set
+                * running after update_gt_cputime() as a small optimization,
+                * but barriers are not required because update_gt_cputime()
+                * can handle concurrent updates.
+                */
+               WRITE_ONCE(cputimer->running, 1);
+       }
+       sample_cputime_atomic(times, &cputimer->cputime_atomic);
 }
 
 /*
@@ -582,7 +605,8 @@ bool posix_cpu_timers_can_stop_tick(struct task_struct *tsk)
        if (!task_cputime_zero(&tsk->cputime_expires))
                return false;
 
-       if (tsk->signal->cputimer.running)
+       /* Check if cputimer is running. This is accessed without locking. */
+       if (READ_ONCE(tsk->signal->cputimer.running))
                return false;
 
        return true;
@@ -852,10 +876,10 @@ static void check_thread_timers(struct task_struct *tsk,
        /*
         * Check for the special case thread timers.
         */
-       soft = ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
+       soft = READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long hard =
-                       ACCESS_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
+                       READ_ONCE(sig->rlim[RLIMIT_RTTIME].rlim_max);
 
                if (hard != RLIM_INFINITY &&
                    tsk->rt.timeout > DIV_ROUND_UP(hard, USEC_PER_SEC/HZ)) {
@@ -882,14 +906,12 @@ static void check_thread_timers(struct task_struct *tsk,
        }
 }
 
-static void stop_process_timers(struct signal_struct *sig)
+static inline void stop_process_timers(struct signal_struct *sig)
 {
        struct thread_group_cputimer *cputimer = &sig->cputimer;
-       unsigned long flags;
 
-       raw_spin_lock_irqsave(&cputimer->lock, flags);
-       cputimer->running = 0;
-       raw_spin_unlock_irqrestore(&cputimer->lock, flags);
+       /* Turn off cputimer->running. This is done without locking. */
+       WRITE_ONCE(cputimer->running, 0);
 }
 
 static u32 onecputick;
@@ -958,11 +980,11 @@ static void check_process_timers(struct task_struct *tsk,
                         SIGPROF);
        check_cpu_itimer(tsk, &sig->it[CPUCLOCK_VIRT], &virt_expires, utime,
                         SIGVTALRM);
-       soft = ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
+       soft = READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_cur);
        if (soft != RLIM_INFINITY) {
                unsigned long psecs = cputime_to_secs(ptime);
                unsigned long hard =
-                       ACCESS_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
+                       READ_ONCE(sig->rlim[RLIMIT_CPU].rlim_max);
                cputime_t x;
                if (psecs >= hard) {
                        /*
@@ -1111,12 +1133,11 @@ static inline int fastpath_timer_check(struct task_struct *tsk)
        }
 
        sig = tsk->signal;
-       if (sig->cputimer.running) {
+       /* Check if cputimer is running. This is accessed without locking. */
+       if (READ_ONCE(sig->cputimer.running)) {
                struct task_cputime group_sample;
 
-               raw_spin_lock(&sig->cputimer.lock);
-               group_sample = sig->cputimer.cputime;
-               raw_spin_unlock(&sig->cputimer.lock);
+               sample_cputime_atomic(&group_sample, &sig->cputimer.cputime_atomic);
 
                if (task_cputime_expired(&group_sample, &sig->cputime_expires))
                        return 1;
@@ -1157,7 +1178,7 @@ void run_posix_cpu_timers(struct task_struct *tsk)
         * If there are any active process wide timers (POSIX 1.b, itimers,
         * RLIMIT_CPU) cputimer must be running.
         */
-       if (tsk->signal->cputimer.running)
+       if (READ_ONCE(tsk->signal->cputimer.running))
                check_process_timers(tsk, &firing);
 
        /*
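
The __update_gt_cputime() retry loop above is the classic lock-free "monotonic maximum" pattern. A self-contained sketch of the same idea in portable C11 atomics (illustrative only, not kernel code):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Keep *max at the largest value ever offered, without a lock. */
static void update_max(_Atomic uint64_t *max, uint64_t candidate)
{
	uint64_t cur = atomic_load(max);

	while (candidate > cur) {
		/* On failure, 'cur' is reloaded and the test re-runs. */
		if (atomic_compare_exchange_weak(max, &cur, candidate))
			break;
	}
}

int main(void)
{
	_Atomic uint64_t m = 5;

	update_max(&m, 9);
	update_max(&m, 3);	/* no effect: 3 < 9 */
	printf("%llu\n", (unsigned long long)atomic_load(&m));
	return 0;
}

Concurrent callers can interleave freely; the value only ever moves up, which is exactly the property the cputime sums need.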
index 31ea01f42e1f088786a291199cc54e9bde4658c9..31d11ac9fa4739789728c44470b82115ec307d11 100644 (file)
@@ -272,13 +272,20 @@ static int posix_get_tai(clockid_t which_clock, struct timespec *tp)
        return 0;
 }
 
+static int posix_get_hrtimer_res(clockid_t which_clock, struct timespec *tp)
+{
+       tp->tv_sec = 0;
+       tp->tv_nsec = hrtimer_resolution;
+       return 0;
+}
+
 /*
  * Initialize everything, well, just everything in Posix clocks/timers ;)
  */
 static __init int init_posix_timers(void)
 {
        struct k_clock clock_realtime = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_clock_realtime_get,
                .clock_set      = posix_clock_realtime_set,
                .clock_adj      = posix_clock_realtime_adj,
@@ -290,7 +297,7 @@ static __init int init_posix_timers(void)
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_ktime_get_ts,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
@@ -300,7 +307,7 @@ static __init int init_posix_timers(void)
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_monotonic_raw = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_get_monotonic_raw,
        };
        struct k_clock clock_realtime_coarse = {
@@ -312,7 +319,7 @@ static __init int init_posix_timers(void)
                .clock_get      = posix_get_monotonic_coarse,
        };
        struct k_clock clock_tai = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_get_tai,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
@@ -322,7 +329,7 @@ static __init int init_posix_timers(void)
                .timer_del      = common_timer_del,
        };
        struct k_clock clock_boottime = {
-               .clock_getres   = hrtimer_get_res,
+               .clock_getres   = posix_get_hrtimer_res,
                .clock_get      = posix_get_boottime,
                .nsleep         = common_nsleep,
                .nsleep_restart = hrtimer_nanosleep_restart,
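
All of the hrtimer-backed clocks above now report their resolution through posix_get_hrtimer_res(), which userspace sees via clock_getres(2). A quick check (plain POSIX, not part of this patch; link with -lrt on older glibc):

#include <stdio.h>
#include <time.h>

int main(void)
{
	struct timespec res;

	if (clock_getres(CLOCK_MONOTONIC, &res) != 0) {
		perror("clock_getres");
		return 1;
	}
	/* With high resolution timers enabled this is typically 1 ns. */
	printf("resolution: %ld.%09ld s\n", (long)res.tv_sec, res.tv_nsec);
	return 0;
}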
index 6aac4beedbbe235951c0671336e52b2459a047fb..3e7db49a2381d14506a37c69b45268feeec56bef 100644 (file)
@@ -22,6 +22,7 @@ static void bc_set_mode(enum clock_event_mode mode,
                        struct clock_event_device *bc)
 {
        switch (mode) {
+       case CLOCK_EVT_MODE_UNUSED:
        case CLOCK_EVT_MODE_SHUTDOWN:
                /*
                 * Note, we cannot cancel the timer here as we might
@@ -66,9 +67,11 @@ static int bc_set_next(ktime_t expires, struct clock_event_device *bc)
         * hrtimer_{start/cancel} functions call into tracing,
         * calls to these functions must be bound within RCU_NONIDLE.
         */
-       RCU_NONIDLE(bc_moved = (hrtimer_try_to_cancel(&bctimer) >= 0) ?
-               !hrtimer_start(&bctimer, expires, HRTIMER_MODE_ABS_PINNED) :
-                       0);
+       RCU_NONIDLE({
+                       bc_moved = hrtimer_try_to_cancel(&bctimer) >= 0;
+                       if (bc_moved)
+                               hrtimer_start(&bctimer, expires,
+                                             HRTIMER_MODE_ABS_PINNED);});
        if (bc_moved) {
                /* Bind the "device" to the cpu */
                bc->bound_on = smp_processor_id();
@@ -99,10 +102,13 @@ static enum hrtimer_restart bc_handler(struct hrtimer *t)
 {
        ce_broadcast_hrtimer.event_handler(&ce_broadcast_hrtimer);
 
-       if (ce_broadcast_hrtimer.next_event.tv64 == KTIME_MAX)
+       switch (ce_broadcast_hrtimer.mode) {
+       case CLOCK_EVT_MODE_ONESHOT:
+               if (ce_broadcast_hrtimer.next_event.tv64 != KTIME_MAX)
+                       return HRTIMER_RESTART;
+       default:
                return HRTIMER_NORESTART;
-
-       return HRTIMER_RESTART;
+       }
 }
 
 void tick_setup_hrtimer_broadcast(void)
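
The reworked bc_handler() above only re-arms the broadcast hrtimer in oneshot mode. For reference, the general hrtimer convention it follows, shown as a hypothetical module snippet (keep_running and the 10 ms period are invented for the example):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static bool keep_running = true;	/* hypothetical stop condition */

static enum hrtimer_restart demo_handler(struct hrtimer *t)
{
	if (!keep_running)
		return HRTIMER_NORESTART;	/* timer dies here */

	/* Push the expiry forward and ask the core to re-arm us. */
	hrtimer_forward_now(t, ms_to_ktime(10));
	return HRTIMER_RESTART;
}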
index 7e8ca4f448a88c5ad5708106bbd889e22715b3ad..d39f32cdd1b59cca97066430442e3aa054cc9eed 100644 (file)
@@ -255,18 +255,18 @@ int tick_receive_broadcast(void)
 /*
  * Broadcast the event to the cpus, which are set in the mask (mangled).
  */
-static void tick_do_broadcast(struct cpumask *mask)
+static bool tick_do_broadcast(struct cpumask *mask)
 {
        int cpu = smp_processor_id();
        struct tick_device *td;
+       bool local = false;
 
        /*
         * Check, if the current cpu is in the mask
         */
        if (cpumask_test_cpu(cpu, mask)) {
                cpumask_clear_cpu(cpu, mask);
-               td = &per_cpu(tick_cpu_device, cpu);
-               td->evtdev->event_handler(td->evtdev);
+               local = true;
        }
 
        if (!cpumask_empty(mask)) {
@@ -279,16 +279,17 @@ static void tick_do_broadcast(struct cpumask *mask)
                td = &per_cpu(tick_cpu_device, cpumask_first(mask));
                td->evtdev->broadcast(mask);
        }
+       return local;
 }
 
 /*
  * Periodic broadcast:
  * - invoke the broadcast handlers
  */
-static void tick_do_periodic_broadcast(void)
+static bool tick_do_periodic_broadcast(void)
 {
        cpumask_and(tmpmask, cpu_online_mask, tick_broadcast_mask);
-       tick_do_broadcast(tmpmask);
+       return tick_do_broadcast(tmpmask);
 }
 
 /*
@@ -296,34 +297,26 @@ static void tick_do_periodic_broadcast(void)
  */
 static void tick_handle_periodic_broadcast(struct clock_event_device *dev)
 {
-       ktime_t next;
+       struct tick_device *td = this_cpu_ptr(&tick_cpu_device);
+       bool bc_local;
 
        raw_spin_lock(&tick_broadcast_lock);
+       bc_local = tick_do_periodic_broadcast();
 
-       tick_do_periodic_broadcast();
+       if (clockevent_state_oneshot(dev)) {
+               ktime_t next = ktime_add(dev->next_event, tick_period);
 
-       /*
-        * The device is in periodic mode. No reprogramming necessary:
-        */
-       if (dev->state == CLOCK_EVT_STATE_PERIODIC)
-               goto unlock;
+               clockevents_program_event(dev, next, true);
+       }
+       raw_spin_unlock(&tick_broadcast_lock);
 
        /*
-        * Setup the next period for devices, which do not have
-        * periodic mode. We read dev->next_event first and add to it
-        * when the event already expired. clockevents_program_event()
-        * sets dev->next_event only when the event is really
-        * programmed to the device.
+        * We run the handler of the local cpu after dropping
+        * tick_broadcast_lock because the handler might deadlock when
+        * trying to switch to oneshot mode.
         */
-       for (next = dev->next_event; ;) {
-               next = ktime_add(next, tick_period);
-
-               if (!clockevents_program_event(dev, next, false))
-                       goto unlock;
-               tick_do_periodic_broadcast();
-       }
-unlock:
-       raw_spin_unlock(&tick_broadcast_lock);
+       if (bc_local)
+               td->evtdev->event_handler(td->evtdev);
 }
 
 /**
@@ -532,23 +525,19 @@ static void tick_broadcast_set_affinity(struct clock_event_device *bc,
        irq_set_affinity(bc->irq, bc->cpumask);
 }
 
-static int tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
-                                   ktime_t expires, int force)
+static void tick_broadcast_set_event(struct clock_event_device *bc, int cpu,
+                                    ktime_t expires)
 {
-       int ret;
-
-       if (bc->state != CLOCK_EVT_STATE_ONESHOT)
-               clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+       if (!clockevent_state_oneshot(bc))
+               clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
 
-       ret = clockevents_program_event(bc, expires, force);
-       if (!ret)
-               tick_broadcast_set_affinity(bc, cpumask_of(cpu));
-       return ret;
+       clockevents_program_event(bc, expires, 1);
+       tick_broadcast_set_affinity(bc, cpumask_of(cpu));
 }
 
 static void tick_resume_broadcast_oneshot(struct clock_event_device *bc)
 {
-       clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+       clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
 }
 
 /*
@@ -566,7 +555,7 @@ void tick_check_oneshot_broadcast_this_cpu(void)
                 * switched over, leave the device alone.
                 */
                if (td->mode == TICKDEV_MODE_ONESHOT) {
-                       clockevents_set_state(td->evtdev,
+                       clockevents_switch_state(td->evtdev,
                                              CLOCK_EVT_STATE_ONESHOT);
                }
        }
@@ -580,9 +569,9 @@ static void tick_handle_oneshot_broadcast(struct clock_event_device *dev)
        struct tick_device *td;
        ktime_t now, next_event;
        int cpu, next_cpu = 0;
+       bool bc_local;
 
        raw_spin_lock(&tick_broadcast_lock);
-again:
        dev->next_event.tv64 = KTIME_MAX;
        next_event.tv64 = KTIME_MAX;
        cpumask_clear(tmpmask);
@@ -624,7 +613,7 @@ again:
        /*
         * Wakeup the cpus which have an expired event.
         */
-       tick_do_broadcast(tmpmask);
+       bc_local = tick_do_broadcast(tmpmask);
 
        /*
         * Two reasons for reprogram:
@@ -636,15 +625,15 @@ again:
         * - There are pending events on sleeping CPUs which were not
         * in the event mask
         */
-       if (next_event.tv64 != KTIME_MAX) {
-               /*
-                * Rearm the broadcast device. If event expired,
-                * repeat the above
-                */
-               if (tick_broadcast_set_event(dev, next_cpu, next_event, 0))
-                       goto again;
-       }
+       if (next_event.tv64 != KTIME_MAX)
+               tick_broadcast_set_event(dev, next_cpu, next_event);
+
        raw_spin_unlock(&tick_broadcast_lock);
+
+       if (bc_local) {
+               td = this_cpu_ptr(&tick_cpu_device);
+               td->evtdev->event_handler(td->evtdev);
+       }
 }
 
 static int broadcast_needs_cpu(struct clock_event_device *bc, int cpu)
@@ -670,7 +659,7 @@ static void broadcast_shutdown_local(struct clock_event_device *bc,
                if (dev->next_event.tv64 < bc->next_event.tv64)
                        return;
        }
-       clockevents_set_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
+       clockevents_switch_state(dev, CLOCK_EVT_STATE_SHUTDOWN);
 }
 
 /**
@@ -726,7 +715,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
                         */
                        if (!cpumask_test_cpu(cpu, tick_broadcast_force_mask) &&
                            dev->next_event.tv64 < bc->next_event.tv64)
-                               tick_broadcast_set_event(bc, cpu, dev->next_event, 1);
+                               tick_broadcast_set_event(bc, cpu, dev->next_event);
                }
                /*
                 * If the current CPU owns the hrtimer broadcast
@@ -740,7 +729,7 @@ int tick_broadcast_oneshot_control(enum tick_broadcast_state state)
                        cpumask_clear_cpu(cpu, tick_broadcast_oneshot_mask);
        } else {
                if (cpumask_test_and_clear_cpu(cpu, tick_broadcast_oneshot_mask)) {
-                       clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+                       clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
                        /*
                         * The cpu which was handling the broadcast
                         * timer marked this cpu in the broadcast
@@ -842,7 +831,7 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
 
        /* Set it up only once ! */
        if (bc->event_handler != tick_handle_oneshot_broadcast) {
-               int was_periodic = bc->state == CLOCK_EVT_STATE_PERIODIC;
+               int was_periodic = clockevent_state_periodic(bc);
 
                bc->event_handler = tick_handle_oneshot_broadcast;
 
@@ -858,10 +847,10 @@ void tick_broadcast_setup_oneshot(struct clock_event_device *bc)
                           tick_broadcast_oneshot_mask, tmpmask);
 
                if (was_periodic && !cpumask_empty(tmpmask)) {
-                       clockevents_set_state(bc, CLOCK_EVT_STATE_ONESHOT);
+                       clockevents_switch_state(bc, CLOCK_EVT_STATE_ONESHOT);
                        tick_broadcast_init_next_event(tmpmask,
                                                       tick_next_period);
-                       tick_broadcast_set_event(bc, cpu, tick_next_period, 1);
+                       tick_broadcast_set_event(bc, cpu, tick_next_period);
                } else
                        bc->next_event.tv64 = KTIME_MAX;
        } else {
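
The key change in tick_handle_periodic_broadcast() and tick_handle_oneshot_broadcast() above is running the local CPU's handler only after tick_broadcast_lock has been dropped. A condensed model of that "defer the callback past the unlock" pattern (plain pthreads, illustrative only):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

static bool broadcast_others(void) { return true; }	/* stub */
static void handle_local(void) { /* may take 'lock' itself */ }

static void periodic(void)
{
	bool run_local;

	pthread_mutex_lock(&lock);
	run_local = broadcast_others();
	pthread_mutex_unlock(&lock);

	/* Invoke the handler lock-free: if it re-acquires 'lock'
	 * (e.g. to switch modes), it can no longer deadlock. */
	if (run_local)
		handle_local();
}

int main(void)
{
	periodic();
	return 0;
}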
index 3ae6afa1eb98e71cc82272cd0a79a25101eff429..17f1444500500a204c48e89e9e92e7a5ebd22c91 100644 (file)
@@ -102,7 +102,17 @@ void tick_handle_periodic(struct clock_event_device *dev)
 
        tick_periodic(cpu);
 
-       if (dev->state != CLOCK_EVT_STATE_ONESHOT)
+#if defined(CONFIG_HIGH_RES_TIMERS) || defined(CONFIG_NO_HZ_COMMON)
+       /*
+        * The cpu might have transitioned to HIGHRES or NOHZ mode via
+        * update_process_times() -> run_local_timers() ->
+        * hrtimer_run_queues().
+        */
+       if (dev->event_handler != tick_handle_periodic)
+               return;
+#endif
+
+       if (!clockevent_state_oneshot(dev))
                return;
        for (;;) {
                /*
@@ -140,7 +150,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
 
        if ((dev->features & CLOCK_EVT_FEAT_PERIODIC) &&
            !tick_broadcast_oneshot_active()) {
-               clockevents_set_state(dev, CLOCK_EVT_STATE_PERIODIC);
+               clockevents_switch_state(dev, CLOCK_EVT_STATE_PERIODIC);
        } else {
                unsigned long seq;
                ktime_t next;
@@ -150,7 +160,7 @@ void tick_setup_periodic(struct clock_event_device *dev, int broadcast)
                        next = tick_next_period;
                } while (read_seqretry(&jiffies_lock, seq));
 
-               clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+               clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
 
                for (;;) {
                        if (!clockevents_program_event(dev, next, false))
@@ -367,7 +377,7 @@ void tick_shutdown(unsigned int cpu)
                 * Prevent that the clock events layer tries to call
                 * the set mode function!
                 */
-               dev->state = CLOCK_EVT_STATE_DETACHED;
+               clockevent_set_state(dev, CLOCK_EVT_STATE_DETACHED);
                dev->mode = CLOCK_EVT_MODE_UNUSED;
                clockevents_exchange_device(dev, NULL);
                dev->event_handler = clockevents_handle_noop;
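
The new guard in tick_handle_periodic() above re-checks dev->event_handler because tick_periodic() can switch the CPU to highres/NOHZ mode underneath it. Stripped of kernel context, the pattern looks like this (hypothetical types, illustrative only):

struct dev;
typedef void (*handler_t)(struct dev *);
struct dev { handler_t event_handler; };

static void highres_handler(struct dev *d) { (void)d; }

/* Stand-in for tick_periodic(): may hand the device to a new owner. */
static void do_tick_work(struct dev *d)
{
	d->event_handler = highres_handler;	/* mode switch happened */
}

static void periodic_handler(struct dev *dev)
{
	do_tick_work(dev);

	/* The device is no longer ours: do not reprogram it. */
	if (dev->event_handler != periodic_handler)
		return;

	/* ... reprogram the next periodic event here ... */
}

int main(void)
{
	struct dev d = { .event_handler = periodic_handler };

	periodic_handler(&d);
	return 0;
}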
index b64fdd8054c56b042784fdce988ebad64f2ea803..966a5a6fdd0a03c378debc87baae72672853a6fd 100644 (file)
@@ -36,11 +36,22 @@ static inline int tick_device_is_functional(struct clock_event_device *dev)
        return !(dev->features & CLOCK_EVT_FEAT_DUMMY);
 }
 
+static inline enum clock_event_state clockevent_get_state(struct clock_event_device *dev)
+{
+       return dev->state_use_accessors;
+}
+
+static inline void clockevent_set_state(struct clock_event_device *dev,
+                                       enum clock_event_state state)
+{
+       dev->state_use_accessors = state;
+}
+
 extern void clockevents_shutdown(struct clock_event_device *dev);
 extern void clockevents_exchange_device(struct clock_event_device *old,
                                        struct clock_event_device *new);
-extern void clockevents_set_state(struct clock_event_device *dev,
-                                enum clock_event_state state);
+extern void clockevents_switch_state(struct clock_event_device *dev,
+                                    enum clock_event_state state);
 extern int clockevents_program_event(struct clock_event_device *dev,
                                     ktime_t expires, bool force);
 extern void clockevents_handle_noop(struct clock_event_device *dev);
@@ -137,3 +148,19 @@ extern void tick_nohz_init(void);
 # else
 static inline void tick_nohz_init(void) { }
 #endif
+
+#ifdef CONFIG_NO_HZ_COMMON
+extern unsigned long tick_nohz_active;
+#else
+#define tick_nohz_active (0)
+#endif
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+extern void timers_update_migration(bool update_nohz);
+#else
+static inline void timers_update_migration(bool update_nohz) { }
+#endif
+
+DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
+
+extern u64 get_next_timer_interrupt(unsigned long basej, u64 basem);
index 67a64b1670bfdb984c7d9edec34f7eadd04800ec..b51344652330a2b12ebb4e72875515a791a8270f 100644 (file)
@@ -28,6 +28,22 @@ int tick_program_event(ktime_t expires, int force)
 {
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
+       if (unlikely(expires.tv64 == KTIME_MAX)) {
+               /*
+                * We don't need the clock event device any more; stop it.
+                */
+               clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT_STOPPED);
+               return 0;
+       }
+
+       if (unlikely(clockevent_state_oneshot_stopped(dev))) {
+               /*
+                * We need the clock event again; configure it in ONESHOT mode
+                * before using it.
+                */
+               clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
+       }
+
        return clockevents_program_event(dev, expires, force);
 }
 
@@ -38,7 +54,7 @@ void tick_resume_oneshot(void)
 {
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
 
-       clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+       clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
        clockevents_program_event(dev, ktime_get(), true);
 }
 
@@ -50,7 +66,7 @@ void tick_setup_oneshot(struct clock_event_device *newdev,
                        ktime_t next_event)
 {
        newdev->event_handler = handler;
-       clockevents_set_state(newdev, CLOCK_EVT_STATE_ONESHOT);
+       clockevents_switch_state(newdev, CLOCK_EVT_STATE_ONESHOT);
        clockevents_program_event(newdev, next_event, true);
 }
 
@@ -81,7 +97,7 @@ int tick_switch_to_oneshot(void (*handler)(struct clock_event_device *))
 
        td->mode = TICKDEV_MODE_ONESHOT;
        dev->event_handler = handler;
-       clockevents_set_state(dev, CLOCK_EVT_STATE_ONESHOT);
+       clockevents_switch_state(dev, CLOCK_EVT_STATE_ONESHOT);
        tick_broadcast_switch_to_oneshot();
        return 0;
 }
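
tick_program_event() above introduces the ONESHOT_STOPPED idea: an expiry of KTIME_MAX parks the device, and any real expiry first wakes it up again. A self-contained model of that two-state dance (simplified, not kernel code):

#include <stdint.h>

#define KTIME_MAX INT64_MAX

enum evt_state { ONESHOT, ONESHOT_STOPPED };

struct clkevt { enum evt_state state; };

static int program(struct clkevt *dev, int64_t expires)
{
	if (expires == KTIME_MAX) {
		dev->state = ONESHOT_STOPPED;	/* park the device */
		return 0;
	}
	if (dev->state == ONESHOT_STOPPED)
		dev->state = ONESHOT;		/* wake it up first */

	/* ... program the hardware for 'expires' here ... */
	return 0;
}

int main(void)
{
	struct clkevt dev = { ONESHOT };

	program(&dev, KTIME_MAX);	/* no event pending: parked */
	program(&dev, 123456789);	/* real expiry: un-parked first */
	return dev.state == ONESHOT ? 0 : 1;
}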
index 914259128145e2394e65bd36f18aaf9a81f78843..c792429e98c6de3ffb75f95f745d6cca9724585b 100644 (file)
@@ -399,7 +399,7 @@ void __init tick_nohz_init(void)
  * NO HZ enabled ?
  */
 static int tick_nohz_enabled __read_mostly  = 1;
-int tick_nohz_active  __read_mostly;
+unsigned long tick_nohz_active  __read_mostly;
 /*
  * Enable / Disable tickless mode
  */
@@ -565,156 +565,144 @@ u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time)
 }
 EXPORT_SYMBOL_GPL(get_cpu_iowait_time_us);
 
+static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
+{
+       hrtimer_cancel(&ts->sched_timer);
+       hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
+
+       /* Forward the time to expire in the future */
+       hrtimer_forward(&ts->sched_timer, now, tick_period);
+
+       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+               hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+       else
+               tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+}
+
 static ktime_t tick_nohz_stop_sched_tick(struct tick_sched *ts,
                                         ktime_t now, int cpu)
 {
-       unsigned long seq, last_jiffies, next_jiffies, delta_jiffies;
-       ktime_t last_update, expires, ret = { .tv64 = 0 };
-       unsigned long rcu_delta_jiffies;
        struct clock_event_device *dev = __this_cpu_read(tick_cpu_device.evtdev);
-       u64 time_delta;
-
-       time_delta = timekeeping_max_deferment();
+       u64 basemono, next_tick, next_tmr, next_rcu, delta, expires;
+       unsigned long seq, basejiff;
+       ktime_t tick;
 
        /* Read jiffies and the time when jiffies were updated last */
        do {
                seq = read_seqbegin(&jiffies_lock);
-               last_update = last_jiffies_update;
-               last_jiffies = jiffies;
+               basemono = last_jiffies_update.tv64;
+               basejiff = jiffies;
        } while (read_seqretry(&jiffies_lock, seq));
+       ts->last_jiffies = basejiff;
 
-       if (rcu_needs_cpu(&rcu_delta_jiffies) ||
+       if (rcu_needs_cpu(basemono, &next_rcu) ||
            arch_needs_cpu() || irq_work_needs_cpu()) {
-               next_jiffies = last_jiffies + 1;
-               delta_jiffies = 1;
+               next_tick = basemono + TICK_NSEC;
        } else {
-               /* Get the next timer wheel timer */
-               next_jiffies = get_next_timer_interrupt(last_jiffies);
-               delta_jiffies = next_jiffies - last_jiffies;
-               if (rcu_delta_jiffies < delta_jiffies) {
-                       next_jiffies = last_jiffies + rcu_delta_jiffies;
-                       delta_jiffies = rcu_delta_jiffies;
-               }
+               /*
+                * Get the next pending timer. If high resolution
+                * timers are enabled this only takes the timer wheel
+                * timers into account. If high resolution timers are
+                * disabled this also looks at the next expiring
+                * hrtimer.
+                */
+               next_tmr = get_next_timer_interrupt(basejiff, basemono);
+               ts->next_timer = next_tmr;
+               /* Take the next rcu event into account */
+               next_tick = next_rcu < next_tmr ? next_rcu : next_tmr;
        }
 
        /*
-        * Do not stop the tick, if we are only one off (or less)
-        * or if the cpu is required for RCU:
+        * If the tick is due in the next period, keep it ticking or
+        * restart it properly.
         */
-       if (!ts->tick_stopped && delta_jiffies <= 1)
-               goto out;
-
-       /* Schedule the tick, if we are at least one jiffie off */
-       if ((long)delta_jiffies >= 1) {
-
-               /*
-                * If this cpu is the one which updates jiffies, then
-                * give up the assignment and let it be taken by the
-                * cpu which runs the tick timer next, which might be
-                * this cpu as well. If we don't drop this here the
-                * jiffies might be stale and do_timer() never
-                * invoked. Keep track of the fact that it was the one
-                * which had the do_timer() duty last. If this cpu is
-                * the one which had the do_timer() duty last, we
-                * limit the sleep time to the timekeeping
-                * max_deferement value which we retrieved
-                * above. Otherwise we can sleep as long as we want.
-                */
-               if (cpu == tick_do_timer_cpu) {
-                       tick_do_timer_cpu = TICK_DO_TIMER_NONE;
-                       ts->do_timer_last = 1;
-               } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
-                       time_delta = KTIME_MAX;
-                       ts->do_timer_last = 0;
-               } else if (!ts->do_timer_last) {
-                       time_delta = KTIME_MAX;
+       delta = next_tick - basemono;
+       if (delta <= (u64)TICK_NSEC) {
+               tick.tv64 = 0;
+               if (!ts->tick_stopped)
+                       goto out;
+               if (delta == 0) {
+                       /* Tick is stopped, but required now. Enforce it */
+                       tick_nohz_restart(ts, now);
+                       goto out;
                }
+       }
+
+       /*
+        * If this cpu is the one which updates jiffies, then give up
+        * the assignment and let it be taken by the cpu which runs
+        * the tick timer next, which might be this cpu as well. If we
+        * don't drop this here the jiffies might be stale and
+        * do_timer() never invoked. Keep track of the fact that it
+        * was the one which had the do_timer() duty last. If this cpu
+        * is the one which had the do_timer() duty last, we limit the
+        * sleep time to the timekeeping max_deferment value.
+        * Otherwise we can sleep as long as we want.
+        */
+       delta = timekeeping_max_deferment();
+       if (cpu == tick_do_timer_cpu) {
+               tick_do_timer_cpu = TICK_DO_TIMER_NONE;
+               ts->do_timer_last = 1;
+       } else if (tick_do_timer_cpu != TICK_DO_TIMER_NONE) {
+               delta = KTIME_MAX;
+               ts->do_timer_last = 0;
+       } else if (!ts->do_timer_last) {
+               delta = KTIME_MAX;
+       }
 
 #ifdef CONFIG_NO_HZ_FULL
-               if (!ts->inidle) {
-                       time_delta = min(time_delta,
-                                        scheduler_tick_max_deferment());
-               }
+       /* Limit the tick delta to the maximum scheduler deferment */
+       if (!ts->inidle)
+               delta = min(delta, scheduler_tick_max_deferment());
 #endif
 
-               /*
-                * calculate the expiry time for the next timer wheel
-                * timer. delta_jiffies >= NEXT_TIMER_MAX_DELTA signals
-                * that there is no timer pending or at least extremely
-                * far into the future (12 days for HZ=1000). In this
-                * case we set the expiry to the end of time.
-                */
-               if (likely(delta_jiffies < NEXT_TIMER_MAX_DELTA)) {
-                       /*
-                        * Calculate the time delta for the next timer event.
-                        * If the time delta exceeds the maximum time delta
-                        * permitted by the current clocksource then adjust
-                        * the time delta accordingly to ensure the
-                        * clocksource does not wrap.
-                        */
-                       time_delta = min_t(u64, time_delta,
-                                          tick_period.tv64 * delta_jiffies);
-               }
-
-               if (time_delta < KTIME_MAX)
-                       expires = ktime_add_ns(last_update, time_delta);
-               else
-                       expires.tv64 = KTIME_MAX;
-
-               /* Skip reprogram of event if its not changed */
-               if (ts->tick_stopped && ktime_equal(expires, dev->next_event))
-                       goto out;
+       /* Calculate the next expiry time */
+       if (delta < (KTIME_MAX - basemono))
+               expires = basemono + delta;
+       else
+               expires = KTIME_MAX;
 
-               ret = expires;
+       expires = min_t(u64, expires, next_tick);
+       tick.tv64 = expires;
 
-               /*
-                * nohz_stop_sched_tick can be called several times before
-                * the nohz_restart_sched_tick is called. This happens when
-                * interrupts arrive which do not cause a reschedule. In the
-                * first call we save the current tick time, so we can restart
-                * the scheduler tick in nohz_restart_sched_tick.
-                */
-               if (!ts->tick_stopped) {
-                       nohz_balance_enter_idle(cpu);
-                       calc_load_enter_idle();
+       /* Skip reprogram of event if it hasn't changed */
+       if (ts->tick_stopped && (expires == dev->next_event.tv64))
+               goto out;
 
-                       ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
-                       ts->tick_stopped = 1;
-                       trace_tick_stop(1, " ");
-               }
+       /*
+        * nohz_stop_sched_tick can be called several times before
+        * the nohz_restart_sched_tick is called. This happens when
+        * interrupts arrive which do not cause a reschedule. In the
+        * first call we save the current tick time, so we can restart
+        * the scheduler tick in nohz_restart_sched_tick.
+        */
+       if (!ts->tick_stopped) {
+               nohz_balance_enter_idle(cpu);
+               calc_load_enter_idle();
 
-               /*
-                * If the expiration time == KTIME_MAX, then
-                * in this case we simply stop the tick timer.
-                */
-                if (unlikely(expires.tv64 == KTIME_MAX)) {
-                       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
-                               hrtimer_cancel(&ts->sched_timer);
-                       goto out;
-               }
+               ts->last_tick = hrtimer_get_expires(&ts->sched_timer);
+               ts->tick_stopped = 1;
+               trace_tick_stop(1, " ");
+       }
 
-               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start(&ts->sched_timer, expires,
-                                     HRTIMER_MODE_ABS_PINNED);
-                       /* Check, if the timer was already in the past */
-                       if (hrtimer_active(&ts->sched_timer))
-                               goto out;
-               } else if (!tick_program_event(expires, 0))
-                               goto out;
-               /*
-                * We are past the event already. So we crossed a
-                * jiffie boundary. Update jiffies and raise the
-                * softirq.
-                */
-               tick_do_update_jiffies64(ktime_get());
+       /*
+        * If the expiration time == KTIME_MAX, then we simply stop
+        * the tick timer.
+        */
+       if (unlikely(expires == KTIME_MAX)) {
+               if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+                       hrtimer_cancel(&ts->sched_timer);
+               goto out;
        }
-       raise_softirq_irqoff(TIMER_SOFTIRQ);
+
+       if (ts->nohz_mode == NOHZ_MODE_HIGHRES)
+               hrtimer_start(&ts->sched_timer, tick, HRTIMER_MODE_ABS_PINNED);
+       else
+               tick_program_event(tick, 1);
 out:
-       ts->next_jiffies = next_jiffies;
-       ts->last_jiffies = last_jiffies;
+       /* Update the estimated sleep length */
        ts->sleep_length = ktime_sub(dev->next_event, now);
-
-       return ret;
+       return tick;
 }
 
 static void tick_nohz_full_stop_tick(struct tick_sched *ts)
@@ -876,32 +864,6 @@ ktime_t tick_nohz_get_sleep_length(void)
        return ts->sleep_length;
 }
 
-static void tick_nohz_restart(struct tick_sched *ts, ktime_t now)
-{
-       hrtimer_cancel(&ts->sched_timer);
-       hrtimer_set_expires(&ts->sched_timer, ts->last_tick);
-
-       while (1) {
-               /* Forward the time to expire in the future */
-               hrtimer_forward(&ts->sched_timer, now, tick_period);
-
-               if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
-                       hrtimer_start_expires(&ts->sched_timer,
-                                             HRTIMER_MODE_ABS_PINNED);
-                       /* Check, if the timer was already in the past */
-                       if (hrtimer_active(&ts->sched_timer))
-                               break;
-               } else {
-                       if (!tick_program_event(
-                               hrtimer_get_expires(&ts->sched_timer), 0))
-                               break;
-               }
-               /* Reread time and update jiffies */
-               now = ktime_get();
-               tick_do_update_jiffies64(now);
-       }
-}
-
 static void tick_nohz_restart_sched_tick(struct tick_sched *ts, ktime_t now)
 {
        /* Update jiffies first */
@@ -972,12 +934,6 @@ void tick_nohz_idle_exit(void)
        local_irq_enable();
 }
 
-static int tick_nohz_reprogram(struct tick_sched *ts, ktime_t now)
-{
-       hrtimer_forward(&ts->sched_timer, now, tick_period);
-       return tick_program_event(hrtimer_get_expires(&ts->sched_timer), 0);
-}
-
 /*
  * The nohz low res interrupt handler
  */
@@ -996,10 +952,18 @@ static void tick_nohz_handler(struct clock_event_device *dev)
        if (unlikely(ts->tick_stopped))
                return;
 
-       while (tick_nohz_reprogram(ts, now)) {
-               now = ktime_get();
-               tick_do_update_jiffies64(now);
-       }
+       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
+}
+
+static inline void tick_nohz_activate(struct tick_sched *ts, int mode)
+{
+       if (!tick_nohz_enabled)
+               return;
+       ts->nohz_mode = mode;
+       /* One update is enough */
+       if (!test_and_set_bit(0, &tick_nohz_active))
+               timers_update_migration(true);
 }
 
 /**
@@ -1013,13 +977,8 @@ static void tick_nohz_switch_to_nohz(void)
        if (!tick_nohz_enabled)
                return;
 
-       local_irq_disable();
-       if (tick_switch_to_oneshot(tick_nohz_handler)) {
-               local_irq_enable();
+       if (tick_switch_to_oneshot(tick_nohz_handler))
                return;
-       }
-       tick_nohz_active = 1;
-       ts->nohz_mode = NOHZ_MODE_LOWRES;
 
        /*
         * Recycle the hrtimer in ts, so we can share the
@@ -1029,13 +988,10 @@ static void tick_nohz_switch_to_nohz(void)
        /* Get the next period */
        next = tick_init_jiffy_update();
 
-       for (;;) {
-               hrtimer_set_expires(&ts->sched_timer, next);
-               if (!tick_program_event(next, 0))
-                       break;
-               next = ktime_add(next, tick_period);
-       }
-       local_irq_enable();
+       hrtimer_forward_now(&ts->sched_timer, tick_period);
+       hrtimer_set_expires(&ts->sched_timer, next);
+       tick_program_event(next, 1);
+       tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
 }
 
 /*
@@ -1087,6 +1043,7 @@ static inline void tick_nohz_irq_enter(void)
 
 static inline void tick_nohz_switch_to_nohz(void) { }
 static inline void tick_nohz_irq_enter(void) { }
+static inline void tick_nohz_activate(struct tick_sched *ts, int mode) { }
 
 #endif /* CONFIG_NO_HZ_COMMON */
 
@@ -1167,22 +1124,9 @@ void tick_setup_sched_timer(void)
                hrtimer_add_expires_ns(&ts->sched_timer, offset);
        }
 
-       for (;;) {
-               hrtimer_forward(&ts->sched_timer, now, tick_period);
-               hrtimer_start_expires(&ts->sched_timer,
-                                     HRTIMER_MODE_ABS_PINNED);
-               /* Check, if the timer was already in the past */
-               if (hrtimer_active(&ts->sched_timer))
-                       break;
-               now = ktime_get();
-       }
-
-#ifdef CONFIG_NO_HZ_COMMON
-       if (tick_nohz_enabled) {
-               ts->nohz_mode = NOHZ_MODE_HIGHRES;
-               tick_nohz_active = 1;
-       }
-#endif
+       hrtimer_forward(&ts->sched_timer, now, tick_period);
+       hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED);
+       tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
 }
 #endif /* HIGH_RES_TIMERS */
 
@@ -1227,7 +1171,7 @@ void tick_oneshot_notify(void)
  * Called cyclic from the hrtimer softirq (driven by the timer
  * softirq) allow_nohz signals, that we can switch into low-res nohz
  * mode, because high resolution timers are disabled (either compile
- * or runtime).
+ * or runtime). Called with interrupts disabled.
  */
 int tick_check_oneshot_change(int allow_nohz)
 {
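
The rewritten tick_nohz_stop_sched_tick() above works in absolute nanoseconds instead of jiffies deltas. Its core decision, condensed into a standalone helper (TICK_NSEC hard-coded for HZ=1000; illustrative only):

#include <stdint.h>

#define TICK_NSEC 1000000ULL	/* 1 ms tick, i.e. HZ=1000 */

/*
 * The earliest of the next timer-wheel/hrtimer expiry and the next
 * RCU event decides whether the tick may be stopped at all: anything
 * due within one tick period keeps the tick running.
 */
static uint64_t next_tick_event(uint64_t basemono, uint64_t next_tmr,
				uint64_t next_rcu, int *stop_tick)
{
	uint64_t next = next_rcu < next_tmr ? next_rcu : next_tmr;

	*stop_tick = (next - basemono) > TICK_NSEC;
	return next;
}

int main(void)
{
	int stop;

	next_tick_event(1000000000ULL, 1005000000ULL, 1002000000ULL, &stop);
	return stop ? 0 : 1;	/* next event 2 ms away: tick can stop */
}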
index 28b5da3e1a176e62c081e965fbfc32090d3f1e74..42fdf4958bccd1c5d1593f4ebe578ea99477ddbf 100644 (file)
@@ -57,7 +57,7 @@ struct tick_sched {
        ktime_t                         iowait_sleeptime;
        ktime_t                         sleep_length;
        unsigned long                   last_jiffies;
-       unsigned long                   next_jiffies;
+       u64                             next_timer;
        ktime_t                         idle_expires;
        int                             do_timer_last;
 };
index 2c85b7724af4b0081a112e1b12cbcce4ef831117..85d5bb1d67ebc777e9dab638d3988b2fdaedf172 100644 (file)
@@ -41,7 +41,7 @@
 #include <asm/uaccess.h>
 #include <asm/unistd.h>
 
-#include "timeconst.h"
+#include <generated/timeconst.h>
 #include "timekeeping.h"
 
 /*
@@ -173,6 +173,10 @@ int do_sys_settimeofday(const struct timespec *tv, const struct timezone *tz)
                return error;
 
        if (tz) {
+               /* Verify we're within the +-15 hrs range */
+               if (tz->tz_minuteswest > 15*60 || tz->tz_minuteswest < -15*60)
+                       return -EINVAL;
+
                sys_tz = *tz;
                update_vsyscall_tz();
                if (firsttime) {
@@ -483,9 +487,11 @@ struct timespec64 ns_to_timespec64(const s64 nsec)
 }
 EXPORT_SYMBOL(ns_to_timespec64);
 #endif
-/*
- * When we convert to jiffies then we interpret incoming values
- * the following way:
+/**
+ * msecs_to_jiffies: - convert milliseconds to jiffies
+ * @m: time in milliseconds
+ *
+ * Conversion is done as follows:
  *
  * - negative values mean 'infinite timeout' (MAX_JIFFY_OFFSET)
  *
@@ -493,66 +499,36 @@ EXPORT_SYMBOL(ns_to_timespec64);
  *   MAX_JIFFY_OFFSET values] mean 'infinite timeout' too.
  *
  * - all other values are converted to jiffies by either multiplying
- *   the input value by a factor or dividing it with a factor
- *
- * We must also be careful about 32-bit overflows.
+ *   the input value by a factor or dividing it with a factor and
+ *   handling any 32-bit overflows.
+ *   For the details see __msecs_to_jiffies().
+ *
+ * msecs_to_jiffies() checks for the passed-in value being a constant
+ * via __builtin_constant_p(), allowing gcc to eliminate most of the
+ * code; __msecs_to_jiffies() is called if the value passed does not
+ * allow constant folding and the actual conversion must be done at
+ * runtime.
+ * The _msecs_to_jiffies() helpers are the HZ-dependent conversion
+ * routines found in include/linux/jiffies.h.
  */
-unsigned long msecs_to_jiffies(const unsigned int m)
+unsigned long __msecs_to_jiffies(const unsigned int m)
 {
        /*
         * Negative value, means infinite timeout:
         */
        if ((int)m < 0)
                return MAX_JIFFY_OFFSET;
-
-#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ)
-       /*
-        * HZ is equal to or smaller than 1000, and 1000 is a nice
-        * round multiple of HZ, divide with the factor between them,
-        * but round upwards:
-        */
-       return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ);
-#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC)
-       /*
-        * HZ is larger than 1000, and HZ is a nice round multiple of
-        * 1000 - simply multiply with the factor between them.
-        *
-        * But first make sure the multiplication result cannot
-        * overflow:
-        */
-       if (m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-
-       return m * (HZ / MSEC_PER_SEC);
-#else
-       /*
-        * Generic case - multiply, round and divide. But first
-        * check that if we are doing a net multiplication, that
-        * we wouldn't overflow:
-        */
-       if (HZ > MSEC_PER_SEC && m > jiffies_to_msecs(MAX_JIFFY_OFFSET))
-               return MAX_JIFFY_OFFSET;
-
-       return (MSEC_TO_HZ_MUL32 * m + MSEC_TO_HZ_ADJ32)
-               >> MSEC_TO_HZ_SHR32;
-#endif
+       return _msecs_to_jiffies(m);
 }
-EXPORT_SYMBOL(msecs_to_jiffies);
+EXPORT_SYMBOL(__msecs_to_jiffies);
 
-unsigned long usecs_to_jiffies(const unsigned int u)
+unsigned long __usecs_to_jiffies(const unsigned int u)
 {
        if (u > jiffies_to_usecs(MAX_JIFFY_OFFSET))
                return MAX_JIFFY_OFFSET;
-#if HZ <= USEC_PER_SEC && !(USEC_PER_SEC % HZ)
-       return (u + (USEC_PER_SEC / HZ) - 1) / (USEC_PER_SEC / HZ);
-#elif HZ > USEC_PER_SEC && !(HZ % USEC_PER_SEC)
-       return u * (HZ / USEC_PER_SEC);
-#else
-       return (USEC_TO_HZ_MUL32 * u + USEC_TO_HZ_ADJ32)
-               >> USEC_TO_HZ_SHR32;
-#endif
+       return _usecs_to_jiffies(u);
 }
-EXPORT_SYMBOL(usecs_to_jiffies);
+EXPORT_SYMBOL(__usecs_to_jiffies);
 
 /*
  * The TICK_NSEC - 1 rounds up the value to the next resolution.  Note
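
The renames above exist so that a wrapper in include/linux/jiffies.h can constant-fold the common case. A sketch of that dispatch, assuming the _msecs_to_jiffies() and MAX_JIFFY_OFFSET definitions from jiffies.h (the real wrapper also range-checks constants at compile time):

static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
{
	if (__builtin_constant_p(m)) {
		if ((int)m < 0)
			return MAX_JIFFY_OFFSET;	/* infinite timeout */
		return _msecs_to_jiffies(m);	/* folded at compile time */
	}
	return __msecs_to_jiffies(m);		/* runtime conversion */
}

For constant arguments gcc reduces the whole call to a literal, so only non-constant callers ever reach the exported __msecs_to_jiffies().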
index 511bdf2cafdaa2794834ec1695ace5a587cf2022..c7388dee86358ae46967a20f39ffab6f44080d16 100644 (file)
@@ -50,7 +50,7 @@ define timeconst(hz) {
        print "#include <linux/types.h>\n\n"
 
        print "#if HZ != ", hz, "\n"
-       print "#error \qkernel/timeconst.h has the wrong HZ value!\q\n"
+       print "#error \qinclude/generated/timeconst.h has the wrong HZ value!\q\n"
        print "#endif\n\n"
 
        if (hz < 2) {
@@ -105,4 +105,5 @@ define timeconst(hz) {
        halt
 }
 
+hz = read();
 timeconst(hz)
index 946acb72179facb1c173e54592b3c1c3637f8abd..30b7a409bf1ea19001e3eeb966679137bfe74029 100644 (file)
@@ -118,18 +118,6 @@ static inline void tk_update_sleep_time(struct timekeeper *tk, ktime_t delta)
 
 #ifdef CONFIG_DEBUG_TIMEKEEPING
 #define WARNING_FREQ (HZ*300) /* 5 minute rate-limiting */
-/*
- * These simple flag variables are managed
- * without locks, which is racy, but ok since
- * we don't really care about being super
- * precise about how many events were seen,
- * just that a problem was observed.
- */
-static int timekeeping_underflow_seen;
-static int timekeeping_overflow_seen;
-
-/* last_warning is only modified under the timekeeping lock */
-static long timekeeping_last_warning;
 
 static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
 {
@@ -149,29 +137,30 @@ static void timekeeping_check_update(struct timekeeper *tk, cycle_t offset)
                }
        }
 
-       if (timekeeping_underflow_seen) {
-               if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+       if (tk->underflow_seen) {
+               if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Underflow in clocksource '%s' observed, time update ignored.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
-                       timekeeping_last_warning = jiffies;
+                       tk->last_warning = jiffies;
                }
-               timekeeping_underflow_seen = 0;
+               tk->underflow_seen = 0;
        }
 
-       if (timekeeping_overflow_seen) {
-               if (jiffies - timekeeping_last_warning > WARNING_FREQ) {
+       if (tk->overflow_seen) {
+               if (jiffies - tk->last_warning > WARNING_FREQ) {
                        printk_deferred("WARNING: Overflow in clocksource '%s' observed, time update capped.\n", name);
                        printk_deferred("         Please report this, consider using a different clocksource, if possible.\n");
                        printk_deferred("         Your kernel is probably still fine.\n");
-                       timekeeping_last_warning = jiffies;
+                       tk->last_warning = jiffies;
                }
-               timekeeping_overflow_seen = 0;
+               tk->overflow_seen = 0;
        }
 }
 
 static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
 {
+       struct timekeeper *tk = &tk_core.timekeeper;
        cycle_t now, last, mask, max, delta;
        unsigned int seq;
 
@@ -197,13 +186,13 @@ static inline cycle_t timekeeping_get_delta(struct tk_read_base *tkr)
         * mask-relative negative values.
         */
        if (unlikely((~delta & mask) < (mask >> 3))) {
-               timekeeping_underflow_seen = 1;
+               tk->underflow_seen = 1;
                delta = 0;
        }
 
        /* Cap delta value to the max_cycles values to avoid mult overflows */
        if (unlikely(delta > max)) {
-               timekeeping_overflow_seen = 1;
+               tk->overflow_seen = 1;
                delta = tkr->clock->max_cycles;
        }
 
@@ -550,6 +539,17 @@ int pvclock_gtod_unregister_notifier(struct notifier_block *nb)
 }
 EXPORT_SYMBOL_GPL(pvclock_gtod_unregister_notifier);
 
+/*
+ * tk_update_leap_state - helper to update the next_leap_ktime
+ */
+static inline void tk_update_leap_state(struct timekeeper *tk)
+{
+       tk->next_leap_ktime = ntp_get_next_leap();
+       if (tk->next_leap_ktime.tv64 != KTIME_MAX)
+               /* Convert to monotonic time */
+               tk->next_leap_ktime = ktime_sub(tk->next_leap_ktime, tk->offs_real);
+}
+
 /*
  * Update the ktime_t based scalar nsec members of the timekeeper
  */
@@ -591,17 +591,25 @@ static void timekeeping_update(struct timekeeper *tk, unsigned int action)
                ntp_clear();
        }
 
+       tk_update_leap_state(tk);
        tk_update_ktime_data(tk);
 
        update_vsyscall(tk);
        update_pvclock_gtod(tk, action & TK_CLOCK_WAS_SET);
 
+       update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
+       update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
+
+       if (action & TK_CLOCK_WAS_SET)
+               tk->clock_was_set_seq++;
+       /*
+        * The mirroring of the data to the shadow-timekeeper needs
+        * to happen last here to ensure we don't overwrite the
+        * timekeeper structure on the next update with stale data.
+        */
        if (action & TK_MIRROR)
                memcpy(&shadow_timekeeper, &tk_core.timekeeper,
                       sizeof(tk_core.timekeeper));
-
-       update_fast_timekeeper(&tk->tkr_mono, &tk_fast_mono);
-       update_fast_timekeeper(&tk->tkr_raw,  &tk_fast_raw);
 }
 
 /**
@@ -699,6 +707,23 @@ ktime_t ktime_get(void)
 }
 EXPORT_SYMBOL_GPL(ktime_get);
 
+u32 ktime_get_resolution_ns(void)
+{
+       struct timekeeper *tk = &tk_core.timekeeper;
+       unsigned int seq;
+       u32 nsecs;
+
+       WARN_ON(timekeeping_suspended);
+
+       do {
+               seq = read_seqcount_begin(&tk_core.seq);
+               nsecs = tk->tkr_mono.mult >> tk->tkr_mono.shift;
+       } while (read_seqcount_retry(&tk_core.seq, seq));
+
+       return nsecs;
+}
+EXPORT_SYMBOL_GPL(ktime_get_resolution_ns);
+
 static ktime_t *offsets[TK_OFFS_MAX] = {
        [TK_OFFS_REAL]  = &tk_core.timekeeper.offs_real,
        [TK_OFFS_BOOT]  = &tk_core.timekeeper.offs_boot,
@@ -1179,28 +1204,20 @@ void __weak read_persistent_clock64(struct timespec64 *ts64)
 }
 
 /**
- * read_boot_clock -  Return time of the system start.
+ * read_boot_clock64 -  Return time of the system start.
  *
  * Weak dummy function for arches that do not yet support it.
  * Function to read the exact time the system has been started.
- * Returns a timespec with tv_sec=0 and tv_nsec=0 if unsupported.
+ * Returns a timespec64 with tv_sec=0 and tv_nsec=0 if unsupported.
  *
  *  XXX - Do be sure to remove it once all arches implement it.
  */
-void __weak read_boot_clock(struct timespec *ts)
+void __weak read_boot_clock64(struct timespec64 *ts)
 {
        ts->tv_sec = 0;
        ts->tv_nsec = 0;
 }
 
-void __weak read_boot_clock64(struct timespec64 *ts64)
-{
-       struct timespec ts;
-
-       read_boot_clock(&ts);
-       *ts64 = timespec_to_timespec64(ts);
-}
-
 /* Flag for if timekeeping_resume() has injected sleeptime */
 static bool sleeptime_injected;
 
@@ -1836,8 +1853,9 @@ void update_wall_time(void)
         * memcpy under the tk_core.seq against one before we start
         * updating.
         */
+       timekeeping_update(tk, clock_set);
        memcpy(real_tk, tk, sizeof(*tk));
-       timekeeping_update(real_tk, clock_set);
+       /* The memcpy must come last. Do not put anything here! */
        write_seqcount_end(&tk_core.seq);
 out:
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
@@ -1925,48 +1943,21 @@ void do_timer(unsigned long ticks)
        calc_global_load(ticks);
 }
 
-/**
- * ktime_get_update_offsets_tick - hrtimer helper
- * @offs_real: pointer to storage for monotonic -> realtime offset
- * @offs_boot: pointer to storage for monotonic -> boottime offset
- * @offs_tai:  pointer to storage for monotonic -> clock tai offset
- *
- * Returns monotonic time at last tick and various offsets
- */
-ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real, ktime_t *offs_boot,
-                                                       ktime_t *offs_tai)
-{
-       struct timekeeper *tk = &tk_core.timekeeper;
-       unsigned int seq;
-       ktime_t base;
-       u64 nsecs;
-
-       do {
-               seq = read_seqcount_begin(&tk_core.seq);
-
-               base = tk->tkr_mono.base;
-               nsecs = tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift;
-
-               *offs_real = tk->offs_real;
-               *offs_boot = tk->offs_boot;
-               *offs_tai = tk->offs_tai;
-       } while (read_seqcount_retry(&tk_core.seq, seq));
-
-       return ktime_add_ns(base, nsecs);
-}
-
-#ifdef CONFIG_HIGH_RES_TIMERS
 /**
  * ktime_get_update_offsets_now - hrtimer helper
+ * @cwsseq:    pointer to check and store the clock was set sequence number
  * @offs_real: pointer to storage for monotonic -> realtime offset
  * @offs_boot: pointer to storage for monotonic -> boottime offset
  * @offs_tai:  pointer to storage for monotonic -> clock tai offset
  *
- * Returns current monotonic time and updates the offsets
+ * Returns current monotonic time and updates the offsets if the
+ * sequence number in @cwsseq and timekeeper.clock_was_set_seq are
+ * different.
+ *
  * Called from hrtimer_interrupt() or retrigger_next_event()
  */
-ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
-                                                       ktime_t *offs_tai)
+ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq, ktime_t *offs_real,
+                                    ktime_t *offs_boot, ktime_t *offs_tai)
 {
        struct timekeeper *tk = &tk_core.timekeeper;
        unsigned int seq;
@@ -1978,15 +1969,23 @@ ktime_t ktime_get_update_offsets_now(ktime_t *offs_real, ktime_t *offs_boot,
 
                base = tk->tkr_mono.base;
                nsecs = timekeeping_get_ns(&tk->tkr_mono);
+               base = ktime_add_ns(base, nsecs);
+
+               if (*cwsseq != tk->clock_was_set_seq) {
+                       *cwsseq = tk->clock_was_set_seq;
+                       *offs_real = tk->offs_real;
+                       *offs_boot = tk->offs_boot;
+                       *offs_tai = tk->offs_tai;
+               }
+
+               /* Handle leapsecond insertion adjustments */
+               if (unlikely(base.tv64 >= tk->next_leap_ktime.tv64))
+                       *offs_real = ktime_sub(tk->offs_real, ktime_set(1, 0));
 
-               *offs_real = tk->offs_real;
-               *offs_boot = tk->offs_boot;
-               *offs_tai = tk->offs_tai;
        } while (read_seqcount_retry(&tk_core.seq, seq));
 
-       return ktime_add_ns(base, nsecs);
+       return base;
 }
-#endif
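/*
 * Sketch of the intended calling pattern: the caller caches the
 * clock-was-set sequence number and only pays for refreshing the
 * offsets when the clock was actually set. The struct and function
 * names here are illustrative; the real consumer is the hrtimer code.
 */
struct hrtimer_offsets_cache {
	unsigned int cws_seq;		/* cached clock_was_set_seq */
	ktime_t offs_real, offs_boot, offs_tai;
};

static ktime_t update_base_time(struct hrtimer_offsets_cache *c)
{
	return ktime_get_update_offsets_now(&c->cws_seq, &c->offs_real,
					    &c->offs_boot, &c->offs_tai);
}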
 
 /**
  * do_adjtimex() - Accessor function to NTP __do_adjtimex function
@@ -2027,6 +2026,8 @@ int do_adjtimex(struct timex *txc)
                __timekeeping_set_tai_offset(tk, tai);
                timekeeping_update(tk, TK_MIRROR | TK_CLOCK_WAS_SET);
        }
+       tk_update_leap_state(tk);
+
        write_seqcount_end(&tk_core.seq);
        raw_spin_unlock_irqrestore(&timekeeper_lock, flags);
 
index ead8794b9a4e470242d37684fd04079ffbd70dec..704f595ce83f03090f3f6d63a4d792703b4084a4 100644 (file)
@@ -3,19 +3,16 @@
 /*
  * Internal interfaces for kernel/time/
  */
-extern ktime_t ktime_get_update_offsets_tick(ktime_t *offs_real,
-                                               ktime_t *offs_boot,
-                                               ktime_t *offs_tai);
-extern ktime_t ktime_get_update_offsets_now(ktime_t *offs_real,
-                                               ktime_t *offs_boot,
-                                               ktime_t *offs_tai);
+extern ktime_t ktime_get_update_offsets_now(unsigned int *cwsseq,
+                                           ktime_t *offs_real,
+                                           ktime_t *offs_boot,
+                                           ktime_t *offs_tai);
 
 extern int timekeeping_valid_for_hres(void);
 extern u64 timekeeping_max_deferment(void);
 extern int timekeeping_inject_offset(struct timespec *ts);
 extern s32 timekeeping_get_tai_offset(void);
 extern void timekeeping_set_tai_offset(s32 tai_offset);
-extern void timekeeping_clocktai(struct timespec *ts);
 extern int timekeeping_suspend(void);
 extern void timekeeping_resume(void);
 
index 2ece3aa5069cade64b8c4982e920a45bea5ba232..520499dd85af42e96b2bbd8c729df36d238ad27a 100644 (file)
@@ -49,6 +49,8 @@
 #include <asm/timex.h>
 #include <asm/io.h>
 
+#include "tick-internal.h"
+
 #define CREATE_TRACE_POINTS
 #include <trace/events/timer.h>
 
@@ -68,11 +70,11 @@ EXPORT_SYMBOL(jiffies_64);
 #define MAX_TVAL ((unsigned long)((1ULL << (TVR_BITS + 4*TVN_BITS)) - 1))
 
 struct tvec {
-       struct list_head vec[TVN_SIZE];
+       struct hlist_head vec[TVN_SIZE];
 };
 
 struct tvec_root {
-       struct list_head vec[TVR_SIZE];
+       struct hlist_head vec[TVR_SIZE];
 };
 
 struct tvec_base {
@@ -83,6 +85,8 @@ struct tvec_base {
        unsigned long active_timers;
        unsigned long all_timers;
        int cpu;
+       bool migration_enabled;
+       bool nohz_active;
        struct tvec_root tv1;
        struct tvec tv2;
        struct tvec tv3;
@@ -90,43 +94,60 @@ struct tvec_base {
        struct tvec tv5;
 } ____cacheline_aligned;
 
-/*
- * __TIMER_INITIALIZER() needs to set ->base to a valid pointer (because we've
- * made NULL special, hint: lock_timer_base()) and we cannot get a compile time
- * pointer to per-cpu entries because we don't know where we'll map the section,
- * even for the boot cpu.
- *
- * And so we use boot_tvec_bases for boot CPU and per-cpu __tvec_bases for the
- * rest of them.
- */
-struct tvec_base boot_tvec_bases;
-EXPORT_SYMBOL(boot_tvec_bases);
 
-static DEFINE_PER_CPU(struct tvec_base *, tvec_bases) = &boot_tvec_bases;
+static DEFINE_PER_CPU(struct tvec_base, tvec_bases);
+
+#if defined(CONFIG_SMP) && defined(CONFIG_NO_HZ_COMMON)
+unsigned int sysctl_timer_migration = 1;
 
-/* Functions below help us manage 'deferrable' flag */
-static inline unsigned int tbase_get_deferrable(struct tvec_base *base)
+void timers_update_migration(bool update_nohz)
 {
-       return ((unsigned int)(unsigned long)base & TIMER_DEFERRABLE);
+       bool on = sysctl_timer_migration && tick_nohz_active;
+       unsigned int cpu;
+
+       /* Avoid the loop if there is nothing to update */
+       if (this_cpu_read(tvec_bases.migration_enabled) == on)
+               return;
+
+       for_each_possible_cpu(cpu) {
+               per_cpu(tvec_bases.migration_enabled, cpu) = on;
+               per_cpu(hrtimer_bases.migration_enabled, cpu) = on;
+               if (!update_nohz)
+                       continue;
+               per_cpu(tvec_bases.nohz_active, cpu) = true;
+               per_cpu(hrtimer_bases.nohz_active, cpu) = true;
+       }
 }
 
-static inline unsigned int tbase_get_irqsafe(struct tvec_base *base)
+int timer_migration_handler(struct ctl_table *table, int write,
+                           void __user *buffer, size_t *lenp,
+                           loff_t *ppos)
 {
-       return ((unsigned int)(unsigned long)base & TIMER_IRQSAFE);
+       static DEFINE_MUTEX(mutex);
+       int ret;
+
+       mutex_lock(&mutex);
+       ret = proc_dointvec(table, write, buffer, lenp, ppos);
+       if (!ret && write)
+               timers_update_migration(false);
+       mutex_unlock(&mutex);
+       return ret;
 }
 
-static inline struct tvec_base *tbase_get_base(struct tvec_base *base)
+static inline struct tvec_base *get_target_base(struct tvec_base *base,
+                                               int pinned)
 {
-       return ((struct tvec_base *)((unsigned long)base & ~TIMER_FLAG_MASK));
+       if (pinned || !base->migration_enabled)
+               return this_cpu_ptr(&tvec_bases);
+       return per_cpu_ptr(&tvec_bases, get_nohz_timer_target());
 }
-
-static inline void
-timer_set_base(struct timer_list *timer, struct tvec_base *new_base)
+#else
+static inline struct tvec_base *get_target_base(struct tvec_base *base,
+                                               int pinned)
 {
-       unsigned long flags = (unsigned long)timer->base & TIMER_FLAG_MASK;
-
-       timer->base = (struct tvec_base *)((unsigned long)(new_base) | flags);
+       return this_cpu_ptr(&tvec_bases);
 }
+#endif
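/*
 * timer_migration_handler() is meant to be wired up as the proc
 * handler of a "timer_migration" sysctl, roughly as sketched below
 * (the real entry lives in the kernel sysctl table):
 */
static struct ctl_table timer_migration_sysctl_sketch[] = {
	{
		.procname	= "timer_migration",
		.data		= &sysctl_timer_migration,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= timer_migration_handler,
	},
	{ }
};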
 
 static unsigned long round_jiffies_common(unsigned long j, int cpu,
                bool force_up)
@@ -349,26 +370,12 @@ void set_timer_slack(struct timer_list *timer, int slack_hz)
 }
 EXPORT_SYMBOL_GPL(set_timer_slack);
 
-/*
- * If the list is empty, catch up ->timer_jiffies to the current time.
- * The caller must hold the tvec_base lock.  Returns true if the list
- * was empty and therefore ->timer_jiffies was updated.
- */
-static bool catchup_timer_jiffies(struct tvec_base *base)
-{
-       if (!base->all_timers) {
-               base->timer_jiffies = jiffies;
-               return true;
-       }
-       return false;
-}
-
 static void
 __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
        unsigned long expires = timer->expires;
        unsigned long idx = expires - base->timer_jiffies;
-       struct list_head *vec;
+       struct hlist_head *vec;
 
        if (idx < TVR_SIZE) {
                int i = expires & TVR_MASK;
@@ -401,25 +408,25 @@ __internal_add_timer(struct tvec_base *base, struct timer_list *timer)
                i = (expires >> (TVR_BITS + 3 * TVN_BITS)) & TVN_MASK;
                vec = base->tv5.vec + i;
        }
-       /*
-        * Timers are FIFO:
-        */
-       list_add_tail(&timer->entry, vec);
+
+       hlist_add_head(&timer->entry, vec);
 }
 
 static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
 {
-       (void)catchup_timer_jiffies(base);
+       /* Advance base->timer_jiffies if the base is empty */
+       if (!base->all_timers++)
+               base->timer_jiffies = jiffies;
+
        __internal_add_timer(base, timer);
        /*
         * Update base->active_timers and base->next_timer
         */
-       if (!tbase_get_deferrable(timer->base)) {
+       if (!(timer->flags & TIMER_DEFERRABLE)) {
                if (!base->active_timers++ ||
                    time_before(timer->expires, base->next_timer))
                        base->next_timer = timer->expires;
        }
-       base->all_timers++;
 
        /*
         * Check whether the other CPU is in dynticks mode and needs
@@ -434,8 +441,11 @@ static void internal_add_timer(struct tvec_base *base, struct timer_list *timer)
         * require special care against races with idle_cpu(), lets deal
         * with that later.
         */
-       if (!tbase_get_deferrable(base) || tick_nohz_full_cpu(base->cpu))
-               wake_up_nohz_cpu(base->cpu);
+       if (base->nohz_active) {
+               if (!(timer->flags & TIMER_DEFERRABLE) ||
+                   tick_nohz_full_cpu(base->cpu))
+                       wake_up_nohz_cpu(base->cpu);
+       }
 }
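/*
 * Note that hlist_add_head() above replaces the old list_add_tail(),
 * so timers hashed into the same bucket now run in LIFO rather than
 * FIFO order within one jiffy. Expiry order inside a single jiffy was
 * never a guaranteed property; a sketch of two timers this affects:
 */
static void cb(unsigned long data) { }

static void ordering_sketch(void)
{
	static struct timer_list a, b;

	setup_timer(&a, cb, 1);
	setup_timer(&b, cb, 2);
	mod_timer(&a, jiffies + 1);
	mod_timer(&b, jiffies + 1);	/* 'b' may now expire before 'a' */
}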
 
 #ifdef CONFIG_TIMER_STATS
@@ -451,15 +461,12 @@ void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
 
 static void timer_stats_account_timer(struct timer_list *timer)
 {
-       unsigned int flag = 0;
-
        if (likely(!timer->start_site))
                return;
-       if (unlikely(tbase_get_deferrable(timer->base)))
-               flag |= TIMER_STATS_FLAG_DEFERRABLE;
 
        timer_stats_update_stats(timer, timer->start_pid, timer->start_site,
-                                timer->function, timer->start_comm, flag);
+                                timer->function, timer->start_comm,
+                                timer->flags);
 }
 
 #else
@@ -516,8 +523,8 @@ static int timer_fixup_activate(void *addr, enum debug_obj_state state)
                 * statically initialized. We just make sure that it
                 * is tracked in the object tracker.
                 */
-               if (timer->entry.next == NULL &&
-                   timer->entry.prev == TIMER_ENTRY_STATIC) {
+               if (timer->entry.pprev == NULL &&
+                   timer->entry.next == TIMER_ENTRY_STATIC) {
                        debug_object_init(timer, &timer_debug_descr);
                        debug_object_activate(timer, &timer_debug_descr);
                        return 0;
@@ -563,7 +570,7 @@ static int timer_fixup_assert_init(void *addr, enum debug_obj_state state)
 
        switch (state) {
        case ODEBUG_STATE_NOTAVAILABLE:
-               if (timer->entry.prev == TIMER_ENTRY_STATIC) {
+               if (timer->entry.next == TIMER_ENTRY_STATIC) {
                        /*
                         * This is not really a fixup. The timer was
                         * statically initialized. We just make sure that it
@@ -648,7 +655,7 @@ static inline void
 debug_activate(struct timer_list *timer, unsigned long expires)
 {
        debug_timer_activate(timer);
-       trace_timer_start(timer, expires);
+       trace_timer_start(timer, expires, timer->flags);
 }
 
 static inline void debug_deactivate(struct timer_list *timer)
@@ -665,10 +672,8 @@ static inline void debug_assert_init(struct timer_list *timer)
 static void do_init_timer(struct timer_list *timer, unsigned int flags,
                          const char *name, struct lock_class_key *key)
 {
-       struct tvec_base *base = raw_cpu_read(tvec_bases);
-
-       timer->entry.next = NULL;
-       timer->base = (void *)((unsigned long)base | flags);
+       timer->entry.pprev = NULL;
+       timer->flags = flags | raw_smp_processor_id();
        timer->slack = -1;
 #ifdef CONFIG_TIMER_STATS
        timer->start_site = NULL;
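/*
 * For reference, timer->flags packs the base CPU number into the low
 * bits and keeps the attribute bits above them, along these lines
 * (values as defined for this series in include/linux/timer.h):
 *
 *	TIMER_CPUMASK		0x0003FFFF	base CPU number
 *	TIMER_MIGRATING		0x00040000	base is being switched
 *	TIMER_BASEMASK		(TIMER_CPUMASK | TIMER_MIGRATING)
 *	TIMER_DEFERRABLE	0x00080000
 *	TIMER_IRQSAFE		0x00100000
 *
 * so the owning base can be recovered without a pointer:
 */
static inline unsigned int timer_base_cpu(const struct timer_list *timer)
{
	return timer->flags & TIMER_CPUMASK;	/* low bits: base CPU */
}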
@@ -699,24 +704,23 @@ EXPORT_SYMBOL(init_timer_key);
 
 static inline void detach_timer(struct timer_list *timer, bool clear_pending)
 {
-       struct list_head *entry = &timer->entry;
+       struct hlist_node *entry = &timer->entry;
 
        debug_deactivate(timer);
 
-       __list_del(entry->prev, entry->next);
+       __hlist_del(entry);
        if (clear_pending)
-               entry->next = NULL;
-       entry->prev = LIST_POISON2;
+               entry->pprev = NULL;
+       entry->next = LIST_POISON2;
 }
 
 static inline void
 detach_expired_timer(struct timer_list *timer, struct tvec_base *base)
 {
        detach_timer(timer, true);
-       if (!tbase_get_deferrable(timer->base))
+       if (!(timer->flags & TIMER_DEFERRABLE))
                base->active_timers--;
        base->all_timers--;
-       (void)catchup_timer_jiffies(base);
 }
 
 static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
@@ -726,13 +730,14 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
                return 0;
 
        detach_timer(timer, clear_pending);
-       if (!tbase_get_deferrable(timer->base)) {
+       if (!(timer->flags & TIMER_DEFERRABLE)) {
                base->active_timers--;
                if (timer->expires == base->next_timer)
                        base->next_timer = base->timer_jiffies;
        }
-       base->all_timers--;
-       (void)catchup_timer_jiffies(base);
+       /* If this was the last timer, advance base->timer_jiffies */
+       if (!--base->all_timers)
+               base->timer_jiffies = jiffies;
        return 1;
 }
 
@@ -744,24 +749,22 @@ static int detach_if_pending(struct timer_list *timer, struct tvec_base *base,
  * So __run_timers/migrate_timers can safely modify all timers which could
  * be found on ->tvX lists.
  *
- * When the timer's base is locked, and the timer removed from list, it is
- * possible to set timer->base = NULL and drop the lock: the timer remains
- * locked.
+ * When a timer is migrating between bases, the TIMER_MIGRATING flag is
+ * set in timer->flags under the old base lock and cleared again under
+ * the new one; lock_timer_base() spins while the flag is set, so it
+ * always returns a stable, locked base.
  */
 static struct tvec_base *lock_timer_base(struct timer_list *timer,
                                        unsigned long *flags)
        __acquires(timer->base->lock)
 {
-       struct tvec_base *base;
-
        for (;;) {
-               struct tvec_base *prelock_base = timer->base;
-               base = tbase_get_base(prelock_base);
-               if (likely(base != NULL)) {
+               u32 tf = timer->flags;
+               struct tvec_base *base;
+
+               if (!(tf & TIMER_MIGRATING)) {
+                       base = per_cpu_ptr(&tvec_bases, tf & TIMER_CPUMASK);
                        spin_lock_irqsave(&base->lock, *flags);
-                       if (likely(prelock_base == timer->base))
+                       if (timer->flags == tf)
                                return base;
-                       /* The timer has migrated to another CPU */
                        spin_unlock_irqrestore(&base->lock, *flags);
                }
                cpu_relax();
@@ -770,11 +773,11 @@ static struct tvec_base *lock_timer_base(struct timer_list *timer,
 
 static inline int
 __mod_timer(struct timer_list *timer, unsigned long expires,
-                                               bool pending_only, int pinned)
+           bool pending_only, int pinned)
 {
        struct tvec_base *base, *new_base;
        unsigned long flags;
-       int ret = 0 , cpu;
+       int ret = 0;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(!timer->function);
@@ -787,8 +790,7 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
 
        debug_activate(timer, expires);
 
-       cpu = get_nohz_timer_target(pinned);
-       new_base = per_cpu(tvec_bases, cpu);
+       new_base = get_target_base(base, pinned);
 
        if (base != new_base) {
                /*
@@ -800,11 +802,13 @@ __mod_timer(struct timer_list *timer, unsigned long expires,
                 */
                if (likely(base->running_timer != timer)) {
                        /* See the comment in lock_timer_base() */
-                       timer_set_base(timer, NULL);
+                       timer->flags |= TIMER_MIGRATING;
+
                        spin_unlock(&base->lock);
                        base = new_base;
                        spin_lock(&base->lock);
-                       timer_set_base(timer, base);
+                       timer->flags &= ~TIMER_BASEMASK;
+                       timer->flags |= base->cpu;
                }
        }
 
@@ -966,13 +970,13 @@ EXPORT_SYMBOL(add_timer);
  */
 void add_timer_on(struct timer_list *timer, int cpu)
 {
-       struct tvec_base *base = per_cpu(tvec_bases, cpu);
+       struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
        unsigned long flags;
 
        timer_stats_timer_set_start_info(timer);
        BUG_ON(timer_pending(timer) || !timer->function);
        spin_lock_irqsave(&base->lock, flags);
-       timer_set_base(timer, base);
+       timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
        debug_activate(timer, timer->expires);
        internal_add_timer(base, timer);
        spin_unlock_irqrestore(&base->lock, flags);
@@ -1037,8 +1041,6 @@ int try_to_del_timer_sync(struct timer_list *timer)
 EXPORT_SYMBOL(try_to_del_timer_sync);
 
 #ifdef CONFIG_SMP
-static DEFINE_PER_CPU(struct tvec_base, __tvec_bases);
-
 /**
  * del_timer_sync - deactivate a timer and wait for the handler to finish.
  * @timer: the timer to be deactivated
@@ -1093,7 +1095,7 @@ int del_timer_sync(struct timer_list *timer)
         * don't use it in hardirq context, because it
         * could lead to deadlock.
         */
-       WARN_ON(in_irq() && !tbase_get_irqsafe(timer->base));
+       WARN_ON(in_irq() && !(timer->flags & TIMER_IRQSAFE));
        for (;;) {
                int ret = try_to_del_timer_sync(timer);
                if (ret >= 0)
@@ -1107,17 +1109,17 @@ EXPORT_SYMBOL(del_timer_sync);
 static int cascade(struct tvec_base *base, struct tvec *tv, int index)
 {
        /* cascade all the timers from tv up one level */
-       struct timer_list *timer, *tmp;
-       struct list_head tv_list;
+       struct timer_list *timer;
+       struct hlist_node *tmp;
+       struct hlist_head tv_list;
 
-       list_replace_init(tv->vec + index, &tv_list);
+       hlist_move_list(tv->vec + index, &tv_list);
 
        /*
         * We are removing _all_ timers from the list, so we
         * don't have to detach them individually.
         */
-       list_for_each_entry_safe(timer, tmp, &tv_list, entry) {
-               BUG_ON(tbase_get_base(timer->base) != base);
+       hlist_for_each_entry_safe(timer, tmp, &tv_list, entry) {
                /* No accounting while moving them */
                __internal_add_timer(base, timer);
        }
@@ -1182,14 +1184,18 @@ static inline void __run_timers(struct tvec_base *base)
        struct timer_list *timer;
 
        spin_lock_irq(&base->lock);
-       if (catchup_timer_jiffies(base)) {
-               spin_unlock_irq(&base->lock);
-               return;
-       }
+
        while (time_after_eq(jiffies, base->timer_jiffies)) {
-               struct list_head work_list;
-               struct list_head *head = &work_list;
-               int index = base->timer_jiffies & TVR_MASK;
+               struct hlist_head work_list;
+               struct hlist_head *head = &work_list;
+               int index;
+
+               if (!base->all_timers) {
+                       base->timer_jiffies = jiffies;
+                       break;
+               }
+
+               index = base->timer_jiffies & TVR_MASK;
 
                /*
                 * Cascade timers:
@@ -1200,16 +1206,16 @@ static inline void __run_timers(struct tvec_base *base)
                                        !cascade(base, &base->tv4, INDEX(2)))
                        cascade(base, &base->tv5, INDEX(3));
                ++base->timer_jiffies;
-               list_replace_init(base->tv1.vec + index, head);
-               while (!list_empty(head)) {
+               hlist_move_list(base->tv1.vec + index, head);
+               while (!hlist_empty(head)) {
                        void (*fn)(unsigned long);
                        unsigned long data;
                        bool irqsafe;
 
-                       timer = list_first_entry(head, struct timer_list,entry);
+                       timer = hlist_entry(head->first, struct timer_list, entry);
                        fn = timer->function;
                        data = timer->data;
-                       irqsafe = tbase_get_irqsafe(timer->base);
+                       irqsafe = timer->flags & TIMER_IRQSAFE;
 
                        timer_stats_account_timer(timer);
 
@@ -1248,8 +1254,8 @@ static unsigned long __next_timer_interrupt(struct tvec_base *base)
        /* Look for timer events in tv1. */
        index = slot = timer_jiffies & TVR_MASK;
        do {
-               list_for_each_entry(nte, base->tv1.vec + slot, entry) {
-                       if (tbase_get_deferrable(nte->base))
+               hlist_for_each_entry(nte, base->tv1.vec + slot, entry) {
+                       if (nte->flags & TIMER_DEFERRABLE)
                                continue;
 
                        found = 1;
@@ -1279,8 +1285,8 @@ cascade:
 
                index = slot = timer_jiffies & TVN_MASK;
                do {
-                       list_for_each_entry(nte, varp->vec + slot, entry) {
-                               if (tbase_get_deferrable(nte->base))
+                       hlist_for_each_entry(nte, varp->vec + slot, entry) {
+                               if (nte->flags & TIMER_DEFERRABLE)
                                        continue;
 
                                found = 1;
@@ -1311,54 +1317,48 @@ cascade:
  * Check, if the next hrtimer event is before the next timer wheel
  * event:
  */
-static unsigned long cmp_next_hrtimer_event(unsigned long now,
-                                           unsigned long expires)
+static u64 cmp_next_hrtimer_event(u64 basem, u64 expires)
 {
-       ktime_t hr_delta = hrtimer_get_next_event();
-       struct timespec tsdelta;
-       unsigned long delta;
-
-       if (hr_delta.tv64 == KTIME_MAX)
-               return expires;
+       u64 nextevt = hrtimer_get_next_event();
 
        /*
-        * Expired timer available, let it expire in the next tick
+        * If high resolution timers are enabled
+        * hrtimer_get_next_event() returns KTIME_MAX.
         */
-       if (hr_delta.tv64 <= 0)
-               return now + 1;
-
-       tsdelta = ktime_to_timespec(hr_delta);
-       delta = timespec_to_jiffies(&tsdelta);
+       if (expires <= nextevt)
+               return expires;
 
        /*
-        * Limit the delta to the max value, which is checked in
-        * tick_nohz_stop_sched_tick():
+        * If the next timer is already expired, return the tick base
+        * time so the tick is fired immediately.
         */
-       if (delta > NEXT_TIMER_MAX_DELTA)
-               delta = NEXT_TIMER_MAX_DELTA;
+       if (nextevt <= basem)
+               return basem;
 
        /*
-        * Take rounding errors in to account and make sure, that it
-        * expires in the next tick. Otherwise we go into an endless
-        * ping pong due to tick_nohz_stop_sched_tick() retriggering
-        * the timer softirq
+        * Round up to the next jiffie. High resolution timers are
+        * off, so the hrtimers are expired in the tick and we need to
+        * make sure that this tick really expires the timer to avoid
+        * a ping pong of the nohz stop code.
+        *
+        * Use DIV_ROUND_UP_ULL to prevent gcc from calling __divdi3
         */
-       if (delta < 1)
-               delta = 1;
-       now += delta;
-       if (time_before(now, expires))
-               return now;
-       return expires;
+       return DIV_ROUND_UP_ULL(nextevt, TICK_NSEC) * TICK_NSEC;
 }
 
 /**
- * get_next_timer_interrupt - return the jiffy of the next pending timer
- * @now: current time (in jiffies)
+ * get_next_timer_interrupt - return the time (clock mono) of the next timer
+ * @basej:     base time jiffies
+ * @basem:     base time clock monotonic
+ *
+ * Returns the tick aligned clock monotonic time of the next pending
+ * timer or KTIME_MAX if no timer is pending.
  */
-unsigned long get_next_timer_interrupt(unsigned long now)
+u64 get_next_timer_interrupt(unsigned long basej, u64 basem)
 {
-       struct tvec_base *base = __this_cpu_read(tvec_bases);
-       unsigned long expires = now + NEXT_TIMER_MAX_DELTA;
+       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
+       u64 expires = KTIME_MAX;
+       unsigned long nextevt;
 
        /*
         * Pretend that there is no timer pending if the cpu is offline.
@@ -1371,14 +1371,15 @@ unsigned long get_next_timer_interrupt(unsigned long now)
        if (base->active_timers) {
                if (time_before_eq(base->next_timer, base->timer_jiffies))
                        base->next_timer = __next_timer_interrupt(base);
-               expires = base->next_timer;
+               nextevt = base->next_timer;
+               if (time_before_eq(nextevt, basej))
+                       expires = basem;
+               else
+                       expires = basem + (nextevt - basej) * TICK_NSEC;
        }
        spin_unlock(&base->lock);
 
-       if (time_before_eq(expires, now))
-               return now;
-
-       return cmp_next_hrtimer_event(now, expires);
+       return cmp_next_hrtimer_event(basem, expires);
 }
 #endif
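/*
 * Sketch of the expected calling pattern from the nohz code: the
 * jiffies and clock monotonic bases are sampled once, handed in
 * together, and the tick-aligned result programs the next event.
 * Names are illustrative; the real caller is in tick-sched.c.
 */
static u64 next_timer_event_sketch(void)
{
	unsigned long basejiff = jiffies;
	u64 basemono = ktime_get_ns();

	return get_next_timer_interrupt(basejiff, basemono);
}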
 
@@ -1407,9 +1408,7 @@ void update_process_times(int user_tick)
  */
 static void run_timer_softirq(struct softirq_action *h)
 {
-       struct tvec_base *base = __this_cpu_read(tvec_bases);
-
-       hrtimer_run_pending();
+       struct tvec_base *base = this_cpu_ptr(&tvec_bases);
 
        if (time_after_eq(jiffies, base->timer_jiffies))
                __run_timers(base);
@@ -1545,15 +1544,16 @@ signed long __sched schedule_timeout_uninterruptible(signed long timeout)
 EXPORT_SYMBOL(schedule_timeout_uninterruptible);
 
 #ifdef CONFIG_HOTPLUG_CPU
-static void migrate_timer_list(struct tvec_base *new_base, struct list_head *head)
+static void migrate_timer_list(struct tvec_base *new_base, struct hlist_head *head)
 {
        struct timer_list *timer;
+       int cpu = new_base->cpu;
 
-       while (!list_empty(head)) {
-               timer = list_first_entry(head, struct timer_list, entry);
+       while (!hlist_empty(head)) {
+               timer = hlist_entry(head->first, struct timer_list, entry);
                /* We ignore the accounting on the dying cpu */
                detach_timer(timer, false);
-               timer_set_base(timer, new_base);
+               timer->flags = (timer->flags & ~TIMER_BASEMASK) | cpu;
                internal_add_timer(new_base, timer);
        }
 }
@@ -1565,8 +1565,8 @@ static void migrate_timers(int cpu)
        int i;
 
        BUG_ON(cpu_online(cpu));
-       old_base = per_cpu(tvec_bases, cpu);
-       new_base = get_cpu_var(tvec_bases);
+       old_base = per_cpu_ptr(&tvec_bases, cpu);
+       new_base = this_cpu_ptr(&tvec_bases);
        /*
         * The caller is globally serialized and nobody else
         * takes two locks at once, deadlock is not possible.
@@ -1590,7 +1590,6 @@ static void migrate_timers(int cpu)
 
        spin_unlock(&old_base->lock);
        spin_unlock_irq(&new_base->lock);
-       put_cpu_var(tvec_bases);
 }
 
 static int timer_cpu_notify(struct notifier_block *self,
@@ -1616,52 +1615,27 @@ static inline void timer_register_cpu_notifier(void)
 static inline void timer_register_cpu_notifier(void) { }
 #endif /* CONFIG_HOTPLUG_CPU */
 
-static void __init init_timer_cpu(struct tvec_base *base, int cpu)
+static void __init init_timer_cpu(int cpu)
 {
-       int j;
-
-       BUG_ON(base != tbase_get_base(base));
+       struct tvec_base *base = per_cpu_ptr(&tvec_bases, cpu);
 
        base->cpu = cpu;
-       per_cpu(tvec_bases, cpu) = base;
        spin_lock_init(&base->lock);
 
-       for (j = 0; j < TVN_SIZE; j++) {
-               INIT_LIST_HEAD(base->tv5.vec + j);
-               INIT_LIST_HEAD(base->tv4.vec + j);
-               INIT_LIST_HEAD(base->tv3.vec + j);
-               INIT_LIST_HEAD(base->tv2.vec + j);
-       }
-       for (j = 0; j < TVR_SIZE; j++)
-               INIT_LIST_HEAD(base->tv1.vec + j);
-
        base->timer_jiffies = jiffies;
        base->next_timer = base->timer_jiffies;
 }
 
 static void __init init_timer_cpus(void)
 {
-       struct tvec_base *base;
-       int local_cpu = smp_processor_id();
        int cpu;
 
-       for_each_possible_cpu(cpu) {
-               if (cpu == local_cpu)
-                       base = &boot_tvec_bases;
-#ifdef CONFIG_SMP
-               else
-                       base = per_cpu_ptr(&__tvec_bases, cpu);
-#endif
-
-               init_timer_cpu(base, cpu);
-       }
+       for_each_possible_cpu(cpu)
+               init_timer_cpu(cpu);
 }
 
 void __init init_timers(void)
 {
-       /* ensure there are enough low bits for flags in timer->base pointer */
-       BUILD_BUG_ON(__alignof__(struct tvec_base) & TIMER_FLAG_MASK);
-
        init_timer_cpus();
        init_timer_stats();
        timer_register_cpu_notifier();
@@ -1697,14 +1671,14 @@ unsigned long msleep_interruptible(unsigned int msecs)
 
 EXPORT_SYMBOL(msleep_interruptible);
 
-static int __sched do_usleep_range(unsigned long min, unsigned long max)
+static void __sched do_usleep_range(unsigned long min, unsigned long max)
 {
        ktime_t kmin;
        unsigned long delta;
 
        kmin = ktime_set(0, min * NSEC_PER_USEC);
        delta = (max - min) * NSEC_PER_USEC;
-       return schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
+       schedule_hrtimeout_range(&kmin, delta, HRTIMER_MODE_REL);
 }
 
 /**
@@ -1712,7 +1686,7 @@ static int __sched do_usleep_range(unsigned long min, unsigned long max)
  * @min: Minimum time in usecs to sleep
  * @max: Maximum time in usecs to sleep
  */
-void usleep_range(unsigned long min, unsigned long max)
+void __sched usleep_range(unsigned long min, unsigned long max)
 {
        __set_current_state(TASK_UNINTERRUPTIBLE);
        do_usleep_range(min, max);
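/*
 * Typical use: a driver that must wait on the order of 100us for
 * hardware and can tolerate slack, which lets the scheduler coalesce
 * nearby hrtimer wakeups. The function name is illustrative:
 */
static void wait_for_device_ready(void)
{
	usleep_range(100, 200);	/* sleep between 100 and 200 usecs */
}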
index e878c2e0ba45e06c4690646a8853406e11dd1a15..a4536e1e3e2ab7f0e298322e2217474a2f05579d 100644 (file)
@@ -29,19 +29,24 @@ struct timer_list_iter {
 
 typedef void (*print_fn_t)(struct seq_file *m, unsigned int *classes);
 
-DECLARE_PER_CPU(struct hrtimer_cpu_base, hrtimer_bases);
-
 /*
  * This allows printing both to /proc/timer_list and
  * to the console (on SysRq-Q):
  */
-#define SEQ_printf(m, x...)                    \
- do {                                          \
-       if (m)                                  \
-               seq_printf(m, x);               \
-       else                                    \
-               printk(x);                      \
- } while (0)
+__printf(2, 3)
+static void SEQ_printf(struct seq_file *m, const char *fmt, ...)
+{
+       va_list args;
+
+       va_start(args, fmt);
+
+       if (m)
+               seq_vprintf(m, fmt, args);
+       else
+               vprintk(fmt, args);
+
+       va_end(args);
+}
 
 static void print_name_offset(struct seq_file *m, void *sym)
 {
@@ -120,10 +125,10 @@ static void
 print_base(struct seq_file *m, struct hrtimer_clock_base *base, u64 now)
 {
        SEQ_printf(m, "  .base:       %pK\n", base);
-       SEQ_printf(m, "  .index:      %d\n",
-                       base->index);
-       SEQ_printf(m, "  .resolution: %Lu nsecs\n",
-                       (unsigned long long)ktime_to_ns(base->resolution));
+       SEQ_printf(m, "  .index:      %d\n", base->index);
+
+       SEQ_printf(m, "  .resolution: %u nsecs\n", (unsigned) hrtimer_resolution);
+
        SEQ_printf(m,   "  .get_time:   ");
        print_name_offset(m, base->get_time);
        SEQ_printf(m,   "\n");
@@ -158,7 +163,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
        P(nr_events);
        P(nr_retries);
        P(nr_hangs);
-       P_ns(max_hang_time);
+       P(max_hang_time);
 #endif
 #undef P
 #undef P_ns
@@ -184,7 +189,7 @@ static void print_cpu(struct seq_file *m, int cpu, u64 now)
                P_ns(idle_sleeptime);
                P_ns(iowait_sleeptime);
                P(last_jiffies);
-               P(next_jiffies);
+               P(next_timer);
                P_ns(idle_expires);
                SEQ_printf(m, "jiffies: %Lu\n",
                           (unsigned long long)jiffies);
@@ -251,6 +256,12 @@ print_tickdevice(struct seq_file *m, struct tick_device *td, int cpu)
                        SEQ_printf(m, "\n");
                }
 
+               if (dev->set_state_oneshot_stopped) {
+                       SEQ_printf(m, " oneshot stopped: ");
+                       print_name_offset(m, dev->set_state_oneshot_stopped);
+                       SEQ_printf(m, "\n");
+               }
+
                if (dev->tick_resume) {
                        SEQ_printf(m, " resume:   ");
                        print_name_offset(m, dev->tick_resume);
@@ -269,11 +280,11 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
 {
 #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
        print_tickdevice(m, tick_get_broadcast_device(), -1);
-       SEQ_printf(m, "tick_broadcast_mask: %08lx\n",
-                  cpumask_bits(tick_get_broadcast_mask())[0]);
+       SEQ_printf(m, "tick_broadcast_mask: %*pb\n",
+                  cpumask_pr_args(tick_get_broadcast_mask()));
 #ifdef CONFIG_TICK_ONESHOT
-       SEQ_printf(m, "tick_broadcast_oneshot_mask: %08lx\n",
-                  cpumask_bits(tick_get_broadcast_oneshot_mask())[0]);
+       SEQ_printf(m, "tick_broadcast_oneshot_mask: %*pb\n",
+                  cpumask_pr_args(tick_get_broadcast_oneshot_mask()));
 #endif
        SEQ_printf(m, "\n");
 #endif
@@ -282,7 +293,7 @@ static void timer_list_show_tickdevices_header(struct seq_file *m)
 
 static inline void timer_list_header(struct seq_file *m, u64 now)
 {
-       SEQ_printf(m, "Timer List Version: v0.7\n");
+       SEQ_printf(m, "Timer List Version: v0.8\n");
        SEQ_printf(m, "HRTIMER_MAX_CLOCK_BASES: %d\n", HRTIMER_MAX_CLOCK_BASES);
        SEQ_printf(m, "now at %Ld nsecs\n", (unsigned long long)now);
        SEQ_printf(m, "\n");
index 1fb08f21302ece707ac7ea3b5210863cae8b19c2..1adecb4b87c8492e558b845914bd5c9a004fe9c0 100644 (file)
@@ -68,7 +68,7 @@ struct entry {
         * Number of timeout events:
         */
        unsigned long           count;
-       unsigned int            timer_flag;
+       u32                     flags;
 
        /*
         * We save the command-line string to preserve
@@ -227,13 +227,13 @@ static struct entry *tstat_lookup(struct entry *entry, char *comm)
  * @startf:    pointer to the function which did the timer setup
  * @timerf:    pointer to the timer callback function of the timer
  * @comm:      name of the process which set up the timer
+ * @tflags:    The flags field of the timer
  *
  * When the timer is already registered, then the event counter is
  * incremented. Otherwise the timer is registered in a free slot.
  */
 void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
-                             void *timerf, char *comm,
-                             unsigned int timer_flag)
+                             void *timerf, char *comm, u32 tflags)
 {
        /*
         * It doesn't matter which lock we take:
@@ -251,7 +251,7 @@ void timer_stats_update_stats(void *timer, pid_t pid, void *startf,
        input.start_func = startf;
        input.expire_func = timerf;
        input.pid = pid;
-       input.timer_flag = timer_flag;
+       input.flags = tflags;
 
        raw_spin_lock_irqsave(lock, flags);
        if (!timer_stats_active)
@@ -306,7 +306,7 @@ static int tstats_show(struct seq_file *m, void *v)
 
        for (i = 0; i < nr_entries; i++) {
                entry = entries + i;
-               if (entry->timer_flag & TIMER_STATS_FLAG_DEFERRABLE) {
+               if (entry->flags & TIMER_DEFERRABLE) {
                        seq_printf(m, "%4luD, %5d %-16s ",
                                entry->count, entry->pid, entry->comm);
                } else {
index dd70993c266c38785510ab09f0315d1f1775d05b..3e4840633d3ee7bd926f1fe67f8b0a4b324514da 100644 (file)
@@ -409,7 +409,7 @@ static void (*torture_shutdown_hook)(void);
  */
 void torture_shutdown_absorb(const char *title)
 {
-       while (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+       while (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
                pr_notice("torture thread %s parking due to system shutdown\n",
                          title);
                schedule_timeout_uninterruptible(MAX_SCHEDULE_TIMEOUT);
@@ -480,9 +480,9 @@ static int torture_shutdown_notify(struct notifier_block *unused1,
                                   unsigned long unused2, void *unused3)
 {
        mutex_lock(&fullstop_mutex);
-       if (ACCESS_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
+       if (READ_ONCE(fullstop) == FULLSTOP_DONTSTOP) {
                VERBOSE_TOROUT_STRING("Unscheduled system shutdown detected");
-               ACCESS_ONCE(fullstop) = FULLSTOP_SHUTDOWN;
+               WRITE_ONCE(fullstop, FULLSTOP_SHUTDOWN);
        } else {
                pr_warn("Concurrent rmmod and shutdown illegal!\n");
        }
@@ -523,13 +523,13 @@ static int stutter;
  */
 void stutter_wait(const char *title)
 {
-       while (ACCESS_ONCE(stutter_pause_test) ||
-              (torture_runnable && !ACCESS_ONCE(*torture_runnable))) {
+       while (READ_ONCE(stutter_pause_test) ||
+              (torture_runnable && !READ_ONCE(*torture_runnable))) {
                if (stutter_pause_test)
-                       if (ACCESS_ONCE(stutter_pause_test) == 1)
+                       if (READ_ONCE(stutter_pause_test) == 1)
                                schedule_timeout_interruptible(1);
                        else
-                               while (ACCESS_ONCE(stutter_pause_test))
+                               while (READ_ONCE(stutter_pause_test))
                                        cond_resched();
                else
                        schedule_timeout_interruptible(round_jiffies_relative(HZ));
@@ -549,14 +549,14 @@ static int torture_stutter(void *arg)
                if (!torture_must_stop()) {
                        if (stutter > 1) {
                                schedule_timeout_interruptible(stutter - 1);
-                               ACCESS_ONCE(stutter_pause_test) = 2;
+                               WRITE_ONCE(stutter_pause_test, 2);
                        }
                        schedule_timeout_interruptible(1);
-                       ACCESS_ONCE(stutter_pause_test) = 1;
+                       WRITE_ONCE(stutter_pause_test, 1);
                }
                if (!torture_must_stop())
                        schedule_timeout_interruptible(stutter);
-               ACCESS_ONCE(stutter_pause_test) = 0;
+               WRITE_ONCE(stutter_pause_test, 0);
                torture_shutdown_absorb("torture_stutter");
        } while (!torture_must_stop());
        torture_kthread_stopping("torture_stutter");
@@ -642,13 +642,13 @@ EXPORT_SYMBOL_GPL(torture_init_end);
 bool torture_cleanup_begin(void)
 {
        mutex_lock(&fullstop_mutex);
-       if (ACCESS_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
+       if (READ_ONCE(fullstop) == FULLSTOP_SHUTDOWN) {
                pr_warn("Concurrent rmmod and shutdown illegal!\n");
                mutex_unlock(&fullstop_mutex);
                schedule_timeout_uninterruptible(10);
                return true;
        }
-       ACCESS_ONCE(fullstop) = FULLSTOP_RMMOD;
+       WRITE_ONCE(fullstop, FULLSTOP_RMMOD);
        mutex_unlock(&fullstop_mutex);
        torture_shutdown_cleanup();
        torture_shuffle_cleanup();
@@ -681,7 +681,7 @@ EXPORT_SYMBOL_GPL(torture_must_stop);
  */
 bool torture_must_stop_irq(void)
 {
-       return ACCESS_ONCE(fullstop) != FULLSTOP_DONTSTOP;
+       return READ_ONCE(fullstop) != FULLSTOP_DONTSTOP;
 }
 EXPORT_SYMBOL_GPL(torture_must_stop_irq);
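/*
 * The ACCESS_ONCE() -> READ_ONCE()/WRITE_ONCE() conversion above makes
 * the direction of each access explicit and, unlike ACCESS_ONCE(),
 * also works on non-scalar types. The idiom, in sketch form:
 */
static int shared_flag;

static void producer_sketch(void)
{
	WRITE_ONCE(shared_flag, 1);	/* single, non-torn store */
}

static int consumer_sketch(void)
{
	return READ_ONCE(shared_flag);	/* single, non-torn load */
}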
 
index 13d945c0d03f2bda5802971484b21bbe9f65301f..1b28df2d91042de97566454a80dcb36d24674a49 100644 (file)
@@ -450,7 +450,7 @@ static int __init ring_buffer_benchmark_init(void)
 
        if (producer_fifo >= 0) {
                struct sched_param param = {
-                       .sched_priority = consumer_fifo
+                       .sched_priority = producer_fifo
                };
                sched_setscheduler(producer, SCHED_FIFO, &param);
        } else
index ced69da0ff55ba08a7358cae7ceaae31546f9332..7f2e97ce71a7d12a9b2ed5e703969e635f320a57 100644 (file)
@@ -1369,19 +1369,26 @@ static int check_preds(struct filter_parse_state *ps)
 {
        int n_normal_preds = 0, n_logical_preds = 0;
        struct postfix_elt *elt;
+       int cnt = 0;
 
        list_for_each_entry(elt, &ps->postfix, list) {
-               if (elt->op == OP_NONE)
+               if (elt->op == OP_NONE) {
+                       cnt++;
                        continue;
+               }
 
                if (elt->op == OP_AND || elt->op == OP_OR) {
                        n_logical_preds++;
+                       cnt--;
                        continue;
                }
+               if (elt->op != OP_NOT)
+                       cnt--;
                n_normal_preds++;
+               WARN_ON_ONCE(cnt < 0);
        }
 
-       if (!n_normal_preds || n_logical_preds >= n_normal_preds) {
+       if (cnt != 1 || !n_normal_preds || n_logical_preds >= n_normal_preds) {
                parse_error(ps, FILT_ERR_INVALID_FILTER, 0);
                return -EINVAL;
        }
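/*
 * Worked example of the operand counting above for the filter
 * "a == 1 && b == 2", whose postfix form is "a 1 == b 2 == &&":
 *
 *	a	OP_NONE		cnt = 1
 *	1	OP_NONE		cnt = 2
 *	==			cnt = 1	(normal predicate, consumes two)
 *	b	OP_NONE		cnt = 2
 *	2	OP_NONE		cnt = 3
 *	==			cnt = 2
 *	&&	OP_AND		cnt = 1	(logical predicate)
 *
 * A well-formed expression ends with cnt == 1; an expression with a
 * dangling operand ends with cnt != 1 and is now rejected.
 */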
index 692bf7184c8c1322f54bc4fe529e4519ba22a35e..25a086bcb7004778057b1d9570dd02a275601d0a 100644 (file)
@@ -178,12 +178,13 @@ ftrace_print_hex_seq(struct trace_seq *p, const unsigned char *buf, int buf_len)
 EXPORT_SYMBOL(ftrace_print_hex_seq);
 
 const char *
-ftrace_print_array_seq(struct trace_seq *p, const void *buf, int buf_len,
+ftrace_print_array_seq(struct trace_seq *p, const void *buf, int count,
                       size_t el_size)
 {
        const char *ret = trace_seq_buffer_ptr(p);
        const char *prefix = "";
        void *ptr = (void *)buf;
+       size_t buf_len = count * el_size;
 
        trace_seq_putc(p, '{');
 
index 2316f50b07a456e979603fcee13a66fca03b1baa..581a68a04c64089b847d3b76d1abc138a83bb209 100644 (file)
@@ -41,6 +41,8 @@
 #define NMI_WATCHDOG_ENABLED      (1 << NMI_WATCHDOG_ENABLED_BIT)
 #define SOFT_WATCHDOG_ENABLED     (1 << SOFT_WATCHDOG_ENABLED_BIT)
 
+static DEFINE_MUTEX(watchdog_proc_mutex);
+
 #ifdef CONFIG_HARDLOCKUP_DETECTOR
 static unsigned long __read_mostly watchdog_enabled = SOFT_WATCHDOG_ENABLED|NMI_WATCHDOG_ENABLED;
 #else
@@ -608,26 +610,36 @@ void watchdog_nmi_enable_all(void)
 {
        int cpu;
 
-       if (!watchdog_user_enabled)
-               return;
+       mutex_lock(&watchdog_proc_mutex);
+
+       if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
+               goto unlock;
 
        get_online_cpus();
        for_each_online_cpu(cpu)
                watchdog_nmi_enable(cpu);
        put_online_cpus();
+
+unlock:
+       mutex_unlock(&watchdog_proc_mutex);
 }
 
 void watchdog_nmi_disable_all(void)
 {
        int cpu;
 
+       mutex_lock(&watchdog_proc_mutex);
+
        if (!watchdog_running)
-               return;
+               goto unlock;
 
        get_online_cpus();
        for_each_online_cpu(cpu)
                watchdog_nmi_disable(cpu);
        put_online_cpus();
+
+unlock:
+       mutex_unlock(&watchdog_proc_mutex);
 }
 #else
 static int watchdog_nmi_enable(unsigned int cpu) { return 0; }
@@ -744,8 +756,6 @@ static int proc_watchdog_update(void)
 
 }
 
-static DEFINE_MUTEX(watchdog_proc_mutex);
-
 /*
  * common function for watchdog, nmi_watchdog and soft_watchdog parameter
  *
index ba2b0c87e65b196c7c1f016798e0b59ea8dba97c..b908048f8d6a8e2b33723b222e1a4a88b2841774 100644 (file)
@@ -1233,6 +1233,7 @@ config RCU_TORTURE_TEST
        depends on DEBUG_KERNEL
        select TORTURE_TEST
        select SRCU
+       select TASKS_RCU
        default n
        help
          This option provides a kernel module that runs torture tests
@@ -1261,12 +1262,38 @@ config RCU_TORTURE_TEST_RUNNABLE
          Say N here if you want the RCU torture tests to start only
          after being manually enabled via /proc.
 
+config RCU_TORTURE_TEST_SLOW_PREINIT
+       bool "Slow down RCU grace-period pre-initialization to expose races"
+       depends on RCU_TORTURE_TEST
+       help
+         This option delays grace-period pre-initialization (the
+         propagation of CPU-hotplug changes up the rcu_node combining
+         tree) for a few jiffies between initializing each pair of
+         consecutive rcu_node structures.  This helps to expose races
+         involving grace-period pre-initialization, in other words, it
+         makes your kernel less stable.  It can also greatly increase
+         grace-period latency, especially on systems with large numbers
+         of CPUs.  This is useful when torture-testing RCU, but in
+         almost no other circumstance.
+
+         Say Y here if you want your system to crash and hang more often.
+         Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_PREINIT_DELAY
+       int "How much to slow down RCU grace-period pre-initialization"
+       range 0 5
+       default 3
+       depends on RCU_TORTURE_TEST_SLOW_PREINIT
+       help
+         This option specifies the number of jiffies to wait between
+         each rcu_node structure pre-initialization step.
+
 config RCU_TORTURE_TEST_SLOW_INIT
        bool "Slow down RCU grace-period initialization to expose races"
        depends on RCU_TORTURE_TEST
        help
-         This option makes grace-period initialization block for a
-         few jiffies between initializing each pair of consecutive
+         This option delays grace-period initialization for a few
+         jiffies between initializing each pair of consecutive
          rcu_node structures.  This helps to expose races involving
          grace-period initialization, in other words, it makes your
          kernel less stable.  It can also greatly increase grace-period
@@ -1286,6 +1313,30 @@ config RCU_TORTURE_TEST_SLOW_INIT_DELAY
          This option specifies the number of jiffies to wait between
          each rcu_node structure initialization.
 
+config RCU_TORTURE_TEST_SLOW_CLEANUP
+       bool "Slow down RCU grace-period cleanup to expose races"
+       depends on RCU_TORTURE_TEST
+       help
+         This option delays grace-period cleanup for a few jiffies
+         between cleaning up each pair of consecutive rcu_node
+         structures.  This helps to expose races involving grace-period
+         cleanup, in other words, it makes your kernel less stable.
+         It can also greatly increase grace-period latency, especially
+         on systems with large numbers of CPUs.  This is useful when
+         torture-testing RCU, but in almost no other circumstance.
+
+         Say Y here if you want your system to crash and hang more often.
+         Say N if you want a sane system.
+
+config RCU_TORTURE_TEST_SLOW_CLEANUP_DELAY
+       int "How much to slow down RCU grace-period cleanup"
+       range 0 5
+       default 3
+       depends on RCU_TORTURE_TEST_SLOW_CLEANUP
+       help
+         This option specifies the number of jiffies to wait between
+         each rcu_node structure cleanup operation.
+
 config RCU_CPU_STALL_TIMEOUT
        int "RCU CPU stall timeout in seconds"
        depends on RCU_STALL_COMMON
@@ -1322,6 +1373,17 @@ config RCU_TRACE
          Say Y here if you want to enable RCU tracing
          Say N if you are unsure.
 
+config RCU_EQS_DEBUG
+       bool "Use this when adding any sort of NO_HZ support to your arch"
+       depends on DEBUG_KERNEL
+       help
+         This option provides consistency checks in RCU's handling of
+         NO_HZ.  These checks have proven quite helpful in detecting
+         bugs in arch-specific NO_HZ code.
+
+         Say N here if you need ultimate kernel/user switch latencies
+         Say Y if you are unsure
+
 endmenu # "RCU Debugging"
 
 config DEBUG_BLOCK_EXT_DEVT
index 4f134d8907a7d03760877bfa41a3a10e7747913f..f610b2a10b3eda40c8e04f30f3779556502e9fe6 100644 (file)
@@ -191,7 +191,7 @@ int cpu_rmap_update(struct cpu_rmap *rmap, u16 index,
        /* Update distances based on topology */
        for_each_cpu(cpu, update_mask) {
                if (cpu_rmap_copy_neigh(rmap, cpu,
-                                       topology_thread_cpumask(cpu), 1))
+                                       topology_sibling_cpumask(cpu), 1))
                        continue;
                if (cpu_rmap_copy_neigh(rmap, cpu,
                                        topology_core_cpumask(cpu), 2))
index 830dd5dec40f1697b2bf2e40a294e05284ded89c..5a70f6196f577a071ae0a31e9da7fa0e1dd1bc68 100644 (file)
 int cpumask_next_and(int n, const struct cpumask *src1p,
                     const struct cpumask *src2p)
 {
-       struct cpumask tmp;
-
-       if (cpumask_and(&tmp, src1p, src2p))
-               return cpumask_next(n, &tmp);
-       return nr_cpu_ids;
+       while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
+               if (cpumask_test_cpu(n, src2p))
+                       break;
+       return n;
 }
 EXPORT_SYMBOL(cpumask_next_and);
 
@@ -139,64 +138,42 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 #endif
 
 /**
- * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
- *
+ * cpumask_local_spread - select the i'th cpu with local numa cpu's first
  * @i: index number
- * @numa_node: local numa_node
- * @dstp: cpumask with the relevant cpu bit set according to the policy
+ * @node: local numa_node
  *
- * This function sets the cpumask according to a numa aware policy.
- * cpumask could be used as an affinity hint for the IRQ related to a
- * queue. When the policy is to spread queues across cores - local cores
- * first.
+ * This function selects an online CPU according to a numa aware policy;
+ * local cpus are returned first, followed by non-local ones, then it
+ * wraps around.
  *
- * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
- * the cpu bit and need to re-call the function.
+ * It's not very efficient, but useful for setup.
  */
-int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+unsigned int cpumask_local_spread(unsigned int i, int node)
 {
-       cpumask_var_t mask;
        int cpu;
-       int ret = 0;
-
-       if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
-               return -ENOMEM;
 
+       /* Wrap: we always want a cpu. */
        i %= num_online_cpus();
 
-       if (numa_node == -1 || !cpumask_of_node(numa_node)) {
-               /* Use all online cpu's for non numa aware system */
-               cpumask_copy(mask, cpu_online_mask);
+       if (node == -1) {
+               for_each_cpu(cpu, cpu_online_mask)
+                       if (i-- == 0)
+                               return cpu;
        } else {
-               int n;
-
-               cpumask_and(mask,
-                           cpumask_of_node(numa_node), cpu_online_mask);
-
-               n = cpumask_weight(mask);
-               if (i >= n) {
-                       i -= n;
-
-                       /* If index > number of local cpu's, mask out local
-                        * cpu's
-                        */
-                       cpumask_andnot(mask, cpu_online_mask, mask);
+               /* NUMA first. */
+               for_each_cpu_and(cpu, cpumask_of_node(node), cpu_online_mask)
+                       if (i-- == 0)
+                               return cpu;
+
+               for_each_cpu(cpu, cpu_online_mask) {
+                       /* Skip NUMA nodes, done above. */
+                       if (cpumask_test_cpu(cpu, cpumask_of_node(node)))
+                               continue;
+
+                       if (i-- == 0)
+                               return cpu;
                }
        }
-
-       for_each_cpu(cpu, mask) {
-               if (--i < 0)
-                       goto out;
-       }
-
-       ret = -EAGAIN;
-
-out:
-       free_cpumask_var(mask);
-
-       if (!ret)
-               cpumask_set_cpu(cpu, dstp);
-
-       return ret;
+       BUG();
 }
-EXPORT_SYMBOL(cpumask_set_cpu_local_first);
+EXPORT_SYMBOL(cpumask_local_spread);
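/*
 * Typical use: spreading per-queue IRQ affinity hints, local NUMA
 * CPUs first. A sketch for a device with nr_queues queues (names
 * are illustrative):
 */
static void spread_queue_affinity(unsigned int nr_queues, int node)
{
	unsigned int i, cpu;

	for (i = 0; i < nr_queues; i++) {
		cpu = cpumask_local_spread(i, node);
		/* e.g. irq_set_affinity_hint(queue_irq(i), cpumask_of(cpu)) */
	}
}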
index aac511417ad19af5d9e3472747a983be5ed3ee4b..a89d041592c8bfa7b092c382962a4085560f5b1a 100644 (file)
@@ -639,7 +639,7 @@ do { \
        **************  MIPS  *****************
        ***************************************/
 #if defined(__mips__) && W_TYPE_SIZE == 32
-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v)                        \
 do {                                           \
        UDItype __ll = (UDItype)(u) * (v);      \
@@ -671,7 +671,7 @@ do {                                                \
        **************  MIPS/64  **************
        ***************************************/
 #if (defined(__mips) && __mips >= 3) && W_TYPE_SIZE == 64
-#if __GNUC__ >= 4 && __GNUC_MINOR__ >= 4
+#if (__GNUC__ >= 5) || (__GNUC__ >= 4 && __GNUC_MINOR__ >= 4)
 #define umul_ppmm(w1, w0, u, v) \
 do {                                                                   \
        typedef unsigned int __ll_UTItype __attribute__((mode(TI)));    \
index 48144cdae819017e8a9c0a89aae976359d87121b..f051d69f0910a65be2dbce9799736e2ba4eee2c7 100644 (file)
@@ -197,13 +197,13 @@ static int percpu_counter_hotcpu_callback(struct notifier_block *nb,
  * Compare counter against given value.
  * Return 1 if greater, 0 if equal and -1 if less
  */
-int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
+int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch)
 {
        s64     count;
 
        count = percpu_counter_read(fbc);
        /* Check to see if rough count will be sufficient for comparison */
-       if (abs(count - rhs) > (percpu_counter_batch*num_online_cpus())) {
+       if (abs(count - rhs) > (batch * num_online_cpus())) {
                if (count > rhs)
                        return 1;
                else
@@ -218,7 +218,7 @@ int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
        else
                return 0;
 }
-EXPORT_SYMBOL(percpu_counter_compare);
+EXPORT_SYMBOL(__percpu_counter_compare);
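/*
 * The new batch parameter lets a caller that updates the counter with
 * a large custom batch keep the comparison equally precise. A sketch,
 * with MY_BATCH as an assumed batch size used for the updates:
 */
#define MY_BATCH 1024

static int has_capacity(struct percpu_counter *used, s64 limit)
{
	/* compare with the same batch that the updates used */
	return __percpu_counter_compare(used, limit, MY_BATCH) < 0;
}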
 
 static int __init percpu_counter_startup(void)
 {
index 3d2aa27b845b53f9e281e7bf5b7a080fd24c5bbb..061550de77bc040878a1a62ef72816d85043b3a6 100644 (file)
@@ -33,7 +33,7 @@
 #include <linux/string.h>
 #include <linux/bitops.h>
 #include <linux/rcupdate.h>
-#include <linux/preempt_mask.h>                /* in_interrupt() */
+#include <linux/preempt.h>             /* in_interrupt() */
 
 
 /*
index b7595484a8150c02e086f5d1a3ac41cd971bd91b..8fe9d9662abbcda7563000c00e5f516dc156f05f 100644 (file)
@@ -23,7 +23,7 @@
 
 #ifdef __KERNEL__ /* Real code */
 
-#include <asm/i387.h>
+#include <asm/fpu/api.h>
 
 #else /* Dummy code for user space testing */
 
index b28df4019adedfe182d5c719da62b6bf05c031fe..8609378e6505123a3688e0e95a18cdde013e278a 100644 (file)
@@ -14,6 +14,7 @@
  * published by the Free Software Foundation.
  */
 
+#include <linux/atomic.h>
 #include <linux/kernel.h>
 #include <linux/init.h>
 #include <linux/log2.h>
@@ -25,6 +26,7 @@
 #include <linux/random.h>
 #include <linux/rhashtable.h>
 #include <linux/err.h>
+#include <linux/export.h>
 
 #define HASH_DEFAULT_SIZE      64UL
 #define HASH_MIN_SIZE          4U
@@ -446,6 +448,10 @@ int rhashtable_insert_slow(struct rhashtable *ht, const void *key,
        if (key && rhashtable_lookup_fast(ht, key, ht->p))
                goto exit;
 
+       err = -E2BIG;
+       if (unlikely(rht_grow_above_max(ht, tbl)))
+               goto exit;
+
        err = -EAGAIN;
        if (rhashtable_check_elasticity(ht, tbl, hash) ||
            rht_grow_above_100(ht, tbl))
@@ -738,6 +744,12 @@ int rhashtable_init(struct rhashtable *ht,
        if (params->max_size)
                ht->p.max_size = rounddown_pow_of_two(params->max_size);
 
+       if (params->insecure_max_entries)
+               ht->p.insecure_max_entries =
+                       rounddown_pow_of_two(params->insecure_max_entries);
+       else
+               ht->p.insecure_max_entries = ht->p.max_size * 2;
+
        ht->p.min_size = max(ht->p.min_size, HASH_MIN_SIZE);
 
        /* The maximum (not average) chain length grows with the
index a28df5206d95c24d6f3b4116753747f1fb2a67e3..3a5f2b366d84ed209a012cf62491ca30f6a8bca8 100644 (file)
@@ -57,7 +57,8 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
                        return res + find_zero(data) + 1 - align;
                }
                res += sizeof(unsigned long);
-               if (unlikely(max < sizeof(unsigned long)))
+               /* We already handled 'unsigned long' bytes. Did we do it all? */
+               if (unlikely(max <= sizeof(unsigned long)))
                        break;
                max -= sizeof(unsigned long);
                if (unlikely(__get_user(c,(unsigned long __user *)(src+res))))
@@ -84,13 +85,21 @@ static inline long do_strnlen_user(const char __user *src, unsigned long count,
  * @str: The string to measure.
  * @count: Maximum count (including NUL character)
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
  * Returns the size of the string INCLUDING the terminating NUL.
- * If the string is too long, returns 'count+1'.
+ * If the string is too long, returns a number larger than @count. The
+ * caller must check the return value against "> count".
  * On exception (or invalid count), returns 0.
+ *
+ * NOTE! You should basically never use this function. There is
+ * almost never any valid case for using the length of a user space
+ * string, since the string can be changed at any time by other
+ * threads. Use "strncpy_from_user()" instead to get a stable copy
+ * of the string.
  */
 long strnlen_user(const char __user *str, long count)
 {
@@ -113,7 +122,8 @@ EXPORT_SYMBOL(strnlen_user);
  * strlen_user: - Get the size of a user string INCLUDING final NUL.
  * @str: The string to measure.
  *
- * Context: User context only.  This function may sleep.
+ * Context: User context only. This function may sleep if pagefaults are
+ *          enabled.
  *
  * Get the size of a NUL-terminated string in user space.
  *
index 4abda074ea458947390b84c36f3eaad7095a2ceb..42e192decbfd605dc4175cfbe9c93b260bbad8ef 100644 (file)
@@ -537,8 +537,9 @@ EXPORT_SYMBOL_GPL(swiotlb_tbl_map_single);
  * Allocates bounce buffer and returns its kernel virtual address.
  */
 
-phys_addr_t map_single(struct device *hwdev, phys_addr_t phys, size_t size,
-                      enum dma_data_direction dir)
+static phys_addr_t
+map_single(struct device *hwdev, phys_addr_t phys, size_t size,
+          enum dma_data_direction dir)
 {
        dma_addr_t start_dma_addr = phys_to_dma(hwdev, io_tlb_start);
 
@@ -655,7 +656,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                 */
                phys_addr_t paddr = map_single(hwdev, 0, size, DMA_FROM_DEVICE);
                if (paddr == SWIOTLB_MAP_ERROR)
-                       return NULL;
+                       goto err_warn;
 
                ret = phys_to_virt(paddr);
                dev_addr = phys_to_dma(hwdev, paddr);
@@ -669,7 +670,7 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
                        /* DMA_TO_DEVICE to avoid memcpy in unmap_single */
                        swiotlb_tbl_unmap_single(hwdev, paddr,
                                                 size, DMA_TO_DEVICE);
-                       return NULL;
+                       goto err_warn;
                }
        }
 
@@ -677,6 +678,13 @@ swiotlb_alloc_coherent(struct device *hwdev, size_t size,
        memset(ret, 0, size);
 
        return ret;
+
+err_warn:
+       pr_warn("swiotlb: coherent allocation failed for device %s size=%zu\n",
+               dev_name(hwdev), size);
+       dump_stack();
+
+       return NULL;
 }
 EXPORT_SYMBOL(swiotlb_alloc_coherent);
 
index a382e4a326091691a617a7c3387a09c813c50792..782ae8ca2c06f2b3439b10592cef6a43c31223cd 100644 (file)
@@ -36,7 +36,7 @@
  * Adds the timer node to the timerqueue, sorted by the
  * node's expires value.
  */
-void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
+bool timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
 {
        struct rb_node **p = &head->head.rb_node;
        struct rb_node *parent = NULL;
@@ -56,8 +56,11 @@ void timerqueue_add(struct timerqueue_head *head, struct timerqueue_node *node)
        rb_link_node(&node->node, parent, p);
        rb_insert_color(&node->node, &head->head);
 
-       if (!head->next || node->expires.tv64 < head->next->expires.tv64)
+       if (!head->next || node->expires.tv64 < head->next->expires.tv64) {
                head->next = node;
+               return true;
+       }
+       return false;
 }
 EXPORT_SYMBOL_GPL(timerqueue_add);
 
@@ -69,7 +72,7 @@ EXPORT_SYMBOL_GPL(timerqueue_add);
  *
  * Removes the timer node from the timerqueue.
  */
-void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
+bool timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
 {
        WARN_ON_ONCE(RB_EMPTY_NODE(&node->node));
 
@@ -82,6 +85,7 @@ void timerqueue_del(struct timerqueue_head *head, struct timerqueue_node *node)
        }
        rb_erase(&node->node, &head->head);
        RB_CLEAR_NODE(&node->node);
+       return head->next != NULL;
 }
 EXPORT_SYMBOL_GPL(timerqueue_del);
 
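The new boolean results let callers avoid needless work when the earliest expiry did not change; a hypothetical caller (reprogram_hw() is an assumed helper, not a real API):

static void enqueue_event(struct timerqueue_head *head,
			  struct timerqueue_node *node)
{
	/* Only touch the hardware when this node became the new head. */
	if (timerqueue_add(head, node))
		reprogram_hw(node->expires);
}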
index 6dc4580df2af040b10bc10a5f9c423becc3ff47e..000e7b3b9896f2a9479687befd2442c43193614e 100644 (file)
@@ -359,23 +359,6 @@ static void bdi_wb_shutdown(struct backing_dev_info *bdi)
        flush_delayed_work(&bdi->wb.dwork);
 }
 
-/*
- * Called when the device behind @bdi has been removed or ejected.
- *
- * We can't really do much here except for reducing the dirty ratio at
- * the moment.  In the future we should be able to set a flag so that
- * the filesystem can handle errors at mark_inode_dirty time instead
- * of only at writeback time.
- */
-void bdi_unregister(struct backing_dev_info *bdi)
-{
-       if (WARN_ON_ONCE(!bdi->dev))
-               return;
-
-       bdi_set_min_ratio(bdi, 0);
-}
-EXPORT_SYMBOL(bdi_unregister);
-
 static void bdi_wb_init(struct bdi_writeback *wb, struct backing_dev_info *bdi)
 {
        memset(wb, 0, sizeof(*wb));
@@ -443,6 +426,7 @@ void bdi_destroy(struct backing_dev_info *bdi)
        int i;
 
        bdi_wb_shutdown(bdi);
+       bdi_set_min_ratio(bdi, 0);
 
        WARN_ON(!list_empty(&bdi->work_list));
        WARN_ON(delayed_work_pending(&bdi->wb.dwork));
index 5405aff5a590c370d3c8ca37c0f9a0c19cb46775..f0fe4f2c1fa7aa865055731834cdd78e0684cca5 100644 (file)
 #define BYTES_PER_POINTER      sizeof(void *)
 
 /* GFP bitmask for kmemleak internal allocations */
-#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC)) | \
+#define gfp_kmemleak_mask(gfp) (((gfp) & (GFP_KERNEL | GFP_ATOMIC | \
+                                          __GFP_NOACCOUNT)) | \
                                 __GFP_NORETRY | __GFP_NOMEMALLOC | \
                                 __GFP_NOWARN)
 
index 14c2f2017e37cc405e52cb12bc30b128997f1f8e..a04225d372ba3ab77516b970c10135b19def3ac4 100644 (file)
@@ -2323,6 +2323,8 @@ done_restock:
        css_get_many(&memcg->css, batch);
        if (batch > nr_pages)
                refill_stock(memcg, batch - nr_pages);
+       if (!(gfp_mask & __GFP_WAIT))
+               goto done;
        /*
         * If the hierarchy is above the normal consumption range,
         * make the charging task trim their excess contribution.
@@ -5833,9 +5835,7 @@ void mem_cgroup_swapout(struct page *page, swp_entry_t entry)
        if (!mem_cgroup_is_root(memcg))
                page_counter_uncharge(&memcg->memory, 1);
 
-       /* XXX: caller holds IRQ-safe mapping->tree_lock */
-       VM_BUG_ON(!irqs_disabled());
-
+       /* Caller disabled preemption with mapping->tree_lock */
        mem_cgroup_charge_statistics(memcg, page, -1);
        memcg_check_events(memcg, page);
 }
index 22e037e3364e0f49dcdc1de812181f54755bbbac..17734c3c1183ed799257d40eef18da55d809dd4a 100644 (file)
@@ -3737,7 +3737,7 @@ void print_vma_addr(char *prefix, unsigned long ip)
 }
 
 #if defined(CONFIG_PROVE_LOCKING) || defined(CONFIG_DEBUG_ATOMIC_SLEEP)
-void might_fault(void)
+void __might_fault(const char *file, int line)
 {
        /*
         * Some code (nfs/sunrpc) uses socket ops on kernel memory while
@@ -3747,21 +3747,15 @@ void might_fault(void)
         */
        if (segment_eq(get_fs(), KERNEL_DS))
                return;
-
-       /*
-        * it would be nicer only to annotate paths which are not under
-        * pagefault_disable, however that requires a larger audit and
-        * providing helpers like get_user_atomic.
-        */
-       if (in_atomic())
+       if (pagefault_disabled())
                return;
-
-       __might_sleep(__FILE__, __LINE__, 0);
-
+       __might_sleep(file, line, 0);
+#if defined(CONFIG_DEBUG_ATOMIC_SLEEP)
        if (current->mm)
                might_lock_read(&current->mm->mmap_sem);
+#endif
 }
-EXPORT_SYMBOL(might_fault);
+EXPORT_SYMBOL(__might_fault);
 #endif
 
 #if defined(CONFIG_TRANSPARENT_HUGEPAGE) || defined(CONFIG_HUGETLBFS)
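The switch from in_atomic() to pagefault_disabled() makes the annotation track the explicit pagefault-disable depth instead of inferring it from the preempt count. The usage pattern this covers, sketched under the assumption of the pagefault_disable()/pagefault_enable() API from the same series (the helper name is illustrative):

static int copy_nofault(void *dst, const void __user *usrc, size_t len)
{
	unsigned long left;

	pagefault_disable();
	/* Faults must not sleep here; the _inatomic copy returns the
	 * number of bytes left uncopied instead of faulting pages in. */
	left = __copy_from_user_inatomic(dst, usrc, len);
	pagefault_enable();

	return left ? -EFAULT : 0;
}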
index 457bde530cbedcf0dea2f35e219466de0acf204d..9e88f749aa512395daea45f2727545fa0f281533 100644 (file)
@@ -1969,8 +1969,10 @@ void try_offline_node(int nid)
                 * wait_table may be allocated from boot memory,
                 * here only free if it's allocated by vmalloc.
                 */
-               if (is_vmalloc_addr(zone->wait_table))
+               if (is_vmalloc_addr(zone->wait_table)) {
                        vfree(zone->wait_table);
+                       zone->wait_table = NULL;
+               }
        }
 }
 EXPORT_SYMBOL(try_offline_node);
index ede26291d4aa92ad120bfd006786414fd6d45c56..747743237d9f4d3ead6117d4ee152c00659cd362 100644 (file)
@@ -2518,7 +2518,7 @@ static void __init check_numabalancing_enable(void)
        if (numabalancing_override)
                set_numabalancing_state(numabalancing_override == 1);
 
-       if (nr_node_ids > 1 && !numabalancing_override) {
+       if (num_online_nodes() > 1 && !numabalancing_override) {
                pr_info("%s automatic NUMA balancing. "
                        "Configure with numa_balancing= or the "
                        "kernel.numa_balancing sysctl",
index 5daf5568b9e149ea9dce0383b0452bd30ad67f84..eb59f7eea50827fc09e1c4f7a432b59ff2241d17 100644 (file)
@@ -580,7 +580,7 @@ static long long pos_ratio_polynom(unsigned long setpoint,
        long x;
 
        x = div64_s64(((s64)setpoint - (s64)dirty) << RATELIMIT_CALC_SHIFT,
-                   limit - setpoint + 1);
+                     (limit - setpoint) | 1);
        pos_ratio = x;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
        pos_ratio = pos_ratio * x >> RATELIMIT_CALC_SHIFT;
@@ -807,7 +807,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
         * scale global setpoint to bdi's:
         *      bdi_setpoint = setpoint * bdi_thresh / thresh
         */
-       x = div_u64((u64)bdi_thresh << 16, thresh + 1);
+       x = div_u64((u64)bdi_thresh << 16, thresh | 1);
        bdi_setpoint = setpoint * (u64)x >> 16;
        /*
         * Use span=(8*write_bw) in single bdi case as indicated by
@@ -822,7 +822,7 @@ static unsigned long bdi_position_ratio(struct backing_dev_info *bdi,
 
        if (bdi_dirty < x_intercept - span / 4) {
                pos_ratio = div64_u64(pos_ratio * (x_intercept - bdi_dirty),
-                                   x_intercept - bdi_setpoint + 1);
+                                     (x_intercept - bdi_setpoint) | 1);
        } else
                pos_ratio /= 4;
 
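The "| 1" idiom above is worth a note: both "x + 1" and "x | 1" avoid a zero divisor when x == 0, but "+ 1" wraps back to zero when x is already all ones, while "x | 1" can never yield zero and changes the divisor by at most one. A one-line sketch of the idea:

static inline u64 div_nonzero(u64 num, u64 den)
{
	/* den | 1 >= 1 always, even for den == 0 or den == ~0ULL. */
	return div64_u64(num, den | 1);
}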
index 755a42c76eb4747623da51acdeb780b322b5ac06..303c908790efca6f7d0b30cc6d8a9db918085e10 100644 (file)
@@ -101,7 +101,8 @@ void unset_migratetype_isolate(struct page *page, unsigned migratetype)
                        buddy_idx = __find_buddy_index(page_idx, order);
                        buddy = page + (buddy_idx - page_idx);
 
-                       if (!is_migrate_isolate_page(buddy)) {
+                       if (pfn_valid_within(page_to_pfn(buddy)) &&
+                           !is_migrate_isolate_page(buddy)) {
                                __isolate_free_page(page, order);
                                kernel_map_pages(page, (1 << order), 1);
                                set_page_refcounted(page);
index de981370fbc5d596de3d419062c0977829602af7..3759099d8ce438f57398d84f50049b9e8821bd3a 100644 (file)
@@ -2451,6 +2451,7 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
                        return -ENOMEM;
                }
                inode->i_op = &shmem_short_symlink_operations;
+               inode->i_link = info->symlink;
        } else {
                error = shmem_getpage(inode, 0, &page, SGP_WRITE, NULL);
                if (error) {
@@ -2474,30 +2475,23 @@ static int shmem_symlink(struct inode *dir, struct dentry *dentry, const char *s
        return 0;
 }
 
-static void *shmem_follow_short_symlink(struct dentry *dentry, struct nameidata *nd)
-{
-       nd_set_link(nd, SHMEM_I(d_inode(dentry))->symlink);
-       return NULL;
-}
-
-static void *shmem_follow_link(struct dentry *dentry, struct nameidata *nd)
+static const char *shmem_follow_link(struct dentry *dentry, void **cookie)
 {
        struct page *page = NULL;
        int error = shmem_getpage(d_inode(dentry), 0, &page, SGP_READ, NULL);
-       nd_set_link(nd, error ? ERR_PTR(error) : kmap(page));
-       if (page)
-               unlock_page(page);
-       return page;
+       if (error)
+               return ERR_PTR(error);
+       unlock_page(page);
+       *cookie = page;
+       return kmap(page);
 }
 
-static void shmem_put_link(struct dentry *dentry, struct nameidata *nd, void *cookie)
+static void shmem_put_link(struct inode *unused, void *cookie)
 {
-       if (!IS_ERR(nd_get_link(nd))) {
-               struct page *page = cookie;
-               kunmap(page);
-               mark_page_accessed(page);
-               page_cache_release(page);
-       }
+       struct page *page = cookie;
+       kunmap(page);
+       mark_page_accessed(page);
+       page_cache_release(page);
 }
 
 #ifdef CONFIG_TMPFS_XATTR
@@ -2642,7 +2636,7 @@ static ssize_t shmem_listxattr(struct dentry *dentry, char *buffer, size_t size)
 
 static const struct inode_operations shmem_short_symlink_operations = {
        .readlink       = generic_readlink,
-       .follow_link    = shmem_follow_short_symlink,
+       .follow_link    = simple_follow_link,
 #ifdef CONFIG_TMPFS_XATTR
        .setxattr       = shmem_setxattr,
        .getxattr       = shmem_getxattr,
@@ -3401,7 +3395,13 @@ int shmem_zero_setup(struct vm_area_struct *vma)
        struct file *file;
        loff_t size = vma->vm_end - vma->vm_start;
 
-       file = shmem_file_setup("dev/zero", size, vma->vm_flags);
+       /*
+        * Cloning a new file under mmap_sem leads to a lock ordering conflict
+        * between XFS directory reading and selinux: since this file is only
+        * accessible to the user through its mapping, use S_PRIVATE flag to
+        * bypass file security, in the same way as shmem_kernel_file_setup().
+        */
+       file = __shmem_file_setup("dev/zero", size, vma->vm_flags, S_PRIVATE);
        if (IS_ERR(file))
                return PTR_ERR(file);
 
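The short-symlink branch now stashes the target in inode->i_link and delegates to the new generic helper; presumably simple_follow_link() reduces to returning that field (a sketch mirroring fs/libfs.c in this release):

const char *simple_follow_link(struct dentry *dentry, void **cookie)
{
	/* i_link was filled in when the symlink was created; there is
	 * no page to pin, so no cookie and no put_link step. */
	return d_inode(dentry)->i_link;
}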
index 08bd7a3d464a9c6959a39e269d2284600e750a50..a8b5e749e84e7dbd50d325eecf84a47316145598 100644 (file)
@@ -289,7 +289,8 @@ static int create_handle_cache(struct zs_pool *pool)
 
 static void destroy_handle_cache(struct zs_pool *pool)
 {
-       kmem_cache_destroy(pool->handle_cachep);
+       if (pool->handle_cachep)
+               kmem_cache_destroy(pool->handle_cachep);
 }
 
 static unsigned long alloc_handle(struct zs_pool *pool)
index 98a30a5b866472b7421f5394636934bc23ec7f94..59555f0f8fc85b039cab18350906ae3c86a477af 100644 (file)
@@ -443,7 +443,7 @@ static int vlan_device_event(struct notifier_block *unused, unsigned long event,
        case NETDEV_UP:
                /* Put all VLANs for this dev in the up state too.  */
                vlan_group_for_each_dev(grp, i, vlandev) {
-                       flgs = vlandev->flags;
+                       flgs = dev_get_flags(vlandev);
                        if (flgs & IFF_UP)
                                continue;
 
index 476709bd068a474f7edcac83a4869849ccfb4b17..c4802f3bd4c51086de62c048858fb7f6057f3bbb 100644 (file)
@@ -1557,7 +1557,8 @@ static int hci_dev_do_close(struct hci_dev *hdev)
 {
        BT_DBG("%s %p", hdev->name, hdev);
 
-       if (!hci_dev_test_flag(hdev, HCI_UNREGISTER)) {
+       if (!hci_dev_test_flag(hdev, HCI_UNREGISTER) &&
+           test_bit(HCI_UP, &hdev->flags)) {
                /* Execute vendor specific shutdown routine */
                if (hdev->shutdown)
                        hdev->shutdown(hdev);
@@ -2853,9 +2854,11 @@ static void le_scan_disable_work_complete(struct hci_dev *hdev, u8 status,
                         * state. If we were running both LE and BR/EDR inquiry
                         * simultaneously, and BR/EDR inquiry is already
                         * finished, stop discovery, otherwise BR/EDR inquiry
-                        * will stop discovery when finished.
+                        * will stop discovery when finished. If we will resolve
+                        * remote device name, do not change discovery state.
                         */
-                       if (!test_bit(HCI_INQUIRY, &hdev->flags))
+                       if (!test_bit(HCI_INQUIRY, &hdev->flags) &&
+                           hdev->discovery.state != DISCOVERY_RESOLVING)
                                hci_discovery_set_state(hdev,
                                                        DISCOVERY_STOPPED);
                } else {
index e0670d7054f97c05d46b74952ee53d6fa6910776..659fb96672e41e2e6525323697ca23a41d271fbb 100644 (file)
@@ -796,9 +796,11 @@ static int __br_fdb_add(struct ndmsg *ndm, struct net_bridge_port *p,
        int err = 0;
 
        if (ndm->ndm_flags & NTF_USE) {
+               local_bh_disable();
                rcu_read_lock();
                br_fdb_update(p->br, p, addr, vid, true);
                rcu_read_unlock();
+               local_bh_enable();
        } else {
                spin_lock_bh(&p->br->hash_lock);
                err = fdb_add_entry(p, addr, ndm->ndm_state,
index 4b6722f8f1790811d2ef4b9b1ae8839628b745c8..ff667e18b2d6313f0a806752a4ef88435e939c4d 100644 (file)
@@ -1072,7 +1072,7 @@ static int br_ip6_multicast_mld2_report(struct net_bridge *br,
 
                err = br_ip6_multicast_add_group(br, port, &grec->grec_mca,
                                                 vid);
-               if (!err)
+               if (err)
                        break;
        }
 
@@ -1167,6 +1167,9 @@ static void br_multicast_add_router(struct net_bridge *br,
        struct net_bridge_port *p;
        struct hlist_node *slot = NULL;
 
+       if (!hlist_unhashed(&port->rlist))
+               return;
+
        hlist_for_each_entry(p, &br->router_list, rlist) {
                if ((unsigned long) port >= (unsigned long) p)
                        break;
@@ -1194,12 +1197,8 @@ static void br_multicast_mark_router(struct net_bridge *br,
        if (port->multicast_router != 1)
                return;
 
-       if (!hlist_unhashed(&port->rlist))
-               goto timer;
-
        br_multicast_add_router(br, port);
 
-timer:
        mod_timer(&port->multicast_router_timer,
                  now + br->multicast_querier_interval);
 }
@@ -1822,7 +1821,7 @@ static void br_multicast_query_expired(struct net_bridge *br,
        if (query->startup_sent < br->multicast_startup_query_count)
                query->startup_sent++;
 
-       RCU_INIT_POINTER(querier, NULL);
+       RCU_INIT_POINTER(querier->port, NULL);
        br_multicast_send_query(br, NULL, query);
        spin_unlock(&br->multicast_lock);
 }
index ab55e2472beb0e44dece07e327f2e0eb8d3f502c..60ddfbeb47f598fed5908dfc492abef48c0acd75 100644 (file)
 #include <net/route.h>
 #include <net/netfilter/br_netfilter.h>
 
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#include <net/netfilter/nf_conntrack.h>
-#endif
-
 #include <asm/uaccess.h>
 #include "br_private.h"
 #ifdef CONFIG_SYSCTL
@@ -350,24 +346,15 @@ free_skb:
        return 0;
 }
 
-static bool dnat_took_place(const struct sk_buff *skb)
+static bool daddr_was_changed(const struct sk_buff *skb,
+                             const struct nf_bridge_info *nf_bridge)
 {
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-       enum ip_conntrack_info ctinfo;
-       struct nf_conn *ct;
-
-       ct = nf_ct_get(skb, &ctinfo);
-       if (!ct || nf_ct_is_untracked(ct))
-               return false;
-
-       return test_bit(IPS_DST_NAT_BIT, &ct->status);
-#else
-       return false;
-#endif
+       return ip_hdr(skb)->daddr != nf_bridge->ipv4_daddr;
 }
 
 /* This requires some explaining. If DNAT has taken place,
  * we will need to fix up the destination Ethernet address.
+ * This is also true when SNAT takes place (for the reply direction).
  *
  * There are two cases to consider:
  * 1. The packet was DNAT'ed to a device in the same bridge
@@ -421,7 +408,7 @@ static int br_nf_pre_routing_finish(struct sock *sk, struct sk_buff *skb)
                nf_bridge->pkt_otherhost = false;
        }
        nf_bridge->mask ^= BRNF_NF_BRIDGE_PREROUTING;
-       if (dnat_took_place(skb)) {
+       if (daddr_was_changed(skb, nf_bridge)) {
                if ((err = ip_route_input(skb, iph->daddr, iph->saddr, iph->tos, dev))) {
                        struct in_device *in_dev = __in_dev_get_rcu(dev);
 
@@ -632,6 +619,7 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
                                      struct sk_buff *skb,
                                      const struct nf_hook_state *state)
 {
+       struct nf_bridge_info *nf_bridge;
        struct net_bridge_port *p;
        struct net_bridge *br;
        __u32 len = nf_bridge_encap_header_len(skb);
@@ -669,6 +657,9 @@ static unsigned int br_nf_pre_routing(const struct nf_hook_ops *ops,
        if (!setup_pre_routing(skb))
                return NF_DROP;
 
+       nf_bridge = nf_bridge_info_get(skb);
+       nf_bridge->ipv4_daddr = ip_hdr(skb)->daddr;
+
        skb->protocol = htons(ETH_P_IP);
 
        NF_HOOK(NFPROTO_IPV4, NF_INET_PRE_ROUTING, state->sk, skb,
index 4fcaa67750fda845ad0a180332c4cd96a9524086..7caf7fae2d5b8aa369b924e1c87a47c343fb8954 100644 (file)
@@ -97,7 +97,9 @@ static void br_forward_delay_timer_expired(unsigned long arg)
                netif_carrier_on(br->dev);
        }
        br_log_state(p);
+       rcu_read_lock();
        br_ifinfo_notify(RTM_NEWLINK, p);
+       rcu_read_unlock();
        spin_unlock(&br->lock);
 }
 
index 4ec0c803aef112196657503cd615fe7a83e800bb..112ad784838a5bf6b46eed6c2b90f2d8b0e50d7a 100644 (file)
@@ -330,6 +330,10 @@ static long caif_stream_data_wait(struct sock *sk, long timeo)
                release_sock(sk);
                timeo = schedule_timeout(timeo);
                lock_sock(sk);
+
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
+
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
@@ -373,6 +377,10 @@ static int caif_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                struct sk_buff *skb;
 
                lock_sock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                skb = skb_dequeue(&sk->sk_receive_queue);
                caif_check_flow_release(sk);
 
index 41a4abc7e98eebfd36487d6d381f680732d4cd68..c4ec9239249ae6541a8ee378f95230a42c2f3a3d 100644 (file)
@@ -1306,8 +1306,6 @@ static void __unregister_linger_request(struct ceph_osd_client *osdc,
                if (list_empty(&req->r_osd_item))
                        req->r_osd = NULL;
        }
-
-       list_del_init(&req->r_req_lru_item); /* can be on notarget */
        ceph_osdc_put_request(req);
 }
 
@@ -2017,20 +2015,29 @@ static void kick_requests(struct ceph_osd_client *osdc, bool force_resend,
                err = __map_request(osdc, req,
                                    force_resend || force_resend_writes);
                dout("__map_request returned %d\n", err);
-               if (err == 0)
-                       continue;  /* no change and no osd was specified */
                if (err < 0)
                        continue;  /* hrm! */
-               if (req->r_osd == NULL) {
-                       dout("tid %llu maps to no valid osd\n", req->r_tid);
-                       needmap++;  /* request a newer map */
-                       continue;
-               }
+               if (req->r_osd == NULL || err > 0) {
+                       if (req->r_osd == NULL) {
+                               dout("lingering %p tid %llu maps to no osd\n",
+                                    req, req->r_tid);
+                               /*
+                                * A homeless lingering request makes
+                                * no sense, as its job is to keep
+                                * a particular OSD connection open.
+                                * Request a newer map and kick the
+                                * request, knowing that it won't be
+                                * resent until we actually get a map
+                                * that can tell us where to send it.
+                                */
+                               needmap++;
+                       }
 
-               dout("kicking lingering %p tid %llu osd%d\n", req, req->r_tid,
-                    req->r_osd ? req->r_osd->o_osd : -1);
-               __register_request(osdc, req);
-               __unregister_linger_request(osdc, req);
+                       dout("kicking lingering %p tid %llu osd%d\n", req,
+                            req->r_tid, req->r_osd ? req->r_osd->o_osd : -1);
+                       __register_request(osdc, req);
+                       __unregister_linger_request(osdc, req);
+               }
        }
        reset_changed_osds(osdc);
        mutex_unlock(&osdc->request_mutex);
index c7ba0388f1be8e37e780f27e0b3bd95f45dd0ade..aa82f9ab6a36d164769bf7c9633fcdfd5971466f 100644 (file)
@@ -1718,15 +1718,8 @@ EXPORT_SYMBOL_GPL(is_skb_forwardable);
 
 int __dev_forward_skb(struct net_device *dev, struct sk_buff *skb)
 {
-       if (skb_shinfo(skb)->tx_flags & SKBTX_DEV_ZEROCOPY) {
-               if (skb_copy_ubufs(skb, GFP_ATOMIC)) {
-                       atomic_long_inc(&dev->rx_dropped);
-                       kfree_skb(skb);
-                       return NET_RX_DROP;
-               }
-       }
-
-       if (unlikely(!is_skb_forwardable(dev, skb))) {
+       if (skb_orphan_frags(skb, GFP_ATOMIC) ||
+           unlikely(!is_skb_forwardable(dev, skb))) {
                atomic_long_inc(&dev->rx_dropped);
                kfree_skb(skb);
                return NET_RX_DROP;
@@ -5209,7 +5202,7 @@ static int __netdev_upper_dev_link(struct net_device *dev,
        if (__netdev_find_adj(upper_dev, dev, &upper_dev->all_adj_list.upper))
                return -EBUSY;
 
-       if (__netdev_find_adj(dev, upper_dev, &dev->all_adj_list.upper))
+       if (__netdev_find_adj(dev, upper_dev, &dev->adj_list.upper))
                return -EEXIST;
 
        if (master && netdev_master_upper_dev_get(dev))
index 78fc04ad36fc03a0f1737ee484a08e1824b324dc..572af0011997a2057f30ba0b5022760e11493d98 100644 (file)
@@ -601,7 +601,7 @@ static int rtnl_net_getid(struct sk_buff *skb, struct nlmsghdr *nlh)
        }
 
        err = rtnl_net_fill(msg, NETLINK_CB(skb).portid, nlh->nlmsg_seq, 0,
-                           RTM_GETNSID, net, peer, -1);
+                           RTM_NEWNSID, net, peer, -1);
        if (err < 0)
                goto err_out;
 
index 508155b283ddcc73a967a2bc8068e67cb8cada7d..54817d365366f8f7b408f0b790d9c92628147026 100644 (file)
@@ -2212,8 +2212,6 @@ static void spin(struct pktgen_dev *pkt_dev, ktime_t spin_until)
                do {
                        set_current_state(TASK_INTERRUPTIBLE);
                        hrtimer_start_expires(&t.timer, HRTIMER_MODE_ABS);
-                       if (!hrtimer_active(&t.timer))
-                               t.task = NULL;
 
                        if (likely(t.task))
                                schedule();
index 666e0928ba404b85cf210749e505a2c205e5ee3f..8de36824018de4da2369fb02234692c4e0260b27 100644 (file)
@@ -2416,6 +2416,9 @@ void rtmsg_ifinfo(int type, struct net_device *dev, unsigned int change,
 {
        struct sk_buff *skb;
 
+       if (dev->reg_state != NETREG_REGISTERED)
+               return;
+
        skb = rtmsg_ifinfo_build_skb(type, dev, change, flags);
        if (skb)
                rtmsg_ifinfo_send(skb, dev, flags);
index 3cfff2a3d651fb7d7cd2baaa3698c123eb7fc00f..41ec02242ea7c2ff57a6b506b685df22c62f3dcc 100644 (file)
@@ -4398,7 +4398,7 @@ struct sk_buff *alloc_skb_with_frags(unsigned long header_len,
 
                while (order) {
                        if (npages >= 1 << order) {
-                               page = alloc_pages(gfp_mask |
+                               page = alloc_pages((gfp_mask & ~__GFP_WAIT) |
                                                   __GFP_COMP |
                                                   __GFP_NOWARN |
                                                   __GFP_NORETRY,
index e891bcf325ca759c9b7498f29ec76aa946198d5e..dc30dc5bb1b892923397fee073d42e9e5ef53a7e 100644 (file)
@@ -354,15 +354,12 @@ void sk_clear_memalloc(struct sock *sk)
 
        /*
         * SOCK_MEMALLOC is allowed to ignore rmem limits to ensure forward
-        * progress of swapping. However, if SOCK_MEMALLOC is cleared while
-        * it has rmem allocations there is a risk that the user of the
-        * socket cannot make forward progress due to exceeding the rmem
-        * limits. By rights, sk_clear_memalloc() should only be called
-        * on sockets being torn down but warn and reset the accounting if
-        * that assumption breaks.
+        * progress of swapping. SOCK_MEMALLOC may be cleared while
+        * it has rmem allocations due to the last swapfile being deactivated
+        * but there is a risk that the socket is unusable due to exceeding
+        * the rmem limits. Reclaim the reserves and obey rmem limits again.
         */
-       if (WARN_ON(sk->sk_forward_alloc))
-               sk_mem_reclaim(sk);
+       sk_mem_reclaim(sk);
 }
 EXPORT_SYMBOL_GPL(sk_clear_memalloc);
 
@@ -1474,8 +1471,8 @@ void sk_release_kernel(struct sock *sk)
                return;
 
        sock_hold(sk);
-       sock_net_set(sk, get_net(&init_net));
        sock_release(sk->sk_socket);
+       sock_net_set(sk, get_net(&init_net));
        sock_put(sk);
 }
 EXPORT_SYMBOL(sk_release_kernel);
@@ -1883,7 +1880,7 @@ bool skb_page_frag_refill(unsigned int sz, struct page_frag *pfrag, gfp_t gfp)
 
        pfrag->offset = 0;
        if (SKB_FRAG_PAGE_ORDER) {
-               pfrag->page = alloc_pages(gfp | __GFP_COMP |
+               pfrag->page = alloc_pages((gfp & ~__GFP_WAIT) | __GFP_COMP |
                                          __GFP_NOWARN | __GFP_NORETRY,
                                          SKB_FRAG_PAGE_ORDER);
                if (likely(pfrag->page)) {
index e6f6cc3a1bcf45ee6fa49d6cfe840f58660e3511..392e29a0227dbf4aa4870d73c5ef333db528b675 100644 (file)
@@ -359,7 +359,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
         */
        ds = kzalloc(sizeof(*ds) + drv->priv_size, GFP_KERNEL);
        if (ds == NULL)
-               return NULL;
+               return ERR_PTR(-ENOMEM);
 
        ds->dst = dst;
        ds->index = index;
@@ -370,7 +370,7 @@ dsa_switch_setup(struct dsa_switch_tree *dst, int index,
 
        ret = dsa_switch_setup_one(ds, parent);
        if (ret)
-               return NULL;
+               return ERR_PTR(ret);
 
        return ds;
 }
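Returning ERR_PTR() rather than NULL lets callers distinguish plain allocation failure from other errors such as probe deferral; the call site presumably moves from a NULL check to the usual pattern:

	ds = dsa_switch_setup(dst, i, parent, host_dev);
	if (IS_ERR(ds))
		return PTR_ERR(ds);	/* -ENOMEM, -EPROBE_DEFER, ... */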
index 05dab2957cd49e9be95c6bbe1cef1eb9f35ab8a2..4adfd4d5471b83c254313a887687cb497ce7ad3a 100644 (file)
@@ -3,7 +3,9 @@ obj-$(CONFIG_IEEE802154_SOCKET) += ieee802154_socket.o
 obj-y += 6lowpan/
 
 ieee802154-y := netlink.o nl-mac.o nl-phy.o nl_policy.o core.o \
-                header_ops.o sysfs.o nl802154.o
+                header_ops.o sysfs.o nl802154.o trace.o
 ieee802154_socket-y := socket.o
 
+CFLAGS_trace.o := -I$(src)
+
 ccflags-y += -D__CHECK_ENDIAN__
index 1b9d25f6e898616d7972950692bcd1eab71ddb26..346c6665d25e59bf372bacedc5a2ae6df30d227c 100644 (file)
@@ -175,6 +175,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
        int rc = -ENOBUFS;
        struct net_device *dev;
        int type = __IEEE802154_DEV_INVALID;
+       unsigned char name_assign_type;
 
        pr_debug("%s\n", __func__);
 
@@ -190,8 +191,10 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
                if (devname[nla_len(info->attrs[IEEE802154_ATTR_DEV_NAME]) - 1]
                                != '\0')
                        return -EINVAL; /* phy name should be null-terminated */
+               name_assign_type = NET_NAME_USER;
        } else  {
                devname = "wpan%d";
+               name_assign_type = NET_NAME_ENUM;
        }
 
        if (strlen(devname) >= IFNAMSIZ)
@@ -221,7 +224,7 @@ int ieee802154_add_iface(struct sk_buff *skb, struct genl_info *info)
        }
 
        dev = rdev_add_virtual_intf_deprecated(wpan_phy_to_rdev(phy), devname,
-                                              type);
+                                              name_assign_type, type);
        if (IS_ERR(dev)) {
                rc = PTR_ERR(dev);
                goto nla_put_failure;
index a4daf91b8d0a395d6964dad93cf292e566dee22c..f3c12f6a4a392ad301e5a79fcabb2bb9f8521431 100644 (file)
@@ -589,7 +589,7 @@ static int nl802154_new_interface(struct sk_buff *skb, struct genl_info *info)
 
        return rdev_add_virtual_intf(rdev,
                                     nla_data(info->attrs[NL802154_ATTR_IFNAME]),
-                                    type, extended_addr);
+                                    NET_NAME_USER, type, extended_addr);
 }
 
 static int nl802154_del_interface(struct sk_buff *skb, struct genl_info *info)
index 7c46732fad2bdd3f6778fce066cb89330b5aeaa3..7b5a9dd94fe5a2b55d01103aa529261141ece521 100644 (file)
@@ -4,13 +4,16 @@
 #include <net/cfg802154.h>
 
 #include "core.h"
+#include "trace.h"
 
 static inline struct net_device *
 rdev_add_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
-                                const char *name, int type)
+                                const char *name,
+                                unsigned char name_assign_type,
+                                int type)
 {
        return rdev->ops->add_virtual_intf_deprecated(&rdev->wpan_phy, name,
-                                                     type);
+                                                     name_assign_type, type);
 }
 
 static inline void
@@ -22,75 +25,131 @@ rdev_del_virtual_intf_deprecated(struct cfg802154_registered_device *rdev,
 
 static inline int
 rdev_add_virtual_intf(struct cfg802154_registered_device *rdev, char *name,
+                     unsigned char name_assign_type,
                      enum nl802154_iftype type, __le64 extended_addr)
 {
-       return rdev->ops->add_virtual_intf(&rdev->wpan_phy, name, type,
+       int ret;
+
+       trace_802154_rdev_add_virtual_intf(&rdev->wpan_phy, name, type,
                                           extended_addr);
+       ret = rdev->ops->add_virtual_intf(&rdev->wpan_phy, name,
+                                         name_assign_type, type,
+                                         extended_addr);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_del_virtual_intf(struct cfg802154_registered_device *rdev,
                      struct wpan_dev *wpan_dev)
 {
-       return rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+       int ret;
+
+       trace_802154_rdev_del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+       ret = rdev->ops->del_virtual_intf(&rdev->wpan_phy, wpan_dev);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_channel(struct cfg802154_registered_device *rdev, u8 page, u8 channel)
 {
-       return rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
+       int ret;
+
+       trace_802154_rdev_set_channel(&rdev->wpan_phy, page, channel);
+       ret = rdev->ops->set_channel(&rdev->wpan_phy, page, channel);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_cca_mode(struct cfg802154_registered_device *rdev,
                  const struct wpan_phy_cca *cca)
 {
-       return rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
+       int ret;
+
+       trace_802154_rdev_set_cca_mode(&rdev->wpan_phy, cca);
+       ret = rdev->ops->set_cca_mode(&rdev->wpan_phy, cca);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_pan_id(struct cfg802154_registered_device *rdev,
                struct wpan_dev *wpan_dev, __le16 pan_id)
 {
-       return rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+       int ret;
+
+       trace_802154_rdev_set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+       ret = rdev->ops->set_pan_id(&rdev->wpan_phy, wpan_dev, pan_id);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_short_addr(struct cfg802154_registered_device *rdev,
                    struct wpan_dev *wpan_dev, __le16 short_addr)
 {
-       return rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+       int ret;
+
+       trace_802154_rdev_set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+       ret = rdev->ops->set_short_addr(&rdev->wpan_phy, wpan_dev, short_addr);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_backoff_exponent(struct cfg802154_registered_device *rdev,
                          struct wpan_dev *wpan_dev, u8 min_be, u8 max_be)
 {
-       return rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
+       int ret;
+
+       trace_802154_rdev_set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
                                               min_be, max_be);
+       ret = rdev->ops->set_backoff_exponent(&rdev->wpan_phy, wpan_dev,
+                                             min_be, max_be);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_max_csma_backoffs(struct cfg802154_registered_device *rdev,
                           struct wpan_dev *wpan_dev, u8 max_csma_backoffs)
 {
-       return rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev,
-                                               max_csma_backoffs);
+       int ret;
+
+       trace_802154_rdev_set_csma_backoffs(&rdev->wpan_phy, wpan_dev,
+                                           max_csma_backoffs);
+       ret = rdev->ops->set_max_csma_backoffs(&rdev->wpan_phy, wpan_dev,
+                                              max_csma_backoffs);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_max_frame_retries(struct cfg802154_registered_device *rdev,
                           struct wpan_dev *wpan_dev, s8 max_frame_retries)
 {
-       return rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
+       int ret;
+
+       trace_802154_rdev_set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
                                                max_frame_retries);
+       ret = rdev->ops->set_max_frame_retries(&rdev->wpan_phy, wpan_dev,
+                                              max_frame_retries);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 static inline int
 rdev_set_lbt_mode(struct cfg802154_registered_device *rdev,
                  struct wpan_dev *wpan_dev, bool mode)
 {
-       return rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+       int ret;
+
+       trace_802154_rdev_set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+       ret = rdev->ops->set_lbt_mode(&rdev->wpan_phy, wpan_dev, mode);
+       trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
+       return ret;
 }
 
 #endif /* __CFG802154_RDEV_OPS */
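Every wrapper above now follows the same three-step shape: an entry tracepoint carrying the arguments, the driver op itself, then the shared return tracepoint. A condensed sketch with a hypothetical set_foo op (name and parameter are illustrative only):

static inline int
rdev_set_foo(struct cfg802154_registered_device *rdev,
	     struct wpan_dev *wpan_dev, u8 foo)
{
	int ret;

	trace_802154_rdev_set_foo(&rdev->wpan_phy, wpan_dev, foo);
	ret = rdev->ops->set_foo(&rdev->wpan_phy, wpan_dev, foo);
	trace_802154_rdev_return_int(&rdev->wpan_phy, ret);
	return ret;
}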
diff --git a/net/ieee802154/trace.c b/net/ieee802154/trace.c
new file mode 100644 (file)
index 0000000..95f997f
--- /dev/null
@@ -0,0 +1,7 @@
+#include <linux/module.h>
+
+#ifndef __CHECKER__
+#define CREATE_TRACE_POINTS
+#include "trace.h"
+
+#endif
diff --git a/net/ieee802154/trace.h b/net/ieee802154/trace.h
new file mode 100644 (file)
index 0000000..5ac25eb
--- /dev/null
@@ -0,0 +1,247 @@
+/* Based on net/wireless/tracing.h */
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cfg802154
+
+#if !defined(__RDEV_CFG802154_OPS_TRACE) || defined(TRACE_HEADER_MULTI_READ)
+#define __RDEV_CFG802154_OPS_TRACE
+
+#include <linux/tracepoint.h>
+
+#include <net/cfg802154.h>
+
+#define MAXNAME                32
+#define WPAN_PHY_ENTRY __array(char, wpan_phy_name, MAXNAME)
+#define WPAN_PHY_ASSIGN        strlcpy(__entry->wpan_phy_name,  \
+                               wpan_phy_name(wpan_phy), \
+                               MAXNAME)
+#define WPAN_PHY_PR_FMT        "%s"
+#define WPAN_PHY_PR_ARG        __entry->wpan_phy_name
+
+#define WPAN_DEV_ENTRY __field(u32, identifier)
+#define WPAN_DEV_ASSIGN        (__entry->identifier) = (!IS_ERR_OR_NULL(wpan_dev) \
+                                        ? wpan_dev->identifier : 0)
+#define WPAN_DEV_PR_FMT        "wpan_dev(%u)"
+#define WPAN_DEV_PR_ARG        (__entry->identifier)
+
+#define WPAN_CCA_ENTRY __field(enum nl802154_cca_modes, cca_mode) \
+                       __field(enum nl802154_cca_opts, cca_opt)
+#define WPAN_CCA_ASSIGN \
+       do {                                     \
+               (__entry->cca_mode) = cca->mode; \
+               (__entry->cca_opt) = cca->opt;   \
+       } while (0)
+#define WPAN_CCA_PR_FMT        "cca_mode: %d, cca_opt: %d"
+#define WPAN_CCA_PR_ARG __entry->cca_mode, __entry->cca_opt
+
+#define BOOL_TO_STR(bo) (bo) ? "true" : "false"
+
+/*************************************************************
+ *                     rdev->ops traces                     *
+ *************************************************************/
+
+TRACE_EVENT(802154_rdev_add_virtual_intf,
+       TP_PROTO(struct wpan_phy *wpan_phy, char *name,
+                enum nl802154_iftype type, __le64 extended_addr),
+       TP_ARGS(wpan_phy, name, type, extended_addr),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __string(vir_intf_name, name ? name : "<noname>")
+               __field(enum nl802154_iftype, type)
+               __field(__le64, extended_addr)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __assign_str(vir_intf_name, name ? name : "<noname>");
+               __entry->type = type;
+               __entry->extended_addr = extended_addr;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", virtual intf name: %s, type: %d, ea %llx",
+                 WPAN_PHY_PR_ARG, __get_str(vir_intf_name), __entry->type,
+                 __le64_to_cpu(__entry->extended_addr))
+);
+
+TRACE_EVENT(802154_rdev_del_virtual_intf,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev),
+       TP_ARGS(wpan_phy, wpan_dev),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT, WPAN_PHY_PR_ARG,
+                 WPAN_DEV_PR_ARG)
+);
+
+TRACE_EVENT(802154_rdev_set_channel,
+       TP_PROTO(struct wpan_phy *wpan_phy, u8 page, u8 channel),
+       TP_ARGS(wpan_phy, page, channel),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __field(u8, page)
+               __field(u8, channel)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __entry->page = page;
+               __entry->channel = channel;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", page: %d, channel: %d", WPAN_PHY_PR_ARG,
+                 __entry->page, __entry->channel)
+);
+
+TRACE_EVENT(802154_rdev_set_cca_mode,
+       TP_PROTO(struct wpan_phy *wpan_phy, const struct wpan_phy_cca *cca),
+       TP_ARGS(wpan_phy, cca),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_CCA_ENTRY
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_CCA_ASSIGN;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_CCA_PR_FMT, WPAN_PHY_PR_ARG,
+                 WPAN_CCA_PR_ARG)
+);
+
+DECLARE_EVENT_CLASS(802154_le16_template,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                __le16 le16arg),
+       TP_ARGS(wpan_phy, wpan_dev, le16arg),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(__le16, le16arg)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->le16arg = le16arg;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", pan id: 0x%04x",
+                 WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
+                 __le16_to_cpu(__entry->le16arg))
+);
+
+DEFINE_EVENT(802154_le16_template, 802154_rdev_set_pan_id,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                __le16 le16arg),
+       TP_ARGS(wpan_phy, wpan_dev, le16arg)
+);
+
+DEFINE_EVENT_PRINT(802154_le16_template, 802154_rdev_set_short_addr,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                __le16 le16arg),
+       TP_ARGS(wpan_phy, wpan_dev, le16arg),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT ", sa: 0x%04x",
+                 WPAN_PHY_PR_ARG, WPAN_DEV_PR_ARG,
+                 __le16_to_cpu(__entry->le16arg))
+);
+
+TRACE_EVENT(802154_rdev_set_backoff_exponent,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                u8 min_be, u8 max_be),
+       TP_ARGS(wpan_phy, wpan_dev, min_be, max_be),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(u8, min_be)
+               __field(u8, max_be)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->min_be = min_be;
+               __entry->max_be = max_be;
+       ),
+
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+                 ", min be: %d, max_be: %d", WPAN_PHY_PR_ARG,
+                 WPAN_DEV_PR_ARG, __entry->min_be, __entry->max_be)
+);
+
+TRACE_EVENT(802154_rdev_set_csma_backoffs,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                u8 max_csma_backoffs),
+       TP_ARGS(wpan_phy, wpan_dev, max_csma_backoffs),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(u8, max_csma_backoffs)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->max_csma_backoffs = max_csma_backoffs;
+       ),
+
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+                 ", max csma backoffs: %d", WPAN_PHY_PR_ARG,
+                 WPAN_DEV_PR_ARG, __entry->max_csma_backoffs)
+);
+
+TRACE_EVENT(802154_rdev_set_max_frame_retries,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                s8 max_frame_retries),
+       TP_ARGS(wpan_phy, wpan_dev, max_frame_retries),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(s8, max_frame_retries)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->max_frame_retries = max_frame_retries;
+       ),
+
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+                 ", max frame retries: %d", WPAN_PHY_PR_ARG,
+                 WPAN_DEV_PR_ARG, __entry->max_frame_retries)
+);
+
+TRACE_EVENT(802154_rdev_set_lbt_mode,
+       TP_PROTO(struct wpan_phy *wpan_phy, struct wpan_dev *wpan_dev,
+                bool mode),
+       TP_ARGS(wpan_phy, wpan_dev, mode),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               WPAN_DEV_ENTRY
+               __field(bool, mode)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               WPAN_DEV_ASSIGN;
+               __entry->mode = mode;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", " WPAN_DEV_PR_FMT
+               ", lbt mode: %s", WPAN_PHY_PR_ARG,
+               WPAN_DEV_PR_ARG, BOOL_TO_STR(__entry->mode))
+);
+
+TRACE_EVENT(802154_rdev_return_int,
+       TP_PROTO(struct wpan_phy *wpan_phy, int ret),
+       TP_ARGS(wpan_phy, ret),
+       TP_STRUCT__entry(
+               WPAN_PHY_ENTRY
+               __field(int, ret)
+       ),
+       TP_fast_assign(
+               WPAN_PHY_ASSIGN;
+               __entry->ret = ret;
+       ),
+       TP_printk(WPAN_PHY_PR_FMT ", returned: %d", WPAN_PHY_PR_ARG,
+                 __entry->ret)
+);
+
+#endif /* !__RDEV_CFG802154_OPS_TRACE || TRACE_HEADER_MULTI_READ */
+
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE trace
+#include <trace/define_trace.h>
index 421a80b09b62358dad5a0fa35d99db73d28472a7..30b544f025acc09aaad99d9adc1e5dbc1227d307 100644 (file)
@@ -256,7 +256,8 @@ static int esp_output(struct xfrm_state *x, struct sk_buff *skb)
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output.low);
+                             XFRM_SKB_CB(skb)->seq.output.low +
+                             ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
index e13fcc602da20ee44dfd505ab1115bbcc0e13375..09b62e17dd8cba4b1041de5f208180d278010604 100644 (file)
@@ -1164,6 +1164,7 @@ int fib_table_insert(struct fib_table *tb, struct fib_config *cfg)
                        state = fa->fa_state;
                        new_fa->fa_state = state & ~FA_S_ACCESSED;
                        new_fa->fa_slen = fa->fa_slen;
+                       new_fa->tb_id = tb->tb_id;
 
                        err = netdev_switch_fib_ipv4_add(key, plen, fi,
                                                         new_fa->fa_tos,
@@ -1764,7 +1765,7 @@ void fib_table_flush_external(struct fib_table *tb)
                        /* record local slen */
                        slen = fa->fa_slen;
 
-                       if (!fi || !(fi->fib_flags & RTNH_F_EXTERNAL))
+                       if (!fi || !(fi->fib_flags & RTNH_F_OFFLOAD))
                                continue;
 
                        netdev_switch_fib_ipv4_del(n->key,
index bb77ebdae3b31bcacbfc4fdb350d4282e400e2e8..4d32262c7502cc22d13a9f3bd47ca72e8ee355e8 100644 (file)
@@ -224,14 +224,16 @@ int inet_sk_diag_fill(struct sock *sk, struct inet_connection_sock *icsk,
        handler->idiag_get_info(sk, r, info);
 
        if (sk->sk_state < TCP_TIME_WAIT) {
-               int err = 0;
+               union tcp_cc_info info;
+               size_t sz = 0;
+               int attr;
 
                rcu_read_lock();
                ca_ops = READ_ONCE(icsk->icsk_ca_ops);
                if (ca_ops && ca_ops->get_info)
-                       err = ca_ops->get_info(sk, ext, skb);
+                       sz = ca_ops->get_info(sk, ext, &attr, &info);
                rcu_read_unlock();
-               if (err < 0)
+               if (sz && nla_put(skb, attr, sz, &info) < 0)
                        goto errout;
        }
 
index 9f7269f3c54af2ecbc74db4ec2c0f71d5184dc1c..0c152087ca15dd3f97548d3c7123d42bd6626f0e 100644 (file)
@@ -65,7 +65,6 @@ static int vti_input(struct sk_buff *skb, int nexthdr, __be32 spi,
                        goto drop;
 
                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4 = tunnel;
-               skb->mark = be32_to_cpu(tunnel->parms.i_key);
 
                return xfrm_input(skb, nexthdr, spi, encap_type);
        }
@@ -91,6 +90,8 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct ip_tunnel *tunnel = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4;
+       u32 orig_mark = skb->mark;
+       int ret;
 
        if (!tunnel)
                return 1;
@@ -107,7 +108,11 @@ static int vti_rcv_cb(struct sk_buff *skb, int err)
        x = xfrm_input_state(skb);
        family = x->inner_mode->afinfo->family;
 
-       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+       skb->mark = be32_to_cpu(tunnel->parms.i_key);
+       ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+       skb->mark = orig_mark;
+
+       if (!ret)
                return -EPERM;
 
        skb_scrub_packet(skb, !net_eq(tunnel->net, dev_net(skb->dev)));
@@ -216,8 +221,6 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
 
        memset(&fl, 0, sizeof(fl));
 
-       skb->mark = be32_to_cpu(tunnel->parms.o_key);
-
        switch (skb->protocol) {
        case htons(ETH_P_IP):
                xfrm_decode_session(skb, &fl, AF_INET);
@@ -233,6 +236,9 @@ static netdev_tx_t vti_tunnel_xmit(struct sk_buff *skb, struct net_device *dev)
                return NETDEV_TX_OK;
        }
 
+       /* override mark with tunnel output key */
+       fl.flowi_mark = be32_to_cpu(tunnel->parms.o_key);
+
        return vti_xmit(skb, dev, &fl);
 }
 
index 13bfe84bf3ca5a6aafe6982b8782958b0cce529f..a61200754f4ba29301855ae67055dd11d004780b 100644 (file)
@@ -1075,6 +1075,9 @@ static int do_replace(struct net *net, const void __user *user,
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
@@ -1499,6 +1502,9 @@ static int compat_do_replace(struct net *net, void __user *user,
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
index c69db7fa25ee6376ee3f2bee87d4ce7f09105fb3..2d0e265fef6e7f2c657c54d4db0fd10e010fa68b 100644 (file)
@@ -1262,6 +1262,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
@@ -1809,6 +1812,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
index bff62fc87b8e266dbee59d43359fea8a77e29d60..f45f2a12f37b25b7270560498423df9488405b1d 100644 (file)
@@ -902,6 +902,10 @@ static int ip_error(struct sk_buff *skb)
        bool send;
        int code;
 
+       /* IP on this device is disabled. */
+       if (!in_dev)
+               goto out;
+
        net = dev_net(rt->dst.dev);
        if (!IN_DEV_FORWARD(in_dev)) {
                switch (rt->dst.error) {
index 8c5cd9efebbcfa877fedb49dda67e3d098ccbbc1..f1377f2a0472ec26e88b92be2346cbc3c8a69b41 100644 (file)
 #include <linux/types.h>
 #include <linux/fcntl.h>
 #include <linux/poll.h>
+#include <linux/inet_diag.h>
 #include <linux/init.h>
 #include <linux/fs.h>
 #include <linux/skbuff.h>
@@ -401,6 +402,7 @@ void tcp_init_sock(struct sock *sk)
        tp->snd_ssthresh = TCP_INFINITE_SSTHRESH;
        tp->snd_cwnd_clamp = ~0;
        tp->mss_cache = TCP_MSS_DEFAULT;
+       u64_stats_init(&tp->syncp);
 
        tp->reordering = sysctl_tcp_reordering;
        tcp_enable_early_retrans(tp);
@@ -2592,11 +2594,12 @@ EXPORT_SYMBOL(compat_tcp_setsockopt);
 #endif
 
 /* Return information about state of tcp endpoint in API format. */
-void tcp_get_info(const struct sock *sk, struct tcp_info *info)
+void tcp_get_info(struct sock *sk, struct tcp_info *info)
 {
        const struct tcp_sock *tp = tcp_sk(sk);
        const struct inet_connection_sock *icsk = inet_csk(sk);
        u32 now = tcp_time_stamp;
+       unsigned int start;
        u32 rate;
 
        memset(info, 0, sizeof(*info));
@@ -2663,6 +2666,12 @@ void tcp_get_info(const struct sock *sk, struct tcp_info *info)
 
        rate = READ_ONCE(sk->sk_max_pacing_rate);
        info->tcpi_max_pacing_rate = rate != ~0U ? rate : ~0ULL;
+
+       do {
+               start = u64_stats_fetch_begin_irq(&tp->syncp);
+               info->tcpi_bytes_acked = tp->bytes_acked;
+               info->tcpi_bytes_received = tp->bytes_received;
+       } while (u64_stats_fetch_retry_irq(&tp->syncp, start));
 }
 EXPORT_SYMBOL_GPL(tcp_get_info);
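For reference, the u64_stats_fetch_begin_irq()/u64_stats_fetch_retry_irq() pair used in tcp_get_info() above is the reader side of a seqcount-style protocol: the loop retries until it observes a snapshot that no writer modified in between, and on 64-bit kernels the sync object compiles away. A minimal sketch of both sides, assuming a hypothetical stats structure:

    #include <linux/types.h>
    #include <linux/u64_stats_sync.h>

    struct my_stats {
            u64 bytes_acked;
            u64 bytes_received;
            struct u64_stats_sync syncp;
    };

    /* Writer side: bracket the update so 32-bit readers can detect it. */
    static void my_stats_add(struct my_stats *s, u32 acked, u32 received)
    {
            u64_stats_update_begin(&s->syncp);
            s->bytes_acked += acked;
            s->bytes_received += received;
            u64_stats_update_end(&s->syncp);
    }

    /* Reader side: retry until a consistent snapshot is observed. */
    static void my_stats_read(struct my_stats *s, u64 *acked, u64 *received)
    {
            unsigned int start;

            do {
                    start = u64_stats_fetch_begin_irq(&s->syncp);
                    *acked = s->bytes_acked;
                    *received = s->bytes_received;
            } while (u64_stats_fetch_retry_irq(&s->syncp, start));
    }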
 
@@ -2734,6 +2743,26 @@ static int do_tcp_getsockopt(struct sock *sk, int level,
                        return -EFAULT;
                return 0;
        }
+       case TCP_CC_INFO: {
+               const struct tcp_congestion_ops *ca_ops;
+               union tcp_cc_info info;
+               size_t sz = 0;
+               int attr;
+
+               if (get_user(len, optlen))
+                       return -EFAULT;
+
+               ca_ops = icsk->icsk_ca_ops;
+               if (ca_ops && ca_ops->get_info)
+                       sz = ca_ops->get_info(sk, ~0U, &attr, &info);
+
+               len = min_t(unsigned int, len, sz);
+               if (put_user(len, optlen))
+                       return -EFAULT;
+               if (copy_to_user(optval, &info, len))
+                       return -EFAULT;
+               return 0;
+       }
        case TCP_QUICKACK:
                val = !icsk->icsk_ack.pingpong;
                break;
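From userspace, the new TCP_CC_INFO option reads like any other getsockopt() value; the kernel truncates the buffer to whatever the congestion-control module reports, so a returned length of zero means the module exports nothing. A hedged usage sketch (the TCP_CC_INFO fallback define assumes the uapi value; union tcp_cc_info comes from linux/inet_diag.h):

    #include <stdio.h>
    #include <sys/socket.h>
    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <linux/inet_diag.h>    /* union tcp_cc_info */

    #ifndef TCP_CC_INFO
    #define TCP_CC_INFO 26          /* assumed from uapi linux/tcp.h */
    #endif

    static void dump_cc_info(int fd)
    {
            union tcp_cc_info info;
            socklen_t len = sizeof(info);

            if (getsockopt(fd, IPPROTO_TCP, TCP_CC_INFO, &info, &len) == 0)
                    printf("congestion control exported %u bytes\n",
                           (unsigned int)len);
            else
                    perror("TCP_CC_INFO");
    }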
index 7a5ae50c80c87add1e46e8255f0837796d2e4947..84be008c945c654b692211b943f83e909a622516 100644 (file)
@@ -187,6 +187,7 @@ static void tcp_reinit_congestion_control(struct sock *sk,
 
        tcp_cleanup_congestion_control(sk);
        icsk->icsk_ca_ops = ca;
+       icsk->icsk_ca_setsockopt = 1;
 
        if (sk->sk_state != TCP_CLOSE && icsk->icsk_ca_ops->init)
                icsk->icsk_ca_ops->init(sk);
@@ -335,8 +336,10 @@ int tcp_set_congestion_control(struct sock *sk, const char *name)
        rcu_read_lock();
        ca = __tcp_ca_find_autoload(name);
        /* No change asking for existing value */
-       if (ca == icsk->icsk_ca_ops)
+       if (ca == icsk->icsk_ca_ops) {
+               icsk->icsk_ca_setsockopt = 1;
                goto out;
+       }
        if (!ca)
                err = -ENOENT;
        else if (!((ca->flags & TCP_CONG_NON_RESTRICTED) ||
index 4376016f7fa5cf84a3114d1551da623442c9c713..4c41c1287197eb4748198ae9532d1f6233aa7f6a 100644 (file)
@@ -277,7 +277,8 @@ static void dctcp_cwnd_event(struct sock *sk, enum tcp_ca_event ev)
        }
 }
 
-static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static size_t dctcp_get_info(struct sock *sk, u32 ext, int *attr,
+                            union tcp_cc_info *info)
 {
        const struct dctcp *ca = inet_csk_ca(sk);
 
@@ -286,18 +287,17 @@ static int dctcp_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
         */
        if (ext & (1 << (INET_DIAG_DCTCPINFO - 1)) ||
            ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-               struct tcp_dctcp_info info;
-
-               memset(&info, 0, sizeof(info));
+               memset(info, 0, sizeof(struct tcp_dctcp_info));
                if (inet_csk(sk)->icsk_ca_ops != &dctcp_reno) {
-                       info.dctcp_enabled = 1;
-                       info.dctcp_ce_state = (u16) ca->ce_state;
-                       info.dctcp_alpha = ca->dctcp_alpha;
-                       info.dctcp_ab_ecn = ca->acked_bytes_ecn;
-                       info.dctcp_ab_tot = ca->acked_bytes_total;
+                       info->dctcp.dctcp_enabled = 1;
+                       info->dctcp.dctcp_ce_state = (u16) ca->ce_state;
+                       info->dctcp.dctcp_alpha = ca->dctcp_alpha;
+                       info->dctcp.dctcp_ab_ecn = ca->acked_bytes_ecn;
+                       info->dctcp.dctcp_ab_tot = ca->acked_bytes_total;
                }
 
-               return nla_put(skb, INET_DIAG_DCTCPINFO, sizeof(info), &info);
+               *attr = INET_DIAG_DCTCPINFO;
+               return sizeof(*info);
        }
        return 0;
 }
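The new get_info() signature lets one callback serve two consumers: inet_diag still emits a netlink attribute from the returned buffer, while the TCP_CC_INFO getsockopt path (see tcp.c above) copies the same bytes straight to userspace. A sketch approximating the diag-side caller, with the nla_put() now done by the caller rather than by each module:

    static int diag_put_cc_info(struct sk_buff *skb, struct sock *sk, u32 ext)
    {
            const struct tcp_congestion_ops *ca_ops =
                    inet_csk(sk)->icsk_ca_ops;
            union tcp_cc_info info;
            size_t sz = 0;
            int attr = 0;

            if (ca_ops && ca_ops->get_info)
                    sz = ca_ops->get_info(sk, ext, &attr, &info);
            if (!sz)
                    return 0;

            /* attr names the attribute type, e.g. INET_DIAG_DCTCPINFO */
            return nla_put(skb, attr, sz, &info);
    }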
index e3d87aca6be8fafe02bec5a8f862a88a6fe79d50..46b087a27503acdf1ff55449c8e41269b1497e67 100644 (file)
@@ -206,6 +206,11 @@ static bool tcp_fastopen_create_child(struct sock *sk,
                        skb_set_owner_r(skb2, child);
                        __skb_queue_tail(&child->sk_receive_queue, skb2);
                        tp->syn_data_acked = 1;
+
+                       /* u64_stats_update_begin(&tp->syncp) not needed here,
+                        * as we are certainly not changing the upper 32-bit value (0)
+                        */
+                       tp->bytes_received = end_seq - TCP_SKB_CB(skb)->seq - 1;
                } else {
                        end_seq = TCP_SKB_CB(skb)->seq + 1;
                }
index 67476f085e4843dacaeb1b4d6b71ecc160c99b97..f71002e4db0ba7fe8dfe35bb2196bbaae751ed59 100644 (file)
@@ -300,24 +300,25 @@ static u32 tcp_illinois_ssthresh(struct sock *sk)
 }
 
 /* Extract info for TCP socket info provided via netlink. */
-static int tcp_illinois_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static size_t tcp_illinois_info(struct sock *sk, u32 ext, int *attr,
+                               union tcp_cc_info *info)
 {
        const struct illinois *ca = inet_csk_ca(sk);
 
        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-               struct tcpvegas_info info = {
-                       .tcpv_enabled = 1,
-                       .tcpv_rttcnt = ca->cnt_rtt,
-                       .tcpv_minrtt = ca->base_rtt,
-               };
+               info->vegas.tcpv_enabled = 1;
+               info->vegas.tcpv_rttcnt = ca->cnt_rtt;
+               info->vegas.tcpv_minrtt = ca->base_rtt;
+               info->vegas.tcpv_rtt = 0;
 
-               if (info.tcpv_rttcnt > 0) {
+               if (info->vegas.tcpv_rttcnt > 0) {
                        u64 t = ca->sum_rtt;
 
-                       do_div(t, info.tcpv_rttcnt);
-                       info.tcpv_rtt = t;
+                       do_div(t, info->vegas.tcpv_rttcnt);
+                       info->vegas.tcpv_rtt = t;
                }
-               return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+               *attr = INET_DIAG_VEGASINFO;
+               return sizeof(struct tcpvegas_info);
        }
        return 0;
 }
index 3a4d9b34bed44a2f6b77b2be0e753867bad32292..c9ab964189a0162c7de19d4319f6c3e56194117b 100644 (file)
@@ -1820,14 +1820,12 @@ advance_sp:
        for (j = 0; j < used_sacks; j++)
                tp->recv_sack_cache[i++] = sp[j];
 
-       tcp_mark_lost_retrans(sk);
-
-       tcp_verify_left_out(tp);
-
        if ((state.reord < tp->fackets_out) &&
            ((inet_csk(sk)->icsk_ca_state != TCP_CA_Loss) || tp->undo_marker))
                tcp_update_reordering(sk, tp->fackets_out - state.reord, 0);
 
+       tcp_mark_lost_retrans(sk);
+       tcp_verify_left_out(tp);
 out:
 
 #if FASTRETRANS_DEBUG > 0
@@ -2700,16 +2698,21 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
        struct tcp_sock *tp = tcp_sk(sk);
        bool recovered = !before(tp->snd_una, tp->high_seq);
 
+       if ((flag & FLAG_SND_UNA_ADVANCED) &&
+           tcp_try_undo_loss(sk, false))
+               return;
+
        if (tp->frto) { /* F-RTO RFC5682 sec 3.1 (sack enhanced version). */
                /* Step 3.b. A timeout is spurious if not all data are
                 * lost, i.e., never-retransmitted data are (s)acked.
                 */
-               if (tcp_try_undo_loss(sk, flag & FLAG_ORIG_SACK_ACKED))
+               if ((flag & FLAG_ORIG_SACK_ACKED) &&
+                   tcp_try_undo_loss(sk, true))
                        return;
 
-               if (after(tp->snd_nxt, tp->high_seq) &&
-                   (flag & FLAG_DATA_SACKED || is_dupack)) {
-                       tp->frto = 0; /* Loss was real: 2nd part of step 3.a */
+               if (after(tp->snd_nxt, tp->high_seq)) {
+                       if (flag & FLAG_DATA_SACKED || is_dupack)
+                               tp->frto = 0; /* Step 3.a. loss was real */
                } else if (flag & FLAG_SND_UNA_ADVANCED && !recovered) {
                        tp->high_seq = tp->snd_nxt;
                        __tcp_push_pending_frames(sk, tcp_current_mss(sk),
@@ -2734,8 +2737,6 @@ static void tcp_process_loss(struct sock *sk, int flag, bool is_dupack)
                else if (flag & FLAG_SND_UNA_ADVANCED)
                        tcp_reset_reno_sack(tp);
        }
-       if (tcp_try_undo_loss(sk, false))
-               return;
        tcp_xmit_retransmit_queue(sk);
 }
 
@@ -3280,6 +3281,28 @@ static inline bool tcp_may_update_window(const struct tcp_sock *tp,
                (ack_seq == tp->snd_wl1 && nwin > tp->snd_wnd);
 }
 
+/* If we update tp->snd_una, also update tp->bytes_acked */
+static void tcp_snd_una_update(struct tcp_sock *tp, u32 ack)
+{
+       u32 delta = ack - tp->snd_una;
+
+       u64_stats_update_begin(&tp->syncp);
+       tp->bytes_acked += delta;
+       u64_stats_update_end(&tp->syncp);
+       tp->snd_una = ack;
+}
+
+/* If we update tp->rcv_nxt, also update tp->bytes_received */
+static void tcp_rcv_nxt_update(struct tcp_sock *tp, u32 seq)
+{
+       u32 delta = seq - tp->rcv_nxt;
+
+       u64_stats_update_begin(&tp->syncp);
+       tp->bytes_received += delta;
+       u64_stats_update_end(&tp->syncp);
+       tp->rcv_nxt = seq;
+}
+
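The helpers above compute the delta in u32 before folding it into the u64 counter; unsigned modular arithmetic keeps the delta correct even when the 32-bit sequence number wraps. A small standalone demonstration of that property:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t snd_una = 0xfffffff0u;   /* old cumulative ACK point */
            uint32_t ack     = 0x00000010u;   /* new ACK, past the wrap */
            uint32_t delta   = ack - snd_una; /* modular arithmetic */

            printf("delta = %u bytes\n", delta);   /* prints 32 */
            return 0;
    }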
 /* Update our send window.
  *
  * Window update algorithm, described in RFC793/RFC1122 (used in linux-2.2
@@ -3315,7 +3338,7 @@ static int tcp_ack_update_window(struct sock *sk, const struct sk_buff *skb, u32
                }
        }
 
-       tp->snd_una = ack;
+       tcp_snd_una_update(tp, ack);
 
        return flag;
 }
@@ -3497,7 +3520,7 @@ static int tcp_ack(struct sock *sk, const struct sk_buff *skb, int flag)
                 * Note, we use the fact that SND.UNA>=SND.WL2.
                 */
                tcp_update_wl(tp, ack_seq);
-               tp->snd_una = ack;
+               tcp_snd_una_update(tp, ack);
                flag |= FLAG_WIN_UPDATE;
 
                tcp_in_ack_event(sk, CA_ACK_WIN_UPDATE);
@@ -4236,7 +4259,7 @@ static void tcp_ofo_queue(struct sock *sk)
 
                tail = skb_peek_tail(&sk->sk_receive_queue);
                eaten = tail && tcp_try_coalesce(sk, tail, skb, &fragstolen);
-               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+               tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
                if (!eaten)
                        __skb_queue_tail(&sk->sk_receive_queue, skb);
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -4404,7 +4427,7 @@ static int __must_check tcp_queue_rcv(struct sock *sk, struct sk_buff *skb, int
        __skb_pull(skb, hdrlen);
        eaten = (tail &&
                 tcp_try_coalesce(sk, tail, skb, fragstolen)) ? 1 : 0;
-       tcp_sk(sk)->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+       tcp_rcv_nxt_update(tcp_sk(sk), TCP_SKB_CB(skb)->end_seq);
        if (!eaten) {
                __skb_queue_tail(&sk->sk_receive_queue, skb);
                skb_set_owner_r(skb, sk);
@@ -4497,7 +4520,7 @@ queue_and_out:
 
                        eaten = tcp_queue_rcv(sk, skb, 0, &fragstolen);
                }
-               tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+               tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
                if (skb->len)
                        tcp_event_data_recv(sk, skb);
                if (TCP_SKB_CB(skb)->tcp_flags & TCPHDR_FIN)
@@ -5245,7 +5268,7 @@ void tcp_rcv_established(struct sock *sk, struct sk_buff *skb,
                                        tcp_rcv_rtt_measure_ts(sk, skb);
 
                                        __skb_pull(skb, tcp_header_len);
-                                       tp->rcv_nxt = TCP_SKB_CB(skb)->end_seq;
+                                       tcp_rcv_nxt_update(tp, TCP_SKB_CB(skb)->end_seq);
                                        NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPHPHITSTOUSER);
                                        eaten = 1;
                                }
index e5d7649136fcb31ca70b097dcbd9873df07e7417..17e7339ee5cadd077769de396b7568a7ccb73e13 100644 (file)
@@ -300,7 +300,7 @@ void tcp_time_wait(struct sock *sk, int state, int timeo)
                        tw->tw_v6_daddr = sk->sk_v6_daddr;
                        tw->tw_v6_rcv_saddr = sk->sk_v6_rcv_saddr;
                        tw->tw_tclass = np->tclass;
-                       tw->tw_flowlabel = np->flow_label >> 12;
+                       tw->tw_flowlabel = be32_to_cpu(np->flow_label & IPV6_FLOWLABEL_MASK);
                        tw->tw_ipv6only = sk->sk_ipv6only;
                }
 #endif
@@ -420,7 +420,10 @@ void tcp_ca_openreq_child(struct sock *sk, const struct dst_entry *dst)
                rcu_read_unlock();
        }
 
-       if (!ca_got_dst && !try_module_get(icsk->icsk_ca_ops->owner))
+       /* If no valid choice has been made yet, assign the current system default CA. */
+       if (!ca_got_dst &&
+           (!icsk->icsk_ca_setsockopt ||
+            !try_module_get(icsk->icsk_ca_ops->owner)))
                tcp_assign_congestion_control(sk);
 
        tcp_set_ca_state(sk, TCP_CA_Open);
index c71a1b8f7bde3082a6520128bb6f47d3081de8ac..a6cea1d5e20d47f06eab95f3344a3e3b7c44da89 100644 (file)
@@ -286,18 +286,19 @@ static void tcp_vegas_cong_avoid(struct sock *sk, u32 ack, u32 acked)
 }
 
 /* Extract info for TCP socket info provided via netlink. */
-int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
+                         union tcp_cc_info *info)
 {
        const struct vegas *ca = inet_csk_ca(sk);
+
        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-               struct tcpvegas_info info = {
-                       .tcpv_enabled = ca->doing_vegas_now,
-                       .tcpv_rttcnt = ca->cntRTT,
-                       .tcpv_rtt = ca->baseRTT,
-                       .tcpv_minrtt = ca->minRTT,
-               };
-
-               return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+               info->vegas.tcpv_enabled = ca->doing_vegas_now;
+               info->vegas.tcpv_rttcnt = ca->cntRTT;
+               info->vegas.tcpv_rtt = ca->baseRTT;
+               info->vegas.tcpv_minrtt = ca->minRTT;
+
+               *attr = INET_DIAG_VEGASINFO;
+               return sizeof(struct tcpvegas_info);
        }
        return 0;
 }
index e8a6b33cc61dd7c4d58ea7d3ebd1dd1f35041de9..ef9da5306c685b269cc1efe64ee40196faf11e66 100644 (file)
@@ -19,6 +19,7 @@ void tcp_vegas_init(struct sock *sk);
 void tcp_vegas_state(struct sock *sk, u8 ca_state);
 void tcp_vegas_pkts_acked(struct sock *sk, u32 cnt, s32 rtt_us);
 void tcp_vegas_cwnd_event(struct sock *sk, enum tcp_ca_event event);
-int tcp_vegas_get_info(struct sock *sk, u32 ext, struct sk_buff *skb);
+size_t tcp_vegas_get_info(struct sock *sk, u32 ext, int *attr,
+                         union tcp_cc_info *info);
 
 #endif /* __TCP_VEGAS_H */
index b3c57cceb9907fe9d79f33f33369a96b647b5f26..c10732e39837872c724b801700f627a7fb1c9390 100644 (file)
@@ -256,18 +256,19 @@ static void tcp_westwood_event(struct sock *sk, enum tcp_ca_event event)
 }
 
 /* Extract info for TCP socket info provided via netlink. */
-static int tcp_westwood_info(struct sock *sk, u32 ext, struct sk_buff *skb)
+static size_t tcp_westwood_info(struct sock *sk, u32 ext, int *attr,
+                               union tcp_cc_info *info)
 {
        const struct westwood *ca = inet_csk_ca(sk);
 
        if (ext & (1 << (INET_DIAG_VEGASINFO - 1))) {
-               struct tcpvegas_info info = {
-                       .tcpv_enabled = 1,
-                       .tcpv_rtt = jiffies_to_usecs(ca->rtt),
-                       .tcpv_minrtt = jiffies_to_usecs(ca->rtt_min),
-               };
+               info->vegas.tcpv_enabled = 1;
+               info->vegas.tcpv_rttcnt = 0;
+               info->vegas.tcpv_rtt    = jiffies_to_usecs(ca->rtt);
+               info->vegas.tcpv_minrtt = jiffies_to_usecs(ca->rtt_min);
 
-               return nla_put(skb, INET_DIAG_VEGASINFO, sizeof(info), &info);
+               *attr = INET_DIAG_VEGASINFO;
+               return sizeof(struct tcpvegas_info);
        }
        return 0;
 }
index d10b7e0112ebdb8fa61c650725ae7fae68f7e669..83aa604f9273c332c5a0e5399253d961ef92eb9a 100644 (file)
@@ -90,6 +90,7 @@
 #include <linux/socket.h>
 #include <linux/sockios.h>
 #include <linux/igmp.h>
+#include <linux/inetdevice.h>
 #include <linux/in.h>
 #include <linux/errno.h>
 #include <linux/timer.h>
@@ -1345,10 +1346,8 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (noblock)
-               return -EAGAIN;
-
-       /* starting over for a new packet */
+       /* starting over for a new packet, but check if we need to yield */
+       cond_resched();
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
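The cond_resched() added above closes a corner case where a task could spin in recvmsg over a stream of bad-checksum packets without ever reaching a scheduling point. The shape of the pattern, as a sketch built on hypothetical helpers:

    /* Retry loop that stays preemption-friendly: after consuming an
     * unusable packet, offer to yield before looking for the next one.
     */
    for (;;) {
            skb = try_dequeue_packet(sk);       /* hypothetical helper */
            if (!skb) {
                    wait_for_packet_or_err(sk); /* hypothetical: may block */
                    continue;
            }
            if (checksum_ok(skb))               /* hypothetical helper */
                    break;                      /* usable packet found */
            drop_packet(skb);                   /* hypothetical helper */
            cond_resched();                     /* don't monopolize the CPU */
    }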
@@ -1962,6 +1961,7 @@ void udp_v4_early_demux(struct sk_buff *skb)
        struct sock *sk;
        struct dst_entry *dst;
        int dif = skb->dev->ifindex;
+       int ours;
 
        /* validate the packet */
        if (!pskb_may_pull(skb, skb_transport_offset(skb) + sizeof(struct udphdr)))
@@ -1971,14 +1971,24 @@ void udp_v4_early_demux(struct sk_buff *skb)
        uh = udp_hdr(skb);
 
        if (skb->pkt_type == PACKET_BROADCAST ||
-           skb->pkt_type == PACKET_MULTICAST)
+           skb->pkt_type == PACKET_MULTICAST) {
+               struct in_device *in_dev = __in_dev_get_rcu(skb->dev);
+
+               if (!in_dev)
+                       return;
+
+               ours = ip_check_mc_rcu(in_dev, iph->daddr, iph->saddr,
+                                      iph->protocol);
+               if (!ours)
+                       return;
                sk = __udp4_lib_mcast_demux_lookup(net, uh->dest, iph->daddr,
                                                   uh->source, iph->saddr, dif);
-       else if (skb->pkt_type == PACKET_HOST)
+       } else if (skb->pkt_type == PACKET_HOST) {
                sk = __udp4_lib_demux_lookup(net, uh->dest, iph->daddr,
                                             uh->source, iph->saddr, dif);
-       else
+       } else {
                return;
+       }
 
        if (!sk)
                return;
index d873ceea86e6c74c34e7fcd31bec41c78ce5720b..ca09bf49ac6806b399dba51399f84e47590cb9ed 100644 (file)
@@ -133,6 +133,14 @@ static void snmp6_free_dev(struct inet6_dev *idev)
        free_percpu(idev->stats.ipv6);
 }
 
+static void in6_dev_finish_destroy_rcu(struct rcu_head *head)
+{
+       struct inet6_dev *idev = container_of(head, struct inet6_dev, rcu);
+
+       snmp6_free_dev(idev);
+       kfree(idev);
+}
+
 /* Nobody refers to this device, we may destroy it. */
 
 void in6_dev_finish_destroy(struct inet6_dev *idev)
@@ -151,7 +159,6 @@ void in6_dev_finish_destroy(struct inet6_dev *idev)
                pr_warn("Freeing alive inet6 device %p\n", idev);
                return;
        }
-       snmp6_free_dev(idev);
-       kfree_rcu(idev, rcu);
+       call_rcu(&idev->rcu, in6_dev_finish_destroy_rcu);
 }
 EXPORT_SYMBOL(in6_dev_finish_destroy);
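kfree_rcu() can only free the object itself; once per-CPU statistics must also be released after the grace period, the conversion above needs call_rcu() with an explicit callback. The general pattern, with a hypothetical structure:

    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct my_obj {
            void *extra;            /* separately allocated resource */
            struct rcu_head rcu;
    };

    static void my_obj_free_rcu(struct rcu_head *head)
    {
            struct my_obj *obj = container_of(head, struct my_obj, rcu);

            kfree(obj->extra);      /* the extra work kfree_rcu() can't do */
            kfree(obj);
    }

    static void my_obj_destroy(struct my_obj *obj)
    {
            /* readers may still hold references under rcu_read_lock();
             * defer all freeing until after the grace period
             */
            call_rcu(&obj->rcu, my_obj_free_rcu);
    }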
index 31f1b5d5e2ef8f7056eb8eddd513ba5b3343e2b1..7c07ce36aae2a5b9cc14cb5a883327b7230b38ee 100644 (file)
@@ -248,7 +248,8 @@ static int esp6_output(struct xfrm_state *x, struct sk_buff *skb)
        aead_givcrypt_set_crypt(req, sg, sg, clen, iv);
        aead_givcrypt_set_assoc(req, asg, assoclen);
        aead_givcrypt_set_giv(req, esph->enc_data,
-                             XFRM_SKB_CB(skb)->seq.output.low);
+                             XFRM_SKB_CB(skb)->seq.output.low +
+                             ((u64)XFRM_SKB_CB(skb)->seq.output.hi << 32));
 
        ESP_SKB_CB(skb)->tmp = tmp;
        err = crypto_aead_givencrypt(req);
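With extended sequence numbers the IV generator must be seeded with the full 64-bit counter, not just the low half; the fix composes it from the two 32-bit words. In isolation:

    #include <stdint.h>

    /* Combine the two ESN halves into the 64-bit sequence value. */
    static uint64_t esn_seq(uint32_t low, uint32_t hi)
    {
            return (uint64_t)low + ((uint64_t)hi << 32);
    }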
index 96dbffff5a2400bfca0a7b0bee9072d76ec92e88..bde57b113009794637a07b405173bef1fd3c6fb3 100644 (file)
@@ -693,6 +693,7 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
 {
        struct rt6_info *iter = NULL;
        struct rt6_info **ins;
+       struct rt6_info **fallback_ins = NULL;
        int replace = (info->nlh &&
                       (info->nlh->nlmsg_flags & NLM_F_REPLACE));
        int add = (!info->nlh ||
@@ -716,8 +717,13 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                            (info->nlh->nlmsg_flags & NLM_F_EXCL))
                                return -EEXIST;
                        if (replace) {
-                               found++;
-                               break;
+                               if (rt_can_ecmp == rt6_qualify_for_ecmp(iter)) {
+                                       found++;
+                                       break;
+                               }
+                               if (rt_can_ecmp)
+                                       fallback_ins = fallback_ins ?: ins;
+                               goto next_iter;
                        }
 
                        if (iter->dst.dev == rt->dst.dev &&
@@ -753,9 +759,17 @@ static int fib6_add_rt2node(struct fib6_node *fn, struct rt6_info *rt,
                if (iter->rt6i_metric > rt->rt6i_metric)
                        break;
 
+next_iter:
                ins = &iter->dst.rt6_next;
        }
 
+       if (fallback_ins && !found) {
+               /* No ECMP-able route found; replace the first non-ECMP one */
+               ins = fallback_ins;
+               iter = *ins;
+               found++;
+       }
+
        /* Reset round-robin state, if necessary */
        if (ins == &fn->leaf)
                fn->rr_ptr = NULL;
@@ -815,6 +829,8 @@ add:
                }
 
        } else {
+               int nsiblings;
+
                if (!found) {
                        if (add)
                                goto add;
@@ -835,8 +851,27 @@ add:
                        info->nl_net->ipv6.rt6_stats->fib_route_nodes++;
                        fn->fn_flags |= RTN_RTINFO;
                }
+               nsiblings = iter->rt6i_nsiblings;
                fib6_purge_rt(iter, fn, info->nl_net);
                rt6_release(iter);
+
+               if (nsiblings) {
+                       /* Replacing an ECMP route, remove all siblings */
+                       ins = &rt->dst.rt6_next;
+                       iter = *ins;
+                       while (iter) {
+                               if (rt6_qualify_for_ecmp(iter)) {
+                                       *ins = iter->dst.rt6_next;
+                                       fib6_purge_rt(iter, fn, info->nl_net);
+                                       rt6_release(iter);
+                                       nsiblings--;
+                               } else {
+                                       ins = &iter->dst.rt6_next;
+                               }
+                               iter = *ins;
+                       }
+                       WARN_ON(nsiblings != 0);
+               }
        }
 
        return 0;
index 7fde1f265c90e90f16291e6c861b6e242111c25b..bc09cb97b8401011c112afe469fd231382387622 100644 (file)
@@ -886,22 +886,45 @@ static int ip6_dst_lookup_tail(struct sock *sk,
 #endif
        int err;
 
-       if (!*dst)
-               *dst = ip6_route_output(net, sk, fl6);
-
-       err = (*dst)->error;
-       if (err)
-               goto out_err_release;
+       /* The correct way to handle this would be to do
+        * ip6_route_get_saddr, and then ip6_route_output; however,
+        * the route-specific preferred source forces the
+        * ip6_route_output call _before_ ip6_route_get_saddr.
+        *
+        * In source specific routing (no src=any default route),
+        * however, ip6_route_output will fail given a src=any saddr,
+        * which is why we try it again later.
+        */
+       if (ipv6_addr_any(&fl6->saddr) && (!*dst || !(*dst)->error)) {
+               struct rt6_info *rt;
+               bool had_dst = *dst != NULL;
 
-       if (ipv6_addr_any(&fl6->saddr)) {
-               struct rt6_info *rt = (struct rt6_info *) *dst;
+               if (!had_dst)
+                       *dst = ip6_route_output(net, sk, fl6);
+               rt = (*dst)->error ? NULL : (struct rt6_info *)*dst;
                err = ip6_route_get_saddr(net, rt, &fl6->daddr,
                                          sk ? inet6_sk(sk)->srcprefs : 0,
                                          &fl6->saddr);
                if (err)
                        goto out_err_release;
+
+               /* If we had an erroneous initial result, pretend it
+                * never existed and let the SA-enabled version take
+                * over.
+                */
+               if (!had_dst && (*dst)->error) {
+                       dst_release(*dst);
+                       *dst = NULL;
+               }
        }
 
+       if (!*dst)
+               *dst = ip6_route_output(net, sk, fl6);
+
+       err = (*dst)->error;
+       if (err)
+               goto out_err_release;
+
 #ifdef CONFIG_IPV6_OPTIMISTIC_DAD
        /*
         * Here if the dst entry we've looked up
@@ -1277,8 +1300,10 @@ emsgsize:
 
        /* If this is the first and only packet and device
         * supports checksum offloading, let's use it.
+        * Use transhdrlen, same as IPv4, because partial
+        * sums only work when transhdrlen is set.
         */
-       if (!skb && sk->sk_protocol == IPPROTO_UDP &&
+       if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
            length + fragheaderlen < mtu &&
            rt->dst.dev->features & NETIF_F_V6_CSUM &&
            !exthdrlen)
index ed9d681207fa340881fd100db0ea1cb3eb9a2ffb..0224c032dca5dca98ea0146bcdf52c179fa23f6d 100644 (file)
@@ -322,7 +322,6 @@ static int vti6_rcv(struct sk_buff *skb)
                }
 
                XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6 = t;
-               skb->mark = be32_to_cpu(t->parms.i_key);
 
                rcu_read_unlock();
 
@@ -342,6 +341,8 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        struct pcpu_sw_netstats *tstats;
        struct xfrm_state *x;
        struct ip6_tnl *t = XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6;
+       u32 orig_mark = skb->mark;
+       int ret;
 
        if (!t)
                return 1;
@@ -358,7 +359,11 @@ static int vti6_rcv_cb(struct sk_buff *skb, int err)
        x = xfrm_input_state(skb);
        family = x->inner_mode->afinfo->family;
 
-       if (!xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family))
+       skb->mark = be32_to_cpu(t->parms.i_key);
+       ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
+       skb->mark = orig_mark;
+
+       if (!ret)
                return -EPERM;
 
        skb_scrub_packet(skb, !net_eq(t->net, dev_net(skb->dev)));
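The save/override/restore sequence above makes the tunnel's i_key visible to the xfrm policy lookup without leaking it into skb->mark, which later hooks and userspace may rely on. As a generic sketch of the pattern:

    /* Temporarily impersonate a different mark for one lookup only. */
    static int check_with_mark(struct sk_buff *skb, u32 tmp_mark,
                               unsigned short family)
    {
            u32 orig_mark = skb->mark;
            int ret;

            skb->mark = tmp_mark;
            ret = xfrm_policy_check(NULL, XFRM_POLICY_IN, skb, family);
            skb->mark = orig_mark;  /* never leak the override */

            return ret;
    }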
@@ -430,6 +435,7 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        struct net_device *tdev;
        struct xfrm_state *x;
        int err = -1;
+       int mtu;
 
        if (!dst)
                goto tx_err_link_failure;
@@ -463,6 +469,19 @@ vti6_xmit(struct sk_buff *skb, struct net_device *dev, struct flowi *fl)
        skb_dst_set(skb, dst);
        skb->dev = skb_dst(skb)->dev;
 
+       mtu = dst_mtu(dst);
+       if (!skb->ignore_df && skb->len > mtu) {
+               skb_dst(skb)->ops->update_pmtu(dst, NULL, skb, mtu);
+
+               if (skb->protocol == htons(ETH_P_IPV6))
+                       icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
+               else
+                       icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
+                                 htonl(mtu));
+
+               return -EMSGSIZE;
+       }
+
        err = dst_output(skb);
        if (net_xmit_eval(err) == 0) {
                struct pcpu_sw_netstats *tstats = this_cpu_ptr(dev->tstats);
@@ -495,7 +514,6 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
        int ret;
 
        memset(&fl, 0, sizeof(fl));
-       skb->mark = be32_to_cpu(t->parms.o_key);
 
        switch (skb->protocol) {
        case htons(ETH_P_IPV6):
@@ -516,6 +534,9 @@ vti6_tnl_xmit(struct sk_buff *skb, struct net_device *dev)
                goto tx_err;
        }
 
+       /* override mark with tunnel output key */
+       fl.flowi_mark = be32_to_cpu(t->parms.o_key);
+
        ret = vti6_xmit(skb, dev, &fl);
        if (ret < 0)
                goto tx_err;
index 1a732a1d3c8e13c58508cef9381d2d32e5a34448..62f5b0d0bc9bfbf19940ba0c70ef9da464bf467f 100644 (file)
@@ -1275,6 +1275,9 @@ do_replace(struct net *net, const void __user *user, unsigned int len)
        /* overflow check */
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
@@ -1822,6 +1825,9 @@ compat_do_replace(struct net *net, void __user *user, unsigned int len)
                return -ENOMEM;
        if (tmp.num_counters >= INT_MAX / sizeof(struct xt_counters))
                return -ENOMEM;
+       if (tmp.num_counters == 0)
+               return -EINVAL;
+
        tmp.name[sizeof(tmp.name)-1] = 0;
 
        newinfo = xt_alloc_table_info(tmp.size);
index 5c48293ff06235e72f586007ff1e7bb568733b92..c73ae5039e46d3811d60bf5df9e9482d966a4966 100644 (file)
@@ -2245,9 +2245,10 @@ int ip6_route_get_saddr(struct net *net,
                        unsigned int prefs,
                        struct in6_addr *saddr)
 {
-       struct inet6_dev *idev = ip6_dst_idev((struct dst_entry *)rt);
+       struct inet6_dev *idev =
+               rt ? ip6_dst_idev((struct dst_entry *)rt) : NULL;
        int err = 0;
-       if (rt->rt6i_prefsrc.plen)
+       if (rt && rt->rt6i_prefsrc.plen)
                *saddr = rt->rt6i_prefsrc.addr;
        else
                err = ipv6_dev_get_saddr(net, idev ? idev->dev : NULL,
@@ -2503,9 +2504,9 @@ static int ip6_route_multipath(struct fib6_config *cfg, int add)
        int attrlen;
        int err = 0, last_err = 0;
 
+       remaining = cfg->fc_mp_len;
 beginning:
        rtnh = (struct rtnexthop *)cfg->fc_mp;
-       remaining = cfg->fc_mp_len;
 
        /* Parse a Multipath Entry */
        while (rtnh_ok(rtnh, remaining)) {
@@ -2535,15 +2536,19 @@ beginning:
                                 * next hops that have been already added.
                                 */
                                add = 0;
+                               remaining = cfg->fc_mp_len - remaining;
                                goto beginning;
                        }
                }
                /* Because each route is added like a single route we remove
-                * this flag after the first nexthop (if there is a collision,
-                * we have already fail to add the first nexthop:
-                * fib6_add_rt2node() has reject it).
+                * these flags after the first nexthop: if there is a collision,
+                * we have already failed to add the first nexthop:
+                * fib6_add_rt2node() has rejected it; when replacing, the old
+                * nexthops have been replaced by the first new one, and the
+                * rest should be added to it.
                 */
-               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~NLM_F_EXCL;
+               cfg->fc_nlinfo.nlh->nlmsg_flags &= ~(NLM_F_EXCL |
+                                                    NLM_F_REPLACE);
                rtnh = rtnh_next(rtnh, &remaining);
        }
 
index b6575d6655681e8e84993a5db929c7309d47d4d3..3adffb300238ebdaf729871bafbb348e82fbde56 100644 (file)
@@ -914,7 +914,7 @@ static void tcp_v6_timewait_ack(struct sock *sk, struct sk_buff *skb)
                        tcptw->tw_rcv_wnd >> tw->tw_rcv_wscale,
                        tcp_time_stamp + tcptw->tw_ts_offset,
                        tcptw->tw_ts_recent, tw->tw_bound_dev_if, tcp_twsk_md5_key(tcptw),
-                       tw->tw_tclass, (tw->tw_flowlabel << 12));
+                       tw->tw_tclass, cpu_to_be32(tw->tw_flowlabel));
 
        inet_twsk_put(tw);
 }
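The two flowlabel hunks above fix the same endianness bug from both sides: tw_flowlabel now caches the 20 label bits in host order (masked out of the big-endian header word), and the transmit path converts back with cpu_to_be32() instead of an ad-hoc shift. The invariant, as a round-trip sketch:

    static __be32 flowlabel_roundtrip(__be32 flow_label)
    {
            /* store side: mask the 20 label bits, keep host byte order */
            u32 tw_flowlabel = be32_to_cpu(flow_label & IPV6_FLOWLABEL_MASK);

            /* emit side: convert back to network order for the header */
            return cpu_to_be32(tw_flowlabel);
    }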
index 3477c919fcc8eb534c3a438ab6d607a6215897f8..e51fc3eee6dbd65506e8612fc5782b9482cf4708 100644 (file)
@@ -525,10 +525,8 @@ csum_copy_err:
        }
        unlock_sock_fast(sk, slow);
 
-       if (noblock)
-               return -EAGAIN;
-
-       /* starting over for a new packet */
+       /* starting over for a new packet, but check if we need to yield */
+       cond_resched();
        msg->msg_flags &= ~MSG_TRUNC;
        goto try_again;
 }
@@ -731,7 +729,9 @@ static bool __udp_v6_is_mcast_sock(struct net *net, struct sock *sk,
            (inet->inet_dport && inet->inet_dport != rmt_port) ||
            (!ipv6_addr_any(&sk->sk_v6_daddr) &&
                    !ipv6_addr_equal(&sk->sk_v6_daddr, rmt_addr)) ||
-           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif))
+           (sk->sk_bound_dev_if && sk->sk_bound_dev_if != dif) ||
+           (!ipv6_addr_any(&sk->sk_v6_rcv_saddr) &&
+                   !ipv6_addr_equal(&sk->sk_v6_rcv_saddr, loc_addr)))
                return false;
        if (!inet6_mc_check(sk, loc_addr, rmt_addr))
                return false;
index 265e42721a661cf54a46246065168d6a17885147..ff347a0eebd4fdbcbd1580c8af0450c23f673f85 100644 (file)
@@ -2495,51 +2495,22 @@ static bool ieee80211_coalesce_started_roc(struct ieee80211_local *local,
                                           struct ieee80211_roc_work *new_roc,
                                           struct ieee80211_roc_work *cur_roc)
 {
-       unsigned long j = jiffies;
-       unsigned long cur_roc_end = cur_roc->hw_start_time +
-                                   msecs_to_jiffies(cur_roc->duration);
-       struct ieee80211_roc_work *next_roc;
-       int new_dur;
+       unsigned long now = jiffies;
+       unsigned long remaining = cur_roc->hw_start_time +
+                                 msecs_to_jiffies(cur_roc->duration) -
+                                 now;
 
        if (WARN_ON(!cur_roc->started || !cur_roc->hw_begun))
                return false;
 
-       if (time_after(j + IEEE80211_ROC_MIN_LEFT, cur_roc_end))
+       /* if it doesn't fit entirely, schedule a new one */
+       if (new_roc->duration > jiffies_to_msecs(remaining))
                return false;
 
        ieee80211_handle_roc_started(new_roc);
 
-       new_dur = new_roc->duration - jiffies_to_msecs(cur_roc_end - j);
-
-       /* cur_roc is long enough - add new_roc to the dependents list. */
-       if (new_dur <= 0) {
-               list_add_tail(&new_roc->list, &cur_roc->dependents);
-               return true;
-       }
-
-       new_roc->duration = new_dur;
-
-       /*
-        * if cur_roc was already coalesced before, we might
-        * want to extend the next roc instead of adding
-        * a new one.
-        */
-       next_roc = list_entry(cur_roc->list.next,
-                             struct ieee80211_roc_work, list);
-       if (&next_roc->list != &local->roc_list &&
-           next_roc->chan == new_roc->chan &&
-           next_roc->sdata == new_roc->sdata &&
-           !WARN_ON(next_roc->started)) {
-               list_add_tail(&new_roc->list, &next_roc->dependents);
-               next_roc->duration = max(next_roc->duration,
-                                        new_roc->duration);
-               next_roc->type = max(next_roc->type, new_roc->type);
-               return true;
-       }
-
-       /* add right after cur_roc */
-       list_add(&new_roc->list, &cur_roc->list);
-
+       /* add to dependents so we send the expired event properly */
+       list_add_tail(&new_roc->list, &cur_roc->dependents);
        return true;
 }
 
@@ -2652,17 +2623,9 @@ static int ieee80211_start_roc_work(struct ieee80211_local *local,
                         * In the offloaded ROC case, if it hasn't begun, add
                         * this new one to the dependent list to be handled
                         * when the master one begins. If it has begun,
-                        * check that there's still a minimum time left and
-                        * if so, start this one, transmitting the frame, but
-                        * add it to the list directly after this one with
-                        * a reduced time so we'll ask the driver to execute
-                        * it right after finishing the previous one, in the
-                        * hope that it'll also be executed right afterwards,
-                        * effectively extending the old one.
-                        * If there's no minimum time left, just add it to the
-                        * normal list.
-                        * TODO: the ROC type is ignored here, assuming that it
-                        * is better to immediately use the current ROC.
+                        * check if it fits entirely within the existing one,
+                        * in which case it will just be dependent as well.
+                        * Otherwise, schedule it by itself.
                         */
                        if (!tmp->hw_begun) {
                                list_add_tail(&roc->list, &tmp->dependents);
index ab46ab4a72498fd04f1c12ac6bb44f867d86869b..c0a9187bc3a9d579b36824fa64ecbbcbd6575110 100644 (file)
@@ -205,6 +205,8 @@ enum ieee80211_packet_rx_flags {
  * @IEEE80211_RX_CMNTR: received on cooked monitor already
  * @IEEE80211_RX_BEACON_REPORTED: This frame was already reported
  *     to cfg80211_report_obss_beacon().
+ * @IEEE80211_RX_REORDER_TIMER: this frame is released by the
+ *     reorder buffer timeout timer, not the normal RX path
  *
  * These flags are used across handling multiple interfaces
  * for a single frame.
@@ -212,6 +214,7 @@ enum ieee80211_packet_rx_flags {
 enum ieee80211_rx_flags {
        IEEE80211_RX_CMNTR              = BIT(0),
        IEEE80211_RX_BEACON_REPORTED    = BIT(1),
+       IEEE80211_RX_REORDER_TIMER      = BIT(2),
 };
 
 struct ieee80211_rx_data {
@@ -325,12 +328,6 @@ struct mesh_preq_queue {
        u8 flags;
 };
 
-#if HZ/100 == 0
-#define IEEE80211_ROC_MIN_LEFT 1
-#else
-#define IEEE80211_ROC_MIN_LEFT (HZ/100)
-#endif
-
 struct ieee80211_roc_work {
        struct list_head list;
        struct list_head dependents;
index b4ac596a7cb76205cf39d935708d3383490c2b73..84cef600c5730e74c6456e801ffa93ef55e4e47f 100644 (file)
@@ -522,6 +522,12 @@ int ieee80211_do_open(struct wireless_dev *wdev, bool coming_up)
                memcpy(sdata->vif.hw_queue, master->vif.hw_queue,
                       sizeof(sdata->vif.hw_queue));
                sdata->vif.bss_conf.chandef = master->vif.bss_conf.chandef;
+
+               mutex_lock(&local->key_mtx);
+               sdata->crypto_tx_tailroom_needed_cnt +=
+                       master->crypto_tx_tailroom_needed_cnt;
+               mutex_unlock(&local->key_mtx);
+
                break;
                }
        case NL80211_IFTYPE_AP:
@@ -819,13 +825,15 @@ static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata,
         * (because if we remove a STA after ops->remove_interface()
         * the driver will have removed the vif info already!)
         *
-        * This is relevant only in WDS mode, in all other modes we've
-        * already removed all stations when disconnecting or similar,
-        * so warn otherwise.
+        * In WDS mode a station must exist here and be flushed; for
+        * AP_VLANs, stations may exist since there's nothing else that
+        * would have removed them, but in other modes there shouldn't
+        * be any stations.
         */
        flushed = sta_info_flush(sdata);
-       WARN_ON_ONCE((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
-                    (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1));
+       WARN_ON_ONCE(sdata->vif.type != NL80211_IFTYPE_AP_VLAN &&
+                    ((sdata->vif.type != NL80211_IFTYPE_WDS && flushed > 0) ||
+                     (sdata->vif.type == NL80211_IFTYPE_WDS && flushed != 1)));
 
        /* don't count this interface for promisc/allmulti while it is down */
        if (sdata->flags & IEEE80211_SDATA_ALLMULTI)
index 2291cd7300911514db84c0135369b807e93a9d06..a907f2d5c12d857bf1811af24e57f5af09eb8665 100644 (file)
@@ -58,6 +58,22 @@ static void assert_key_lock(struct ieee80211_local *local)
        lockdep_assert_held(&local->key_mtx);
 }
 
+static void
+update_vlan_tailroom_need_count(struct ieee80211_sub_if_data *sdata, int delta)
+{
+       struct ieee80211_sub_if_data *vlan;
+
+       if (sdata->vif.type != NL80211_IFTYPE_AP)
+               return;
+
+       mutex_lock(&sdata->local->mtx);
+
+       list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+               vlan->crypto_tx_tailroom_needed_cnt += delta;
+
+       mutex_unlock(&sdata->local->mtx);
+}
+
 static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
 {
        /*
@@ -79,6 +95,8 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
         * http://mid.gmane.org/1308590980.4322.19.camel@jlt3.sipsolutions.net
         */
 
+       update_vlan_tailroom_need_count(sdata, 1);
+
        if (!sdata->crypto_tx_tailroom_needed_cnt++) {
                /*
                 * Flush all XMIT packets currently using HW encryption or no
@@ -88,6 +106,15 @@ static void increment_tailroom_need_count(struct ieee80211_sub_if_data *sdata)
        }
 }
 
+static void decrease_tailroom_need_count(struct ieee80211_sub_if_data *sdata,
+                                        int delta)
+{
+       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt < delta);
+
+       update_vlan_tailroom_need_count(sdata, -delta);
+       sdata->crypto_tx_tailroom_needed_cnt -= delta;
+}
+
 static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 {
        struct ieee80211_sub_if_data *sdata;
@@ -144,7 +171,7 @@ static int ieee80211_key_enable_hw_accel(struct ieee80211_key *key)
 
                if (!((key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_MMIC) ||
                      (key->conf.flags & IEEE80211_KEY_FLAG_RESERVE_TAILROOM)))
-                       sdata->crypto_tx_tailroom_needed_cnt--;
+                       decrease_tailroom_need_count(sdata, 1);
 
                WARN_ON((key->conf.flags & IEEE80211_KEY_FLAG_PUT_IV_SPACE) &&
                        (key->conf.flags & IEEE80211_KEY_FLAG_GENERATE_IV));
@@ -541,7 +568,7 @@ static void __ieee80211_key_destroy(struct ieee80211_key *key,
                        schedule_delayed_work(&sdata->dec_tailroom_needed_wk,
                                              HZ/2);
                } else {
-                       sdata->crypto_tx_tailroom_needed_cnt--;
+                       decrease_tailroom_need_count(sdata, 1);
                }
        }
 
@@ -631,6 +658,7 @@ void ieee80211_key_free(struct ieee80211_key *key, bool delay_tailroom)
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 {
        struct ieee80211_key *key;
+       struct ieee80211_sub_if_data *vlan;
 
        ASSERT_RTNL();
 
@@ -639,7 +667,14 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
 
        mutex_lock(&sdata->local->key_mtx);
 
-       sdata->crypto_tx_tailroom_needed_cnt = 0;
+       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                    sdata->crypto_tx_tailroom_pending_dec);
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
+                                    vlan->crypto_tx_tailroom_pending_dec);
+       }
 
        list_for_each_entry(key, &sdata->key_list, list) {
                increment_tailroom_need_count(sdata);
@@ -649,6 +684,22 @@ void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata)
        mutex_unlock(&sdata->local->key_mtx);
 }
 
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata)
+{
+       struct ieee80211_sub_if_data *vlan;
+
+       mutex_lock(&sdata->local->key_mtx);
+
+       sdata->crypto_tx_tailroom_needed_cnt = 0;
+
+       if (sdata->vif.type == NL80211_IFTYPE_AP) {
+               list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
+                       vlan->crypto_tx_tailroom_needed_cnt = 0;
+       }
+
+       mutex_unlock(&sdata->local->key_mtx);
+}
+
 void ieee80211_iter_keys(struct ieee80211_hw *hw,
                         struct ieee80211_vif *vif,
                         void (*iter)(struct ieee80211_hw *hw,
@@ -688,8 +739,8 @@ static void ieee80211_free_keys_iface(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_key *key, *tmp;
 
-       sdata->crypto_tx_tailroom_needed_cnt -=
-               sdata->crypto_tx_tailroom_pending_dec;
+       decrease_tailroom_need_count(sdata,
+                                    sdata->crypto_tx_tailroom_pending_dec);
        sdata->crypto_tx_tailroom_pending_dec = 0;
 
        ieee80211_debugfs_key_remove_mgmt_default(sdata);
@@ -709,6 +760,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 {
        struct ieee80211_local *local = sdata->local;
        struct ieee80211_sub_if_data *vlan;
+       struct ieee80211_sub_if_data *master;
        struct ieee80211_key *key, *tmp;
        LIST_HEAD(keys);
 
@@ -728,8 +780,20 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
        list_for_each_entry_safe(key, tmp, &keys, list)
                __ieee80211_key_destroy(key, false);
 
-       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
-                    sdata->crypto_tx_tailroom_pending_dec);
+       if (sdata->vif.type == NL80211_IFTYPE_AP_VLAN) {
+               if (sdata->bss) {
+                       master = container_of(sdata->bss,
+                                             struct ieee80211_sub_if_data,
+                                             u.ap);
+
+                       WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt !=
+                                    master->crypto_tx_tailroom_needed_cnt);
+               }
+       } else {
+               WARN_ON_ONCE(sdata->crypto_tx_tailroom_needed_cnt ||
+                            sdata->crypto_tx_tailroom_pending_dec);
+       }
+
        if (sdata->vif.type == NL80211_IFTYPE_AP) {
                list_for_each_entry(vlan, &sdata->u.ap.vlans, u.vlan.list)
                        WARN_ON_ONCE(vlan->crypto_tx_tailroom_needed_cnt ||
@@ -793,8 +857,8 @@ void ieee80211_delayed_tailroom_dec(struct work_struct *wk)
         */
 
        mutex_lock(&sdata->local->key_mtx);
-       sdata->crypto_tx_tailroom_needed_cnt -=
-               sdata->crypto_tx_tailroom_pending_dec;
+       decrease_tailroom_need_count(sdata,
+                                    sdata->crypto_tx_tailroom_pending_dec);
        sdata->crypto_tx_tailroom_pending_dec = 0;
        mutex_unlock(&sdata->local->key_mtx);
 }
index c5a31835be0e0ca22c154b1345d91be761308833..96557dd1e77dff325072cff12b7b671aad942015 100644 (file)
@@ -161,6 +161,7 @@ void ieee80211_free_keys(struct ieee80211_sub_if_data *sdata,
 void ieee80211_free_sta_keys(struct ieee80211_local *local,
                             struct sta_info *sta);
 void ieee80211_enable_keys(struct ieee80211_sub_if_data *sdata);
+void ieee80211_reset_crypto_tx_tailroom(struct ieee80211_sub_if_data *sdata);
 
 #define key_mtx_dereference(local, ref) \
        rcu_dereference_protected(ref, lockdep_is_held(&((local)->key_mtx)))
index 260eed45b6d2ff105052643169465c04d333c182..5793f75c5ffde91de02e9698bd27500ff4640826 100644 (file)
@@ -2121,7 +2121,8 @@ ieee80211_deliver_skb(struct ieee80211_rx_data *rx)
                /* deliver to local stack */
                skb->protocol = eth_type_trans(skb, dev);
                memset(skb->cb, 0, sizeof(skb->cb));
-               if (rx->local->napi)
+               if (!(rx->flags & IEEE80211_RX_REORDER_TIMER) &&
+                   rx->local->napi)
                        napi_gro_receive(rx->local->napi, skb);
                else
                        netif_receive_skb(skb);
@@ -3231,7 +3232,7 @@ void ieee80211_release_reorder_timeout(struct sta_info *sta, int tid)
                /* This is OK -- must be QoS data frame */
                .security_idx = tid,
                .seqno_idx = tid,
-               .flags = 0,
+               .flags = IEEE80211_RX_REORDER_TIMER,
        };
        struct tid_ampdu_rx *tid_agg_rx;
 
index 12971b71d0fa1ea8ce69dbdc09314c1c1641d7c8..2880f2ae99abe3a05b6421f53340a42e11a9ca30 100644 (file)
@@ -66,6 +66,7 @@
 
 static const struct rhashtable_params sta_rht_params = {
        .nelem_hint = 3, /* start small */
+       .automatic_shrinking = true,
        .head_offset = offsetof(struct sta_info, hash_node),
        .key_offset = offsetof(struct sta_info, sta.addr),
        .key_len = ETH_ALEN,
@@ -157,8 +158,24 @@ struct sta_info *sta_info_get(struct ieee80211_sub_if_data *sdata,
                              const u8 *addr)
 {
        struct ieee80211_local *local = sdata->local;
+       struct sta_info *sta;
+       struct rhash_head *tmp;
+       const struct bucket_table *tbl;
+
+       rcu_read_lock();
+       tbl = rht_dereference_rcu(local->sta_hash.tbl, &local->sta_hash);
 
-       return rhashtable_lookup_fast(&local->sta_hash, addr, sta_rht_params);
+       for_each_sta_info(local, tbl, addr, sta, tmp) {
+               if (sta->sdata == sdata) {
+                       rcu_read_unlock();
+                       /* this is safe as the caller must already hold
+                        * another rcu read section or the mutex
+                        */
+                       return sta;
+               }
+       }
+       rcu_read_unlock();
+       return NULL;
 }
 
 /*
index 79412f16b61db9953a4a537db3bd5693d7c61cdb..b864ebc6ab8fbf2a09baca02e650e7fe0314cc75 100644 (file)
@@ -2022,6 +2022,9 @@ int ieee80211_reconfig(struct ieee80211_local *local)
        mutex_unlock(&local->sta_mtx);
 
        /* add back keys */
+       list_for_each_entry(sdata, &local->interfaces, list)
+               ieee80211_reset_crypto_tx_tailroom(sdata);
+
        list_for_each_entry(sdata, &local->interfaces, list)
                if (ieee80211_sdata_running(sdata))
                        ieee80211_enable_keys(sdata);
index a4220e92f0cc20c0feb04c307f6ed72097f4fe3a..efa3f48f1ec5d51ea7191c8ae90dda77c0d330e1 100644 (file)
@@ -98,8 +98,7 @@ static u8 *ieee80211_wep_add_iv(struct ieee80211_local *local,
 
        hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PROTECTED);
 
-       if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN ||
-                   skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
+       if (WARN_ON(skb_headroom(skb) < IEEE80211_WEP_IV_LEN))
                return NULL;
 
        hdrlen = ieee80211_hdrlen(hdr->frame_control);
@@ -167,6 +166,9 @@ int ieee80211_wep_encrypt(struct ieee80211_local *local,
        size_t len;
        u8 rc4key[3 + WLAN_KEY_LEN_WEP104];
 
+       if (WARN_ON(skb_tailroom(skb) < IEEE80211_WEP_ICV_LEN))
+               return -1;
+
        iv = ieee80211_wep_add_iv(local, skb, keylen, keyidx);
        if (!iv)
                return -1;
index 5d9f68c75e5f8f68c5884d467e5a9d16db0edaac..70be9c799f8a81596a4e753b239849549d792dd0 100644 (file)
 
 static struct net_device *
 ieee802154_add_iface_deprecated(struct wpan_phy *wpan_phy,
-                               const char *name, int type)
+                               const char *name,
+                               unsigned char name_assign_type, int type)
 {
        struct ieee802154_local *local = wpan_phy_priv(wpan_phy);
        struct net_device *dev;
 
        rtnl_lock();
-       dev = ieee802154_if_add(local, name, type,
+       dev = ieee802154_if_add(local, name, name_assign_type, type,
                                cpu_to_le64(0x0000000000000000ULL));
        rtnl_unlock();
 
@@ -45,12 +46,14 @@ static void ieee802154_del_iface_deprecated(struct wpan_phy *wpan_phy,
 
 static int
 ieee802154_add_iface(struct wpan_phy *phy, const char *name,
+                    unsigned char name_assign_type,
                     enum nl802154_iftype type, __le64 extended_addr)
 {
        struct ieee802154_local *local = wpan_phy_priv(phy);
        struct net_device *err;
 
-       err = ieee802154_if_add(local, name, type, extended_addr);
+       err = ieee802154_if_add(local, name, name_assign_type, type,
+                               extended_addr);
        return PTR_ERR_OR_ZERO(err);
 }
 
index bebd70ffc7a3d101551f023e515db24033883f0b..127ba18386fc639aac4ccda482ebe4be11b8e6ee 100644 (file)
@@ -182,7 +182,8 @@ void ieee802154_iface_exit(void);
 void ieee802154_if_remove(struct ieee802154_sub_if_data *sdata);
 struct net_device *
 ieee802154_if_add(struct ieee802154_local *local, const char *name,
-                 enum nl802154_iftype type, __le64 extended_addr);
+                 unsigned char name_assign_type, enum nl802154_iftype type,
+                 __le64 extended_addr);
 void ieee802154_remove_interfaces(struct ieee802154_local *local);
 
 #endif /* __IEEE802154_I_H */
index 38b56f9d9386a4821e50cfdd6059fa115c5e4358..91b75abbd1a1d05b3219b9089232d9f67eb73ccd 100644 (file)
@@ -522,7 +522,8 @@ ieee802154_setup_sdata(struct ieee802154_sub_if_data *sdata,
 
 struct net_device *
 ieee802154_if_add(struct ieee802154_local *local, const char *name,
-                 enum nl802154_iftype type, __le64 extended_addr)
+                 unsigned char name_assign_type, enum nl802154_iftype type,
+                 __le64 extended_addr)
 {
        struct net_device *ndev = NULL;
        struct ieee802154_sub_if_data *sdata = NULL;
@@ -531,7 +532,7 @@ ieee802154_if_add(struct ieee802154_local *local, const char *name,
        ASSERT_RTNL();
 
        ndev = alloc_netdev(sizeof(*sdata) + local->hw.vif_data_size, name,
-                           NET_NAME_UNKNOWN, ieee802154_if_setup);
+                           name_assign_type, ieee802154_if_setup);
        if (!ndev)
                return ERR_PTR(-ENOMEM);
 
index dcf73958133a0d6d0c28f825f648b13cc9ef478b..5b2be12832e65fca351dec5a61619c1042de9004 100644 (file)
@@ -134,7 +134,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
        for (i = 0; i < ARRAY_SIZE(key->tfm); i++) {
                key->tfm[i] = crypto_alloc_aead("ccm(aes)", 0,
                                                CRYPTO_ALG_ASYNC);
-               if (!key->tfm[i])
+               if (IS_ERR(key->tfm[i]))
                        goto err_tfm;
                if (crypto_aead_setkey(key->tfm[i], template->key,
                                       IEEE802154_LLSEC_KEY_SIZE))
@@ -144,7 +144,7 @@ llsec_key_alloc(const struct ieee802154_llsec_key *template)
        }
 
        key->tfm0 = crypto_alloc_blkcipher("ctr(aes)", 0, CRYPTO_ALG_ASYNC);
-       if (!key->tfm0)
+       if (IS_ERR(key->tfm0))
                goto err_tfm;
 
        if (crypto_blkcipher_setkey(key->tfm0, template->key,
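The IS_ERR() fixes above correct a classic error-handling bug: the crypto allocation API returns an ERR_PTR()-encoded pointer on failure, never NULL, so a !ptr test passes on failure and the bad pointer is dereferenced later. The canonical check, as a sketch:

    #include <linux/crypto.h>
    #include <linux/err.h>

    static struct crypto_aead *alloc_ccm_aes(void)
    {
            struct crypto_aead *tfm;

            tfm = crypto_alloc_aead("ccm(aes)", 0, CRYPTO_ALG_ASYNC);
            if (IS_ERR(tfm)) {
                    /* the errno is encoded in the pointer, not signalled
                     * by NULL
                     */
                    pr_err("ccm(aes): %ld\n", PTR_ERR(tfm));
                    return NULL;
            }
            return tfm;
    }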
index 8500378c8318cd3b5b7e3a368f44080b40c5900f..08cb32dc8fd33e892e53f7f87f601b10ede8c38d 100644 (file)
@@ -161,18 +161,21 @@ int ieee802154_register_hw(struct ieee802154_hw *hw)
 
        rtnl_lock();
 
-       dev = ieee802154_if_add(local, "wpan%d", NL802154_IFTYPE_NODE,
+       dev = ieee802154_if_add(local, "wpan%d", NET_NAME_ENUM,
+                               NL802154_IFTYPE_NODE,
                                cpu_to_le64(0x0000000000000000ULL));
        if (IS_ERR(dev)) {
                rtnl_unlock();
                rc = PTR_ERR(dev);
-               goto out_wq;
+               goto out_phy;
        }
 
        rtnl_unlock();
 
        return 0;
 
+out_phy:
+       wpan_phy_unregister(local->phy);
 out_wq:
        destroy_workqueue(local->workqueue);
 out:
index 954810c76a8650d87ec4bddc62fa5618e48c1341..1f93a5978f2ad43fc81a16427e34d07ca2c0f34e 100644 (file)
@@ -541,7 +541,7 @@ static void mpls_ifdown(struct net_device *dev)
 
        RCU_INIT_POINTER(dev->mpls_ptr, NULL);
 
-       kfree(mdev);
+       kfree_rcu(mdev, rcu);
 }
 
 static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
@@ -564,6 +564,17 @@ static int mpls_dev_notify(struct notifier_block *this, unsigned long event,
        case NETDEV_UNREGISTER:
                mpls_ifdown(dev);
                break;
+       case NETDEV_CHANGENAME:
+               mdev = mpls_dev_get(dev);
+               if (mdev) {
+                       int err;
+
+                       mpls_dev_sysctl_unregister(mdev);
+                       err = mpls_dev_sysctl_register(dev, mdev);
+                       if (err)
+                               return notifier_from_errno(err);
+               }
+               break;
        }
        return NOTIFY_OK;
 }
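
Two related changes land in this file: kfree() becomes kfree_rcu() because RCU readers that looked up mdev may still be dereferencing it when mpls_ifdown() clears the pointer, and the header hunk below adds the struct rcu_head that kfree_rcu() requires. A minimal sketch of the pattern, with mpls_dev_like as a stand-in type:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct mpls_dev_like {
	int		input_enabled;
	struct rcu_head	rcu;		/* required by kfree_rcu() */
};

static void drop_object(struct mpls_dev_like __rcu **slot)
{
	struct mpls_dev_like *p = rcu_dereference_protected(*slot, 1);

	RCU_INIT_POINTER(*slot, NULL);	/* unpublish the pointer first */
	kfree_rcu(p, rcu);		/* free only after a grace period */
}
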
@@ -647,7 +658,7 @@ int nla_get_labels(const struct nlattr *nla,
                        return -EINVAL;
 
                switch (dec.label) {
-               case LABEL_IMPLICIT_NULL:
+               case MPLS_LABEL_IMPLNULL:
                        /* RFC3032: This is a label that an LSR may
                         * assign and distribute, but which never
                         * actually appears in the encapsulation.
@@ -935,7 +946,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
        }
 
        /* In case the predefined labels need to be populated */
-       if (limit > LABEL_IPV4_EXPLICIT_NULL) {
+       if (limit > MPLS_LABEL_IPV4NULL) {
                struct net_device *lo = net->loopback_dev;
                rt0 = mpls_rt_alloc(lo->addr_len);
                if (!rt0)
@@ -945,7 +956,7 @@ static int resize_platform_label_table(struct net *net, size_t limit)
                rt0->rt_via_table = NEIGH_LINK_TABLE;
                memcpy(rt0->rt_via, lo->dev_addr, lo->addr_len);
        }
-       if (limit > LABEL_IPV6_EXPLICIT_NULL) {
+       if (limit > MPLS_LABEL_IPV6NULL) {
                struct net_device *lo = net->loopback_dev;
                rt2 = mpls_rt_alloc(lo->addr_len);
                if (!rt2)
@@ -973,15 +984,15 @@ static int resize_platform_label_table(struct net *net, size_t limit)
        memcpy(labels, old, cp_size);
 
        /* If needed set the predefined labels */
-       if ((old_limit <= LABEL_IPV6_EXPLICIT_NULL) &&
-           (limit > LABEL_IPV6_EXPLICIT_NULL)) {
-               RCU_INIT_POINTER(labels[LABEL_IPV6_EXPLICIT_NULL], rt2);
+       if ((old_limit <= MPLS_LABEL_IPV6NULL) &&
+           (limit > MPLS_LABEL_IPV6NULL)) {
+               RCU_INIT_POINTER(labels[MPLS_LABEL_IPV6NULL], rt2);
                rt2 = NULL;
        }
 
-       if ((old_limit <= LABEL_IPV4_EXPLICIT_NULL) &&
-           (limit > LABEL_IPV4_EXPLICIT_NULL)) {
-               RCU_INIT_POINTER(labels[LABEL_IPV4_EXPLICIT_NULL], rt0);
+       if ((old_limit <= MPLS_LABEL_IPV4NULL) &&
+           (limit > MPLS_LABEL_IPV4NULL)) {
+               RCU_INIT_POINTER(labels[MPLS_LABEL_IPV4NULL], rt0);
                rt0 = NULL;
        }
 
index 693877d69606a1ecdab02dd78dc74eefdaf26f3f..8cabeb5a1cb928c856c037c5994116df8547fb71 100644 (file)
@@ -1,16 +1,6 @@
 #ifndef MPLS_INTERNAL_H
 #define MPLS_INTERNAL_H
 
-#define LABEL_IPV4_EXPLICIT_NULL       0 /* RFC3032 */
-#define LABEL_ROUTER_ALERT_LABEL       1 /* RFC3032 */
-#define LABEL_IPV6_EXPLICIT_NULL       2 /* RFC3032 */
-#define LABEL_IMPLICIT_NULL            3 /* RFC3032 */
-#define LABEL_ENTROPY_INDICATOR                7 /* RFC6790 */
-#define LABEL_GAL                      13 /* RFC5586 */
-#define LABEL_OAM_ALERT                        14 /* RFC3429 */
-#define LABEL_EXTENSION                        15 /* RFC7274 */
-
-
 struct mpls_shim_hdr {
        __be32 label_stack_entry;
 };
@@ -26,6 +16,7 @@ struct mpls_dev {
        int                     input_enabled;
 
        struct ctl_table_header *sysctl;
+       struct rcu_head         rcu;
 };
 
 struct sk_buff;
index f70e34a68f702ab39c43e27d4b8e8127b49525f6..a0f3e6a3c7d18f344d3321a83b5c11d1988d5d3d 100644 (file)
@@ -863,6 +863,7 @@ config NETFILTER_XT_TARGET_TPROXY
        depends on NETFILTER_XTABLES
        depends on NETFILTER_ADVANCED
        depends on (IPV6 || IPV6=n)
+       depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
        depends on IP_NF_MANGLE
        select NF_DEFRAG_IPV4
        select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
@@ -1356,6 +1357,7 @@ config NETFILTER_XT_MATCH_SOCKET
        depends on NETFILTER_ADVANCED
        depends on !NF_CONNTRACK || NF_CONNTRACK
        depends on (IPV6 || IPV6=n)
+       depends on (IP6_NF_IPTABLES || IP6_NF_IPTABLES=n)
        select NF_DEFRAG_IPV4
        select NF_DEFRAG_IPV6 if IP6_NF_IPTABLES
        help
index 49532672f66dad0c3bae1b923993b0d1f518b25b..285eae3a145483c48c00493651a46a5d81656845 100644 (file)
@@ -3823,6 +3823,9 @@ static void __net_exit ip_vs_control_net_cleanup_sysctl(struct net *net)
        cancel_work_sync(&ipvs->defense_work.work);
        unregister_net_sysctl_table(ipvs->sysctl_hdr);
        ip_vs_stop_estimator(net, &ipvs->tot_stats);
+
+       if (!net_eq(net, &init_net))
+               kfree(ipvs->sysctl_tbl);
 }
 
 #else
index 5caa0c41bf26c3e6a2542f0dd50ac6f029ed8a84..70383de7205460a8ebdadd7fd1ba615fc7681296 100644 (file)
@@ -202,7 +202,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sES -> sES      :-)
  *     sFW -> sCW      Normal close request answered by ACK.
  *     sCW -> sCW
- *     sLA -> sTW      Last ACK detected.
+ *     sLA -> sTW      Last ACK detected (RFC5961 challenged)
  *     sTW -> sTW      Retransmitted last ACK. Remain in the same state.
  *     sCL -> sCL
  */
@@ -261,7 +261,7 @@ static const u8 tcp_conntracks[2][6][TCP_CONNTRACK_MAX] = {
  *     sES -> sES      :-)
  *     sFW -> sCW      Normal close request answered by ACK.
  *     sCW -> sCW
- *     sLA -> sTW      Last ACK detected.
+ *     sLA -> sTW      Last ACK detected (RFC5961 challenged)
  *     sTW -> sTW      Retransmitted last ACK.
  *     sCL -> sCL
  */
@@ -906,6 +906,7 @@ static int tcp_packet(struct nf_conn *ct,
                                        1 : ct->proto.tcp.last_win;
                        ct->proto.tcp.seen[ct->proto.tcp.last_dir].td_scale =
                                ct->proto.tcp.last_wscale;
+                       ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
                        ct->proto.tcp.seen[ct->proto.tcp.last_dir].flags =
                                ct->proto.tcp.last_flags;
                        memset(&ct->proto.tcp.seen[dir], 0,
@@ -923,7 +924,9 @@ static int tcp_packet(struct nf_conn *ct,
                 * may be in sync but we are not. In that case, we annotate
                 * the TCP options and let the packet go through. If it is a
                 * valid SYN packet, the server will reply with a SYN/ACK, and
-                * then we'll get in sync. Otherwise, the server ignores it. */
+                * then we'll get in sync. Otherwise, the server potentially
+                * responds with a challenge ACK if implementing RFC5961.
+                */
                if (index == TCP_SYN_SET && dir == IP_CT_DIR_ORIGINAL) {
                        struct ip_ct_tcp_state seen = {};
 
@@ -939,6 +942,13 @@ static int tcp_packet(struct nf_conn *ct,
                                ct->proto.tcp.last_flags |=
                                        IP_CT_TCP_FLAG_SACK_PERM;
                        }
+                       /* Mark the potential for an RFC5961 challenge ACK;
+                        * this poses a special problem for the LAST_ACK state,
+                        * as the ACK is interpreted as ACKing the last FIN.
+                        */
+                       if (old_state == TCP_CONNTRACK_LAST_ACK)
+                               ct->proto.tcp.last_flags |=
+                                       IP_CT_EXP_CHALLENGE_ACK;
                }
                spin_unlock_bh(&ct->lock);
                if (LOG_INVALID(net, IPPROTO_TCP))
@@ -970,6 +980,25 @@ static int tcp_packet(struct nf_conn *ct,
                        nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
                                  "nf_ct_tcp: invalid state ");
                return -NF_ACCEPT;
+       case TCP_CONNTRACK_TIME_WAIT:
+               /* RFC5961 compliance causes the stack to send a "challenge
+                * ACK", e.g. in response to spurious SYNs.  Conntrack MUST
+                * not believe this ACK is acking the last FIN.
+                */
+               if (old_state == TCP_CONNTRACK_LAST_ACK &&
+                   index == TCP_ACK_SET &&
+                   ct->proto.tcp.last_dir != dir &&
+                   ct->proto.tcp.last_index == TCP_SYN_SET &&
+                   (ct->proto.tcp.last_flags & IP_CT_EXP_CHALLENGE_ACK)) {
+                       /* Detected RFC5961 challenge ACK */
+                       ct->proto.tcp.last_flags &= ~IP_CT_EXP_CHALLENGE_ACK;
+                       spin_unlock_bh(&ct->lock);
+                       if (LOG_INVALID(net, IPPROTO_TCP))
+                               nf_log_packet(net, pf, 0, skb, NULL, NULL, NULL,
+                                     "nf_ct_tcp: challenge-ACK ignored ");
+                       return NF_ACCEPT; /* Don't change state */
+               }
+               break;
        case TCP_CONNTRACK_CLOSE:
                if (index == TCP_RST_SET
                    && (ct->proto.tcp.seen[!dir].flags & IP_CT_TCP_FLAG_MAXACK_SET)
index ad9d11fb29fd208cd44d67e786b7977afc9e51e7..34ded09317e715cc94b80ce8d918006bbe1f714b 100644 (file)
@@ -4472,9 +4472,9 @@ EXPORT_SYMBOL_GPL(nft_data_init);
  */
 void nft_data_uninit(const struct nft_data *data, enum nft_data_types type)
 {
-       switch (type) {
-       case NFT_DATA_VALUE:
+       if (type < NFT_DATA_VERDICT)
                return;
+       switch (type) {
        case NFT_DATA_VERDICT:
                return nft_verdict_uninit(data);
        default:
index 3ad91266c821489500fbc8cbbcfc7bfd774b6f48..4ef1fae8445ed5d00183e3b15c7ca18133957983 100644 (file)
@@ -1073,7 +1073,13 @@ static struct pernet_operations nfnl_log_net_ops = {
 
 static int __init nfnetlink_log_init(void)
 {
-       int status = -ENOMEM;
+       int status;
+
+       status = register_pernet_subsys(&nfnl_log_net_ops);
+       if (status < 0) {
+               pr_err("failed to register pernet ops\n");
+               goto out;
+       }
 
        netlink_register_notifier(&nfulnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfulnl_subsys);
@@ -1088,28 +1094,23 @@ static int __init nfnetlink_log_init(void)
                goto cleanup_subsys;
        }
 
-       status = register_pernet_subsys(&nfnl_log_net_ops);
-       if (status < 0) {
-               pr_err("failed to register pernet ops\n");
-               goto cleanup_logger;
-       }
        return status;
 
-cleanup_logger:
-       nf_log_unregister(&nfulnl_logger);
 cleanup_subsys:
        nfnetlink_subsys_unregister(&nfulnl_subsys);
 cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfulnl_rtnl_notifier);
+       unregister_pernet_subsys(&nfnl_log_net_ops);
+out:
        return status;
 }
 
 static void __exit nfnetlink_log_fini(void)
 {
-       unregister_pernet_subsys(&nfnl_log_net_ops);
        nf_log_unregister(&nfulnl_logger);
        nfnetlink_subsys_unregister(&nfulnl_subsys);
        netlink_unregister_notifier(&nfulnl_rtnl_notifier);
+       unregister_pernet_subsys(&nfnl_log_net_ops);
 }
 
 MODULE_DESCRIPTION("netfilter userspace logging");
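
Both this hunk and the matching nfnetlink_queue change below reorder module bring-up so the pernet state is registered before anything that can reach it, and unregistered only after every such user is gone; init and exit mirror each other. A minimal sketch of the ordering, with hypothetical register_state()/register_interface() helpers:

static int __init mod_init(void)
{
	int err;

	err = register_state();		/* what later steps depend on */
	if (err)
		return err;

	err = register_interface();	/* may start using the state at once */
	if (err)
		unregister_state();
	return err;
}

static void __exit mod_exit(void)
{
	unregister_interface();		/* cut off new users first */
	unregister_state();		/* then tear down what they depended on */
}
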
index 0b98c74202390ae79598ceb955360f937bb9556d..11c7682fa0ea1fbd13c90a38126f6a77efcac537 100644 (file)
@@ -1317,7 +1317,13 @@ static struct pernet_operations nfnl_queue_net_ops = {
 
 static int __init nfnetlink_queue_init(void)
 {
-       int status = -ENOMEM;
+       int status;
+
+       status = register_pernet_subsys(&nfnl_queue_net_ops);
+       if (status < 0) {
+               pr_err("nf_queue: failed to register pernet ops\n");
+               goto out;
+       }
 
        netlink_register_notifier(&nfqnl_rtnl_notifier);
        status = nfnetlink_subsys_register(&nfqnl_subsys);
@@ -1326,19 +1332,13 @@ static int __init nfnetlink_queue_init(void)
                goto cleanup_netlink_notifier;
        }
 
-       status = register_pernet_subsys(&nfnl_queue_net_ops);
-       if (status < 0) {
-               pr_err("nf_queue: failed to register pernet ops\n");
-               goto cleanup_subsys;
-       }
        register_netdevice_notifier(&nfqnl_dev_notifier);
        nf_register_queue_handler(&nfqh);
        return status;
 
-cleanup_subsys:
-       nfnetlink_subsys_unregister(&nfqnl_subsys);
 cleanup_netlink_notifier:
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+out:
        return status;
 }
 
@@ -1346,9 +1346,9 @@ static void __exit nfnetlink_queue_fini(void)
 {
        nf_unregister_queue_handler();
        unregister_netdevice_notifier(&nfqnl_dev_notifier);
-       unregister_pernet_subsys(&nfnl_queue_net_ops);
        nfnetlink_subsys_unregister(&nfqnl_subsys);
        netlink_unregister_notifier(&nfqnl_rtnl_notifier);
+       unregister_pernet_subsys(&nfnl_queue_net_ops);
 
        rcu_barrier(); /* Wait for completion of call_rcu()'s */
 }
index ec4adbdcb9b4a9926b4d992906f51787c57dc6a2..bf6e76643f7876d8dee4df261baf077aad837be3 100644 (file)
@@ -89,7 +89,7 @@ static inline int netlink_is_kernel(struct sock *sk)
        return nlk_sk(sk)->flags & NETLINK_KERNEL_SOCKET;
 }
 
-struct netlink_table *nl_table;
+struct netlink_table *nl_table __read_mostly;
 EXPORT_SYMBOL_GPL(nl_table);
 
 static DECLARE_WAIT_QUEUE_HEAD(nl_table_wait);
@@ -1081,6 +1081,7 @@ static int netlink_insert(struct sock *sk, u32 portid)
        if (err) {
                if (err == -EEXIST)
                        err = -EADDRINUSE;
+               nlk_sk(sk)->portid = 0;
                sock_put(sk);
        }
 
@@ -3139,7 +3140,6 @@ static const struct rhashtable_params netlink_rhashtable_params = {
        .key_len = netlink_compare_arg_len,
        .obj_hashfn = netlink_hash,
        .obj_cmpfn = netlink_compare,
-       .max_size = 65536,
        .automatic_shrinking = true,
 };
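
Dropping .max_size removes the hard cap on table growth (insertions beyond it could fail), while .automatic_shrinking lets the table contract again under low load. A minimal sketch of an rhashtable_params setup along the same lines, with my_obj as a hypothetical keyed object:

#include <linux/rhashtable.h>

struct my_obj {
	u32			key;
	struct rhash_head	node;
};

static const struct rhashtable_params my_params = {
	.head_offset		= offsetof(struct my_obj, node),
	.key_offset		= offsetof(struct my_obj, key),
	.key_len		= sizeof(u32),
	.automatic_shrinking	= true,	/* no .max_size: grow as demand requires */
};
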
 
index 4776282c64175209924740fbd87a56de8e05b609..33e6d6e2908f553516c5ca97c4b93abee7b7057b 100644 (file)
@@ -125,6 +125,7 @@ static struct vport *netdev_create(const struct vport_parms *parms)
        if (err)
                goto error_master_upper_dev_unlink;
 
+       dev_disable_lro(netdev_vport->dev);
        dev_set_promiscuity(netdev_vport->dev, 1);
        netdev_vport->dev->priv_flags |= IFF_OVS_DATAPATH;
        rtnl_unlock();
index 5102c3cc4eec4ecec6698859935d7769d37a174c..b5989c6ee5513904127a8ffec31d09589094c8f6 100644 (file)
@@ -2311,11 +2311,14 @@ static int tpacket_snd(struct packet_sock *po, struct msghdr *msg)
                tlen = dev->needed_tailroom;
                skb = sock_alloc_send_skb(&po->sk,
                                hlen + tlen + sizeof(struct sockaddr_ll),
-                               0, &err);
+                               !need_wait, &err);
 
-               if (unlikely(skb == NULL))
+               if (unlikely(skb == NULL)) {
+                       /* we assume the socket was initially writeable ... */
+                       if (likely(len_sum > 0))
+                               err = len_sum;
                        goto out_status;
-
+               }
                tp_len = tpacket_fill_skb(po, skb, ph, dev, size_max, proto,
                                          addr, hlen);
                if (tp_len > dev->mtu + dev->hard_header_len) {
index 14f041398ca1744ea7596decaad7145184c7df0c..da6da57e5f36b5cc13a5bc92abfedb6a5ccea45d 100644 (file)
@@ -126,7 +126,10 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
        struct rds_transport *loop_trans;
        unsigned long flags;
        int ret;
+       struct rds_transport *otrans = trans;
 
+       if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
+               goto new_conn;
        rcu_read_lock();
        conn = rds_conn_lookup(head, laddr, faddr, trans);
        if (conn && conn->c_loopback && conn->c_trans != &rds_loop_transport &&
@@ -142,6 +145,7 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
        if (conn)
                goto out;
 
+new_conn:
        conn = kmem_cache_zalloc(rds_conn_slab, gfp);
        if (!conn) {
                conn = ERR_PTR(-ENOMEM);
@@ -230,13 +234,22 @@ static struct rds_connection *__rds_conn_create(__be32 laddr, __be32 faddr,
                /* Creating normal conn */
                struct rds_connection *found;
 
-               found = rds_conn_lookup(head, laddr, faddr, trans);
+               if (!is_outgoing && otrans->t_type == RDS_TRANS_TCP)
+                       found = NULL;
+               else
+                       found = rds_conn_lookup(head, laddr, faddr, trans);
                if (found) {
                        trans->conn_free(conn->c_transport_data);
                        kmem_cache_free(rds_conn_slab, conn);
                        conn = found;
                } else {
-                       hlist_add_head_rcu(&conn->c_hash_node, head);
+                       if ((is_outgoing && otrans->t_type == RDS_TRANS_TCP) ||
+                           (otrans->t_type != RDS_TRANS_TCP)) {
+                               /* Only the active side should be added to
+                                * reconnect list for TCP.
+                                */
+                               hlist_add_head_rcu(&conn->c_hash_node, head);
+                       }
                        rds_cong_add_conn(conn);
                        rds_conn_count++;
                }
index 31b74f5e61adbd37535b636b1499c384bdd992f5..8a09ee7db3c13bdd833784c4ee311e048a7c2789 100644 (file)
@@ -183,8 +183,17 @@ void rds_ib_cm_connect_complete(struct rds_connection *conn, struct rdma_cm_even
 
        /* If the peer gave us the last packet it saw, process this as if
         * we had received a regular ACK. */
-       if (dp && dp->dp_ack_seq)
-               rds_send_drop_acked(conn, be64_to_cpu(dp->dp_ack_seq), NULL);
+       if (dp) {
+               /* The dp structure is not guaranteed to start 8-byte aligned.
+                * Since dp_ack_seq is 64 bits, extended load operations may be
+                * used, so go through get_unaligned() to avoid faults.
+                */
+               __be64 dp_ack_seq = get_unaligned(&dp->dp_ack_seq);
+
+               if (dp_ack_seq)
+                       rds_send_drop_acked(conn, be64_to_cpu(dp_ack_seq),
+                                           NULL);
+       }
 
        rds_connect_complete(conn);
 }
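
get_unaligned() is the right tool here because dp points into received wire data: on architectures that fault on unaligned 8-byte loads, a plain dereference of the __be64 field could oops. A minimal sketch under that assumption:

#include <asm/unaligned.h>

static u64 read_wire_seq(const void *buf)
{
	const __be64 *field = buf;	/* may sit at any byte offset */

	return be64_to_cpu(get_unaligned(field));	/* byte-safe load */
}
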
index f9f564a6c960e47b3c243d11a3e11317c5a56353..973109c7b8e86f21bec783eb9e4e118e6e8ebb8b 100644 (file)
@@ -62,6 +62,7 @@ void rds_tcp_state_change(struct sock *sk)
                case TCP_ESTABLISHED:
                        rds_connect_complete(conn);
                        break;
+               case TCP_CLOSE_WAIT:
                case TCP_CLOSE:
                        rds_conn_drop(conn);
                default:
index 23ab4dcd1d9f03942aa4d70bc7f6d9aa401f7707..0da49e34495f1e974bf466b83ba1144ec8965f61 100644 (file)
@@ -45,12 +45,45 @@ static void rds_tcp_accept_worker(struct work_struct *work);
 static DECLARE_WORK(rds_tcp_listen_work, rds_tcp_accept_worker);
 static struct socket *rds_tcp_listen_sock;
 
+static int rds_tcp_keepalive(struct socket *sock)
+{
+       /* values below based on xs_udp_default_timeout */
+       int keepidle = 5; /* send a probe 'keepidle' secs after last data */
+       int keepcnt = 5; /* number of unack'ed probes before declaring dead */
+       int keepalive = 1;
+       int ret = 0;
+
+       ret = kernel_setsockopt(sock, SOL_SOCKET, SO_KEEPALIVE,
+                               (char *)&keepalive, sizeof(keepalive));
+       if (ret < 0)
+               goto bail;
+
+       ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPCNT,
+                               (char *)&keepcnt, sizeof(keepcnt));
+       if (ret < 0)
+               goto bail;
+
+       ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPIDLE,
+                               (char *)&keepidle, sizeof(keepidle));
+       if (ret < 0)
+               goto bail;
+
+       /* KEEPINTVL is the interval between successive probes. We follow
+        * the model in xs_tcp_finish_connecting() and re-use keepidle.
+        */
+       ret = kernel_setsockopt(sock, IPPROTO_TCP, TCP_KEEPINTVL,
+                               (char *)&keepidle, sizeof(keepidle));
+bail:
+       return ret;
+}
+
 static int rds_tcp_accept_one(struct socket *sock)
 {
        struct socket *new_sock = NULL;
        struct rds_connection *conn;
        int ret;
        struct inet_sock *inet;
+       struct rds_tcp_connection *rs_tcp;
 
        ret = sock_create_lite(sock->sk->sk_family, sock->sk->sk_type,
                               sock->sk->sk_protocol, &new_sock);
@@ -63,6 +96,10 @@ static int rds_tcp_accept_one(struct socket *sock)
        if (ret < 0)
                goto out;
 
+       ret = rds_tcp_keepalive(new_sock);
+       if (ret < 0)
+               goto out;
+
        rds_tcp_tune(new_sock);
 
        inet = inet_sk(new_sock->sk);
@@ -77,6 +114,15 @@ static int rds_tcp_accept_one(struct socket *sock)
                ret = PTR_ERR(conn);
                goto out;
        }
+       /* An incoming SYN request arrived and TCP just accepted it.
+        * We always create a new conn for the listen side of TCP, and do
+        * not add it to the c_hash_list.
+        *
+        * If the client reboots, this conn will need to be cleaned up.
+        * rds_tcp_state_change() will do that cleanup.
+        */
+       rs_tcp = (struct rds_tcp_connection *)conn->c_transport_data;
+       WARN_ON(!rs_tcp || rs_tcp->t_sock);
 
        /*
         * see the comment above rds_queue_delayed_reconnect()
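
rds_tcp_keepalive() above drives the standard TCP keepalive knobs from kernel space; the same tuning is available to userspace through setsockopt(2). A minimal userspace sketch with the same values:

#include <netinet/in.h>
#include <netinet/tcp.h>
#include <sys/socket.h>

static int tcp_keepalive(int fd)
{
	int on = 1, idle = 5, cnt = 5, intvl = 5;

	if (setsockopt(fd, SOL_SOCKET, SO_KEEPALIVE, &on, sizeof(on)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPCNT, &cnt, sizeof(cnt)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &idle, sizeof(idle)) ||
	    setsockopt(fd, IPPROTO_TCP, TCP_KEEPINTVL, &intvl, sizeof(intvl)))
		return -1;		/* errno holds the cause */
	return 0;
}
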
index 8b0470e418dc6e9475464768d629969087e66b37..a75864d93142153bfff4ab765620e10bcfab3e96 100644 (file)
@@ -81,6 +81,11 @@ int unregister_tcf_proto_ops(struct tcf_proto_ops *ops)
        struct tcf_proto_ops *t;
        int rc = -ENOENT;
 
+       /* Wait for outstanding call_rcu()s, if any, from a
+        * tcf_proto_ops's destroy() handler.
+        */
+       rcu_barrier();
+
        write_lock(&cls_mod_lock);
        list_for_each_entry(t, &tcf_proto_base, head) {
                if (t == ops) {
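
The rcu_barrier() here is deliberate: synchronize_rcu() only waits for readers, while rcu_barrier() waits for already-queued call_rcu() callbacks to finish executing, which is what must happen before the ops (and the module providing them) may disappear. A minimal sketch of the teardown ordering:

#include <linux/rcupdate.h>
#include <linux/slab.h>

struct item {
	struct rcu_head rcu;
};

static void item_free(struct rcu_head *head)
{
	kfree(container_of(head, struct item, rcu));
}

/* elsewhere, on removal: call_rcu(&it->rcu, item_free); */

static void teardown(void)
{
	rcu_barrier();	/* drain pending call_rcu() callbacks like item_free() */
	/* past this point no callback can still run module code */
}
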
@@ -308,12 +313,11 @@ replay:
                case RTM_DELTFILTER:
                        err = tp->ops->delete(tp, fh);
                        if (err == 0) {
-                               tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
-                               if (tcf_destroy(tp, false)) {
-                                       struct tcf_proto *next = rtnl_dereference(tp->next);
+                               struct tcf_proto *next = rtnl_dereference(tp->next);
 
+                               tfilter_notify(net, skb, n, tp, fh, RTM_DELTFILTER);
+                               if (tcf_destroy(tp, false))
                                        RCU_INIT_POINTER(*back, next);
-                               }
                        }
                        goto errout;
                case RTM_GETTFILTER:
index ad9eed70bc8f8e16c3118c6527374a952823e2c0..73a123daa2cc5c4c43c69120d1fecd273df76c17 100644 (file)
@@ -815,10 +815,8 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                if (dev->flags & IFF_UP)
                        dev_deactivate(dev);
 
-               if (new && new->ops->attach) {
-                       new->ops->attach(new);
-                       num_q = 0;
-               }
+               if (new && new->ops->attach)
+                       goto skip;
 
                for (i = 0; i < num_q; i++) {
                        struct netdev_queue *dev_queue = dev_ingress_queue(dev);
@@ -834,12 +832,16 @@ static int qdisc_graft(struct net_device *dev, struct Qdisc *parent,
                                qdisc_destroy(old);
                }
 
+skip:
                if (!ingress) {
                        notify_and_destroy(net, skb, n, classid,
                                           dev->qdisc, new);
                        if (new && !new->ops->attach)
                                atomic_inc(&new->refcnt);
                        dev->qdisc = new ? : &noop_qdisc;
+
+                       if (new && new->ops->attach)
+                               new->ops->attach(new);
                } else {
                        notify_and_destroy(net, skb, n, classid, old, new);
                }
@@ -1883,13 +1885,10 @@ EXPORT_SYMBOL(tcf_destroy_chain);
 #ifdef CONFIG_PROC_FS
 static int psched_show(struct seq_file *seq, void *v)
 {
-       struct timespec ts;
-
-       hrtimer_get_res(CLOCK_MONOTONIC, &ts);
        seq_printf(seq, "%08x %08x %08x %08x\n",
                   (u32)NSEC_PER_USEC, (u32)PSCHED_TICKS2NS(1),
                   1000000,
-                  (u32)NSEC_PER_SEC/(u32)ktime_to_ns(timespec_to_ktime(ts)));
+                  (u32)NSEC_PER_SEC / hrtimer_resolution);
 
        return 0;
 }
index de28f8e968e8176ac7630a1e6fcccb45ad295f5d..7a0bdb16ac92fd0a20f565295392bed8674c8d90 100644 (file)
@@ -164,7 +164,7 @@ static int codel_init(struct Qdisc *sch, struct nlattr *opt)
 
        sch->limit = DEFAULT_CODEL_LIMIT;
 
-       codel_params_init(&q->params);
+       codel_params_init(&q->params, sch);
        codel_vars_init(&q->vars);
        codel_stats_init(&q->stats);
 
index 1e52decb7b59cf0b4173d0f17efdab8fefee5f26..c244c45b78d7feca32fda3b925f7605aebf0a5b6 100644 (file)
@@ -391,7 +391,7 @@ static int fq_codel_init(struct Qdisc *sch, struct nlattr *opt)
        q->perturbation = prandom_u32();
        INIT_LIST_HEAD(&q->new_flows);
        INIT_LIST_HEAD(&q->old_flows);
-       codel_params_init(&q->cparams);
+       codel_params_init(&q->cparams, sch);
        codel_stats_init(&q->cstats);
        q->cparams.ecn = true;
 
index a4ca4517cdc82843e21e5245989a59e89aa53702..634529e0ce6bddc44b48161b6f76bd79af0a2a8e 100644 (file)
@@ -229,7 +229,7 @@ static int gred_enqueue(struct sk_buff *skb, struct Qdisc *sch)
                break;
        }
 
-       if (q->backlog + qdisc_pkt_len(skb) <= q->limit) {
+       if (gred_backlog(t, q, sch) + qdisc_pkt_len(skb) <= q->limit) {
                q->backlog += qdisc_pkt_len(skb);
                return qdisc_enqueue_tail(skb, sch);
        }
@@ -553,7 +553,7 @@ static int gred_dump(struct Qdisc *sch, struct sk_buff *skb)
 
                opt.limit       = q->limit;
                opt.DP          = q->DP;
-               opt.backlog     = q->backlog;
+               opt.backlog     = gred_backlog(table, q, sch);
                opt.prio        = q->prio;
                opt.qth_min     = q->parms.qth_min >> q->parms.Wlog;
                opt.qth_max     = q->parms.qth_max >> q->parms.Wlog;
index fb7976aee61c84f38aecdc5c5f0d8be20e577fa9..4f15b7d730e13d6aaa58ba7a28262c9831afea95 100644 (file)
@@ -381,13 +381,14 @@ nomem:
 }
 
 
-/* Public interface to creat the association shared key.
+/* Public interface to create the association shared key.
  * See code above for the algorithm.
  */
 int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
 {
        struct sctp_auth_bytes  *secret;
        struct sctp_shared_key *ep_key;
+       struct sctp_chunk *chunk;
 
        /* If we don't support AUTH, or peer is not capable
         * we don't need to do anything.
@@ -410,6 +411,14 @@ int sctp_auth_asoc_init_active_key(struct sctp_association *asoc, gfp_t gfp)
        sctp_auth_key_put(asoc->asoc_shared_key);
        asoc->asoc_shared_key = secret;
 
+       /* Update send queue in case any chunk already in there now
+        * needs authenticating
+        */
+       list_for_each_entry(chunk, &asoc->outqueue.out_chunk_list, list) {
+               if (sctp_auth_send_cid(chunk->chunk_hdr->type, asoc))
+                       chunk->auth = 1;
+       }
+
        return 0;
 }
 
index 1ec19f6f0c2b9fe71ee9a7110873873274b9ab62..eeeba5adee6d939ab3429100d231a46e82b1ff94 100644 (file)
@@ -793,20 +793,26 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
 {
        u32 value_follows;
        int err;
+       struct page *scratch;
+
+       scratch = alloc_page(GFP_KERNEL);
+       if (!scratch)
+               return -ENOMEM;
+       xdr_set_scratch_buffer(xdr, page_address(scratch), PAGE_SIZE);
 
        /* res->status */
        err = gssx_dec_status(xdr, &res->status);
        if (err)
-               return err;
+               goto out_free;
 
        /* res->context_handle */
        err = gssx_dec_bool(xdr, &value_follows);
        if (err)
-               return err;
+               goto out_free;
        if (value_follows) {
                err = gssx_dec_ctx(xdr, res->context_handle);
                if (err)
-                       return err;
+                       goto out_free;
        } else {
                res->context_handle = NULL;
        }
@@ -814,11 +820,11 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        /* res->output_token */
        err = gssx_dec_bool(xdr, &value_follows);
        if (err)
-               return err;
+               goto out_free;
        if (value_follows) {
                err = gssx_dec_buffer(xdr, res->output_token);
                if (err)
-                       return err;
+                       goto out_free;
        } else {
                res->output_token = NULL;
        }
@@ -826,14 +832,17 @@ int gssx_dec_accept_sec_context(struct rpc_rqst *rqstp,
        /* res->delegated_cred_handle */
        err = gssx_dec_bool(xdr, &value_follows);
        if (err)
-               return err;
+               goto out_free;
        if (value_follows) {
                /* we do not support upcall servers sending this data. */
-               return -EINVAL;
+               err = -EINVAL;
+               goto out_free;
        }
 
        /* res->options */
        err = gssx_dec_option_array(xdr, &res->options);
 
+out_free:
+       __free_page(scratch);
        return err;
 }
index 46568b85c3339f57a0d6835e82eff4b67c3ba326..055453d486683ec19433961db220292e4f60571d 100644 (file)
@@ -338,7 +338,7 @@ int netdev_switch_fib_ipv4_add(u32 dst, int dst_len, struct fib_info *fi,
                                              fi, tos, type, nlflags,
                                              tb_id);
                if (!err)
-                       fi->fib_flags |= RTNH_F_EXTERNAL;
+                       fi->fib_flags |= RTNH_F_OFFLOAD;
        }
 
        return err;
@@ -364,7 +364,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
        const struct swdev_ops *ops;
        int err = 0;
 
-       if (!(fi->fib_flags & RTNH_F_EXTERNAL))
+       if (!(fi->fib_flags & RTNH_F_OFFLOAD))
                return 0;
 
        dev = netdev_switch_get_dev_by_nhs(fi);
@@ -376,7 +376,7 @@ int netdev_switch_fib_ipv4_del(u32 dst, int dst_len, struct fib_info *fi,
                err = ops->swdev_fib_ipv4_del(dev, htonl(dst), dst_len,
                                              fi, tos, type, tb_id);
                if (!err)
-                       fi->fib_flags &= ~RTNH_F_EXTERNAL;
+                       fi->fib_flags &= ~RTNH_F_OFFLOAD;
        }
 
        return err;
index 9074b5cede38b8edd75890b684a706d96b9f71ba..f485600c4507bc152cef654ae5667a03a52d990c 100644 (file)
@@ -2142,11 +2142,17 @@ static void tipc_sk_timeout(unsigned long data)
        peer_node = tsk_peer_node(tsk);
 
        if (tsk->probing_state == TIPC_CONN_PROBING) {
-               /* Previous probe not answered -> self abort */
-               skb = tipc_msg_create(TIPC_CRITICAL_IMPORTANCE,
-                                     TIPC_CONN_MSG, SHORT_H_SIZE, 0,
-                                     own_node, peer_node, tsk->portid,
-                                     peer_port, TIPC_ERR_NO_PORT);
+               if (!sock_owned_by_user(sk)) {
+                       sk->sk_socket->state = SS_DISCONNECTING;
+                       tsk->connected = 0;
+                       tipc_node_remove_conn(sock_net(sk), tsk_peer_node(tsk),
+                                             tsk_peer_port(tsk));
+                       sk->sk_state_change(sk);
+               } else {
+                       /* Try again later */
+                       sk_reset_timer(sk, &sk->sk_timer, (HZ / 20));
+               }
+
        } else {
                skb = tipc_msg_create(CONN_MANAGER, CONN_PROBE,
                                      INT_H_SIZE, 0, peer_node, own_node,
index 5266ea7b922b76d1977dea57cc7c227594c49285..06430598cf512fdaff480671620e8fa69c259bb5 100644 (file)
@@ -1880,6 +1880,10 @@ static long unix_stream_data_wait(struct sock *sk, long timeo,
                unix_state_unlock(sk);
                timeo = freezable_schedule_timeout(timeo);
                unix_state_lock(sk);
+
+               if (sock_flag(sk, SOCK_DEAD))
+                       break;
+
                clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
        }
 
@@ -1939,6 +1943,10 @@ static int unix_stream_recvmsg(struct socket *sock, struct msghdr *msg,
                struct sk_buff *skb, *last;
 
                unix_state_lock(sk);
+               if (sock_flag(sk, SOCK_DEAD)) {
+                       err = -ECONNRESET;
+                       goto unlock;
+               }
                last = skb = skb_peek(&sk->sk_receive_queue);
 again:
                if (skb == NULL) {
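
Both unix hunks close the same race: the peer can be marked SOCK_DEAD while the reader slept with the state lock dropped, so the state must be revalidated after every reacquisition. A minimal sketch of the recheck-after-sleep pattern; state_lock, data_available() and peer_is_dead() are hypothetical stand-ins:

static int wait_for_data(long timeo)
{
	int err = 0;

	spin_lock(&state_lock);
	while (!data_available()) {
		if (peer_is_dead()) {	/* may have died while we slept */
			err = -ECONNRESET;
			break;
		}
		spin_unlock(&state_lock);
		timeo = schedule_timeout_interruptible(timeo);
		spin_lock(&state_lock);	/* recheck state after relocking */
	}
	spin_unlock(&state_lock);
	return err;
}
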
index fff1bef6ed6d916f9019a63d708652f4ab07cddf..fd682832a0e3635d52c734871d5402d270336dc3 100644 (file)
@@ -1333,6 +1333,8 @@ static struct iw_statistics *cfg80211_wireless_stats(struct net_device *dev)
        memcpy(bssid, wdev->current_bss->pub.bssid, ETH_ALEN);
        wdev_unlock(wdev);
 
+       memset(&sinfo, 0, sizeof(sinfo));
+
        if (rdev_get_station(rdev, dev, bssid, &sinfo))
                return NULL;
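
The added memset() matters because rdev_get_station() only fills the station_info fields whose "filled" flags it sets; without zeroing, the untouched members read back as stack garbage. The general pattern, with a stand-in struct:

#include <linux/string.h>
#include <linux/types.h>

struct stats {				/* stand-in for struct station_info */
	u32 filled;			/* bitmap of fields the callee set */
	u32 signal;
	u64 rx_bytes;
};

static void query(struct stats *out)
{
	out->filled = 0x1;		/* pretend only 'signal' was reported */
	out->signal = 42;
}

static u64 get_rx_bytes(void)
{
	struct stats s;

	memset(&s, 0, sizeof(s));	/* unset fields must read as zero */
	query(&s);
	return s.rx_bytes;
}
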
 
index 526c4feb3b50d723d24b8c55288c8c941257da52..b58286ecd156fdb9de2a33ca0ede0fe3194bf289 100644 (file)
@@ -13,6 +13,8 @@
 #include <net/dst.h>
 #include <net/ip.h>
 #include <net/xfrm.h>
+#include <net/ip_tunnels.h>
+#include <net/ip6_tunnel.h>
 
 static struct kmem_cache *secpath_cachep __read_mostly;
 
@@ -186,6 +188,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
        struct xfrm_state *x = NULL;
        xfrm_address_t *daddr;
        struct xfrm_mode *inner_mode;
+       u32 mark = skb->mark;
        unsigned int family;
        int decaps = 0;
        int async = 0;
@@ -203,6 +206,18 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                                   XFRM_SPI_SKB_CB(skb)->daddroff);
        family = XFRM_SPI_SKB_CB(skb)->family;
 
+       /* if tunnel is present override skb->mark value with tunnel i_key */
+       if (XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4) {
+               switch (family) {
+               case AF_INET:
+                       mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip4->parms.i_key);
+                       break;
+               case AF_INET6:
+                       mark = be32_to_cpu(XFRM_TUNNEL_SKB_CB(skb)->tunnel.ip6->parms.i_key);
+                       break;
+               }
+       }
+
        /* Allocate new secpath or COW existing one. */
        if (!skb->sp || atomic_read(&skb->sp->refcnt) != 1) {
                struct sec_path *sp;
@@ -229,7 +244,7 @@ int xfrm_input(struct sk_buff *skb, int nexthdr, __be32 spi, int encap_type)
                        goto drop;
                }
 
-               x = xfrm_state_lookup(net, skb->mark, daddr, spi, nexthdr, family);
+               x = xfrm_state_lookup(net, mark, daddr, spi, nexthdr, family);
                if (x == NULL) {
                        XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
                        xfrm_audit_state_notfound(skb, family, spi, seq);
index dab57daae40856030790fa8070068a59d82220af..4fd725a0c500ebf69a02e06fcf37ae3035ae0d98 100644 (file)
@@ -99,6 +99,7 @@ static int xfrm_replay_overflow(struct xfrm_state *x, struct sk_buff *skb)
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                XFRM_SKB_CB(skb)->seq.output.low = ++x->replay.oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = 0;
                if (unlikely(x->replay.oseq == 0)) {
                        x->replay.oseq--;
                        xfrm_audit_state_replay_overflow(x, skb);
@@ -177,6 +178,7 @@ static int xfrm_replay_overflow_bmp(struct xfrm_state *x, struct sk_buff *skb)
 
        if (x->type->flags & XFRM_TYPE_REPLAY_PROT) {
                XFRM_SKB_CB(skb)->seq.output.low = ++replay_esn->oseq;
+               XFRM_SKB_CB(skb)->seq.output.hi = 0;
                if (unlikely(replay_esn->oseq == 0)) {
                        replay_esn->oseq--;
                        xfrm_audit_state_replay_overflow(x, skb);
index f5e39e35d73aa96c3551b0e46f9b26ab291d23aa..96688cd0f6f11bddee4451de1d09a9a8e5f212dd 100644 (file)
@@ -927,8 +927,8 @@ struct xfrm_state *xfrm_state_lookup_byspi(struct net *net, __be32 spi,
                        x->id.spi != spi)
                        continue;
 
-               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                xfrm_state_hold(x);
+               spin_unlock_bh(&net->xfrm.xfrm_state_lock);
                return x;
        }
        spin_unlock_bh(&net->xfrm.xfrm_state_lock);
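
Moving xfrm_state_hold() above the unlock closes a classic window: once xfrm_state_lock is dropped, another CPU may free x before the reference is taken. The general pattern is to grab the reference while the lookup structure is still protected; obj and table_find() below are hypothetical stand-ins:

struct obj {
	u32		key;
	atomic_t	refcnt;
};

static struct obj *lookup_and_hold(u32 key)
{
	struct obj *o;

	spin_lock_bh(&table_lock);
	o = table_find(key);		/* hypothetical lookup helper */
	if (o)
		atomic_inc(&o->refcnt);	/* take the reference under the lock */
	spin_unlock_bh(&table_lock);	/* only now is dropping the lock safe */
	return o;
}
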
index 89b1df4e72ab3423bce45011fb03f86c193f5ad4..c5ec977b9c3786097b214e1c835efd8fa337c173 100755 (executable)
@@ -3169,12 +3169,12 @@ sub process {
                }
 
 # check for global initialisers.
-               if ($line =~ /^\+(\s*$Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/) {
+               if ($line =~ /^\+$Type\s*$Ident(?:\s+$Modifier)*\s*=\s*(?:0|NULL|false)\s*;/) {
                        if (ERROR("GLOBAL_INITIALISERS",
                                  "do not initialise globals to 0 or NULL\n" .
                                      $herecurr) &&
                            $fix) {
-                               $fixed[$fixlinenr] =~ s/($Type\s*$Ident\s*(?:\s+$Modifier))*\s*=\s*(0|NULL|false)\s*;/$1;/;
+                               $fixed[$fixlinenr] =~ s/(^.$Type\s*$Ident(?:\s+$Modifier)*)\s*=\s*(0|NULL|false)\s*;/$1;/;
                        }
                }
 # check for static initialisers.
index 5b3add31f9f1202610e67e4d4868598e5846e594..2c9082ba61376fd7fb5dd375ca2afac1bb44d53a 100755 (executable)
@@ -212,5 +212,5 @@ EOF
     )
 }
 
-(ignore_list && syscall_list $(dirname $0)/../arch/x86/syscalls/syscall_32.tbl) | \
+(ignore_list && syscall_list $(dirname $0)/../arch/x86/entry/syscalls/syscall_32.tbl) | \
 $* -E -x c - > /dev/null
index a1504c4f19003d6d4a57971471b2d873f4e2bb09..25db8cff44a2036c0ecf1da69c3da5c0d567b782 100644 (file)
@@ -73,18 +73,11 @@ class LxLsmod(gdb.Command):
                 "        " if utils.get_long_type().sizeof == 8 else ""))
 
         for module in module_list():
-            ref = 0
-            module_refptr = module['refptr']
-            for cpu in cpus.cpu_list("cpu_possible_mask"):
-                refptr = cpus.per_cpu(module_refptr, cpu)
-                ref += refptr['incs']
-                ref -= refptr['decs']
-
             gdb.write("{address} {name:<19} {size:>8}  {ref}".format(
                 address=str(module['module_core']).split()[0],
                 name=module['name'].string(),
                 size=str(module['core_size']),
-                ref=str(ref)))
+                ref=str(module['refcnt']['counter'])))
 
             source_list = module['source_list']
             t = self._module_use_type.get_type().pointer()
index 0d03fcc489a49ee3221b1369ca2c1ff931c691cd..7d3f38fe02ba6ca7d75446c6d20e41c049b17b00 100644 (file)
@@ -209,8 +209,8 @@ static int cap_inode_readlink(struct dentry *dentry)
        return 0;
 }
 
-static int cap_inode_follow_link(struct dentry *dentry,
-                                struct nameidata *nameidata)
+static int cap_inode_follow_link(struct dentry *dentry, struct inode *inode,
+                                bool rcu)
 {
        return 0;
 }
index 8e9b1f4b9b45dfac98287fe969b869a3a29fb2bc..04c8feca081a3bafc8dde4f95ebc44badda4e5a5 100644 (file)
@@ -581,11 +581,12 @@ int security_inode_readlink(struct dentry *dentry)
        return security_ops->inode_readlink(dentry);
 }
 
-int security_inode_follow_link(struct dentry *dentry, struct nameidata *nd)
+int security_inode_follow_link(struct dentry *dentry, struct inode *inode,
+                              bool rcu)
 {
-       if (unlikely(IS_PRIVATE(d_backing_inode(dentry))))
+       if (unlikely(IS_PRIVATE(inode)))
                return 0;
-       return security_ops->inode_follow_link(dentry, nd);
+       return security_ops->inode_follow_link(dentry, inode, rcu);
 }
 
 int security_inode_permission(struct inode *inode, int mask)
index 3c17dda9571d4e97f7f460e162a6195bf215758b..0b122b1421a9dcc7dfd26ac6f80d00d1c6a0d55e 100644 (file)
@@ -761,7 +761,23 @@ int avc_has_perm(u32 ssid, u32 tsid, u16 tclass,
 
        rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
 
-       rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata);
+       rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc, auditdata, 0);
+       if (rc2)
+               return rc2;
+       return rc;
+}
+
+int avc_has_perm_flags(u32 ssid, u32 tsid, u16 tclass,
+                      u32 requested, struct common_audit_data *auditdata,
+                      int flags)
+{
+       struct av_decision avd;
+       int rc, rc2;
+
+       rc = avc_has_perm_noaudit(ssid, tsid, tclass, requested, 0, &avd);
+
+       rc2 = avc_audit(ssid, tsid, tclass, requested, &avd, rc,
+                       auditdata, flags);
        if (rc2)
                return rc2;
        return rc;
index 7dade28affba5a0ebc0944be49dbd59dbf5c8761..ffa5a642629a1cbf16467f7bc0ebd3b20cdf02f0 100644 (file)
@@ -1564,7 +1564,7 @@ static int cred_has_capability(const struct cred *cred,
 
        rc = avc_has_perm_noaudit(sid, sid, sclass, av, 0, &avd);
        if (audit == SECURITY_CAP_AUDIT) {
-               int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad);
+               int rc2 = avc_audit(sid, sid, sclass, av, &avd, rc, &ad, 0);
                if (rc2)
                        return rc2;
        }
@@ -2861,11 +2861,23 @@ static int selinux_inode_readlink(struct dentry *dentry)
        return dentry_has_perm(cred, dentry, FILE__READ);
 }
 
-static int selinux_inode_follow_link(struct dentry *dentry, struct nameidata *nameidata)
+static int selinux_inode_follow_link(struct dentry *dentry, struct inode *inode,
+                                    bool rcu)
 {
        const struct cred *cred = current_cred();
+       struct common_audit_data ad;
+       struct inode_security_struct *isec;
+       u32 sid;
 
-       return dentry_has_perm(cred, dentry, FILE__READ);
+       validate_creds(cred);
+
+       ad.type = LSM_AUDIT_DATA_DENTRY;
+       ad.u.dentry = dentry;
+       sid = cred_sid(cred);
+       isec = inode->i_security;
+
+       return avc_has_perm_flags(sid, isec->sid, isec->sclass, FILE__READ, &ad,
+                                 rcu ? MAY_NOT_BLOCK : 0);
 }
 
 static noinline int audit_inode_permission(struct inode *inode,
index ddf8eec03f211757845de5378afd1c2d5ebfe774..5973c327c54e712edba1034808defd01afa8a8a0 100644 (file)
@@ -130,7 +130,8 @@ static inline int avc_audit(u32 ssid, u32 tsid,
                            u16 tclass, u32 requested,
                            struct av_decision *avd,
                            int result,
-                           struct common_audit_data *a)
+                           struct common_audit_data *a,
+                           int flags)
 {
        u32 audited, denied;
        audited = avc_audit_required(requested, avd, result, 0, &denied);
@@ -138,7 +139,7 @@ static inline int avc_audit(u32 ssid, u32 tsid,
                return 0;
        return slow_avc_audit(ssid, tsid, tclass,
                              requested, audited, denied, result,
-                             a, 0);
+                             a, flags);
 }
 
 #define AVC_STRICT 1 /* Ignore permissive mode. */
@@ -150,6 +151,10 @@ int avc_has_perm_noaudit(u32 ssid, u32 tsid,
 int avc_has_perm(u32 ssid, u32 tsid,
                 u16 tclass, u32 requested,
                 struct common_audit_data *auditdata);
+int avc_has_perm_flags(u32 ssid, u32 tsid,
+                      u16 tclass, u32 requested,
+                      struct common_audit_data *auditdata,
+                      int flags);
 
 u32 avc_policy_seqno(void);
 
index cf4cedf2b420f12de4d46cf702fabeab2c136b02..6dad042630d8c4ab22f2e9c2f4a98823d1c5092f 100644 (file)
@@ -916,7 +916,6 @@ static struct ac97c_platform_data *atmel_ac97c_probe_dt(struct device *dev)
 {
        struct ac97c_platform_data *pdata;
        struct device_node *node = dev->of_node;
-       const struct of_device_id *match;
 
        if (!node) {
                dev_err(dev, "Device does not have associated DT data\n");
index 886be7da989d1ab52d647105ef3648a8045facbe..f845ecf7e172935f938bc52ecfe9c95c7bac4e11 100644 (file)
@@ -121,16 +121,9 @@ static struct snd_timer *mytimer;
 static int __init snd_hrtimer_init(void)
 {
        struct snd_timer *timer;
-       struct timespec tp;
        int err;
 
-       hrtimer_get_res(CLOCK_MONOTONIC, &tp);
-       if (tp.tv_sec > 0 || !tp.tv_nsec) {
-               pr_err("snd-hrtimer: Invalid resolution %u.%09u",
-                          (unsigned)tp.tv_sec, (unsigned)tp.tv_nsec);
-               return -EINVAL;
-       }
-       resolution = tp.tv_nsec;
+       resolution = hrtimer_resolution;
 
        /* Create a new timer and set up the fields */
        err = snd_timer_global_new("hrtimer", SNDRV_TIMER_GLOBAL_HRTIMER,
index ac6b33f3779c2ee8a08fe80aa64172d3858d452b..7d45645f10ba99e33b54b3218559778db3aca36a 100644 (file)
@@ -339,7 +339,7 @@ static int snd_pcm_update_hw_ptr0(struct snd_pcm_substream *substream,
                if (delta > new_hw_ptr) {
                        /* check for double acknowledged interrupts */
                        hdelta = curr_jiffies - runtime->hw_ptr_jiffies;
-                       if (hdelta > runtime->hw_ptr_buffer_jiffies/2) {
+                       if (hdelta > runtime->hw_ptr_buffer_jiffies/2 + 1) {
                                hw_base += runtime->buffer_size;
                                if (hw_base >= runtime->boundary) {
                                        hw_base = 0;
index d9647bd84d0f49e9b532fa1cbeb685645dfe33b9..27e25bb78c9782a5ba9fff5933d495eb4f200878 100644 (file)
@@ -42,16 +42,13 @@ struct snd_pcsp pcsp_chip;
 static int snd_pcsp_create(struct snd_card *card)
 {
        static struct snd_device_ops ops = { };
-       struct timespec tp;
-       int err;
-       int div, min_div, order;
-
-       hrtimer_get_res(CLOCK_MONOTONIC, &tp);
+       unsigned int resolution = hrtimer_resolution;
+       int err, div, min_div, order;
 
        if (!nopcm) {
-               if (tp.tv_sec || tp.tv_nsec > PCSP_MAX_PERIOD_NS) {
+               if (resolution > PCSP_MAX_PERIOD_NS) {
                        printk(KERN_ERR "PCSP: Timer resolution is not sufficient "
-                               "(%linS)\n", tp.tv_nsec);
+                               "(%unS)\n", resolution);
                        printk(KERN_ERR "PCSP: Make sure you have HPET and ACPI "
                                "enabled.\n");
                        printk(KERN_ERR "PCSP: Turned into nopcm mode.\n");
@@ -59,13 +56,13 @@ static int snd_pcsp_create(struct snd_card *card)
                }
        }
 
-       if (loops_per_jiffy >= PCSP_MIN_LPJ && tp.tv_nsec <= PCSP_MIN_PERIOD_NS)
+       if (loops_per_jiffy >= PCSP_MIN_LPJ && resolution <= PCSP_MIN_PERIOD_NS)
                min_div = MIN_DIV;
        else
                min_div = MAX_DIV;
 #if PCSP_DEBUG
-       printk(KERN_DEBUG "PCSP: lpj=%li, min_div=%i, res=%li\n",
-              loops_per_jiffy, min_div, tp.tv_nsec);
+       printk(KERN_DEBUG "PCSP: lpj=%li, min_div=%i, res=%u\n",
+              loops_per_jiffy, min_div, resolution);
 #endif
 
        div = MAX_DIV / min_div;
index 7371e0c3926f32a9104b521d0bf70f1c35f0740f..1eabcdf69457311129b766ec237d37e402f640bc 100644 (file)
@@ -246,6 +246,9 @@ static int hda_reg_read(void *context, unsigned int reg, unsigned int *val)
                return hda_reg_read_stereo_amp(codec, reg, val);
        if (verb == AC_VERB_GET_PROC_COEF)
                return hda_reg_read_coef(codec, reg, val);
+       if ((verb & 0x700) == AC_VERB_SET_AMP_GAIN_MUTE)
+               reg &= ~AC_AMP_FAKE_MUTE;
+
        err = snd_hdac_exec_verb(codec, reg, 0, val);
        if (err < 0)
                return err;
@@ -265,6 +268,9 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
        unsigned int verb;
        int i, bytes, err;
 
+       if (codec->caps_overwriting)
+               return 0;
+
        reg &= ~0x00080000U; /* drop GET bit */
        reg |= (codec->addr << 28);
        verb = get_verb(reg);
@@ -280,6 +286,8 @@ static int hda_reg_write(void *context, unsigned int reg, unsigned int val)
 
        switch (verb & 0xf00) {
        case AC_VERB_SET_AMP_GAIN_MUTE:
+               if ((reg & AC_AMP_FAKE_MUTE) && (val & AC_AMP_MUTE))
+                       val = 0;
                verb = AC_VERB_SET_AMP_GAIN_MUTE;
                if (reg & AC_AMP_GET_LEFT)
                        verb |= AC_AMP_SET_LEFT >> 8;
index d2f615ab177a7ca021d9f802d61ba2b57b10a7ca..2153d31fb66312025cb6221afd8476d313261785 100644 (file)
@@ -12,12 +12,14 @@ if SND_MIPS
 config SND_SGI_O2
        tristate "SGI O2 Audio"
        depends on SGI_IP32
+       select SND_PCM
         help
                 Sound support for the SGI O2 Workstation. 
 
 config SND_SGI_HAL2
         tristate "SGI HAL2 Audio"
         depends on SGI_HAS_HAL2
+       select SND_PCM
         help
                 Sound support for the SGI Indy and Indigo2 Workstation.
 
index 6610bd096fc93560fd463c5abf5bce44840497be..d17937b92331e4c1160d1cebb1ed77398a684a01 100644 (file)
@@ -32,6 +32,7 @@
 #include <linux/pci.h>
 #include <linux/stringify.h>
 #include <linux/module.h>
+#include <linux/vmalloc.h>
 
 #ifdef MODULE_FIRMWARE
 MODULE_FIRMWARE("asihpi/dsp5000.bin");
index b49feff0a31982e7c22071c08e8d088e91a97727..5645481af3d9571b8340c963a27c34e377c405c5 100644 (file)
@@ -436,7 +436,7 @@ static unsigned int get_num_devices(struct hda_codec *codec, hda_nid_t nid)
            get_wcaps_type(wcaps) != AC_WID_PIN)
                return 0;
 
-       parm = snd_hda_param_read(codec, nid, AC_PAR_DEVLIST_LEN);
+       parm = snd_hdac_read_parm_uncached(&codec->core, nid, AC_PAR_DEVLIST_LEN);
        if (parm == -1 && codec->bus->rirb_error)
                parm = 0;
        return parm & AC_DEV_LIST_LEN_MASK;
@@ -1375,6 +1375,31 @@ int snd_hda_override_amp_caps(struct hda_codec *codec, hda_nid_t nid, int dir,
 }
 EXPORT_SYMBOL_GPL(snd_hda_override_amp_caps);
 
+/**
+ * snd_hda_codec_amp_update - update the AMP mono value
+ * @codec: HD-audio codec
+ * @nid: NID to read the AMP value
+ * @ch: channel to update (0 or 1)
+ * @dir: #HDA_INPUT or #HDA_OUTPUT
+ * @idx: the index value (only for input direction)
+ * @mask: bit mask to set
+ * @val: the bits value to set
+ *
+ * Update the AMP values for the given channel, direction and index.
+ */
+int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid,
+                            int ch, int dir, int idx, int mask, int val)
+{
+       unsigned int cmd = snd_hdac_regmap_encode_amp(nid, ch, dir, idx);
+
+       /* enable fake mute if no h/w mute but min=mute */
+       if ((query_amp_caps(codec, nid, dir) &
+            (AC_AMPCAP_MUTE | AC_AMPCAP_MIN_MUTE)) == AC_AMPCAP_MIN_MUTE)
+               cmd |= AC_AMP_FAKE_MUTE;
+       return snd_hdac_regmap_update_raw(&codec->core, cmd, mask, val);
+}
+EXPORT_SYMBOL_GPL(snd_hda_codec_amp_update);
+
 /**
  * snd_hda_codec_amp_stereo - update the AMP stereo values
  * @codec: HD-audio codec
index 788f969b1a680e50f61940e31fe1a4609ccce402..ac0db1679f098ee4ec08c6770fe1f1374c8bf431 100644 (file)
@@ -844,8 +844,16 @@ static hda_nid_t path_power_update(struct hda_codec *codec,
                        snd_hda_codec_write(codec, nid, 0,
                                            AC_VERB_SET_POWER_STATE, state);
                        changed = nid;
+                       /* All known codecs so far seem capable of handling
+                        * widget states even in D3.
+                        * If any new codec needs to restore the widget
+                        * states after the D0 transition, call the function
+                        * below.
+                        */
+#if 0 /* disabled */
                        if (state == AC_PWRST_D0)
                                snd_hdac_regmap_sync_node(&codec->core, nid);
+#endif
                }
        }
        return changed;
@@ -4918,9 +4926,12 @@ int snd_hda_gen_parse_auto_config(struct hda_codec *codec,
  dig_only:
        parse_digital(codec);
 
-       if (spec->power_down_unused || codec->power_save_node)
+       if (spec->power_down_unused || codec->power_save_node) {
                if (!codec->power_filter)
                        codec->power_filter = snd_hda_gen_path_power_filter;
+               if (!codec->patch_ops.stream_pm)
+                       codec->patch_ops.stream_pm = snd_hda_gen_stream_pm;
+       }
 
        if (!spec->no_analog && spec->beep_nid) {
                err = snd_hda_attach_beep_device(codec, spec->beep_nid);
index 34040d26c94ff04c84e8ee1cb2115eb76b0e755d..b6db25b23dd316d0205d6a14fed365f5ae8cde4f 100644 (file)
@@ -340,6 +340,11 @@ enum {
 #define use_vga_switcheroo(chip)       0
 #endif
 
+#define CONTROLLER_IN_GPU(pci) (((pci)->device == 0x0a0c) || \
+                                       ((pci)->device == 0x0c0c) || \
+                                       ((pci)->device == 0x0d0c) || \
+                                       ((pci)->device == 0x160c))
+
 static char *driver_short_names[] = {
        [AZX_DRIVER_ICH] = "HDA Intel",
        [AZX_DRIVER_PCH] = "HDA Intel PCH",
@@ -1854,8 +1859,17 @@ static int azx_probe_continue(struct azx *chip)
        if (chip->driver_caps & AZX_DCAPS_I915_POWERWELL) {
 #ifdef CONFIG_SND_HDA_I915
                err = hda_i915_init(hda);
-               if (err < 0)
-                       goto out_free;
+               if (err < 0) {
+                       /* if the controller is bound only with HDMI/DP
+                        * (for HSW and BDW), we need to abort the probe;
+                        * for other chips, still continue probing as other
+                        * codecs can be on the same link.
+                        */
+                       if (CONTROLLER_IN_GPU(pci))
+                               goto out_free;
+                       else
+                               goto skip_i915;
+               }
                err = hda_display_power(hda, true);
                if (err < 0) {
                        dev_err(chip->card->dev,
@@ -1865,6 +1879,9 @@ static int azx_probe_continue(struct azx *chip)
 #endif
        }
 
+#ifdef CONFIG_SND_HDA_I915
+ skip_i915:
+#endif
        err = azx_first_init(chip);
        if (err < 0)
                goto out_free;
@@ -2089,6 +2106,8 @@ static const struct pci_device_id azx_ids[] = {
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        { PCI_DEVICE(0x1002, 0xaab0),
          .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
+       { PCI_DEVICE(0x1002, 0xaac8),
+         .driver_data = AZX_DRIVER_ATIHDMI_NS | AZX_DCAPS_PRESET_ATI_HDMI_NS },
        /* VIA VT8251/VT8237A */
        { PCI_DEVICE(0x1106, 0x3288),
          .driver_data = AZX_DRIVER_VIA | AZX_DCAPS_POSFIX_VIA },
index 3b567f42296b9d6b2ca148c66c59c12b5628cb9e..bed66c3144318de3f82dcddeb2445a84ad9ce594 100644 (file)
@@ -129,8 +129,8 @@ int snd_hda_mixer_amp_switch_put_beep(struct snd_kcontrol *kcontrol,
 /* lowlevel accessor with caching; use carefully */
 #define snd_hda_codec_amp_read(codec, nid, ch, dir, idx) \
        snd_hdac_regmap_get_amp(&(codec)->core, nid, ch, dir, idx)
-#define snd_hda_codec_amp_update(codec, nid, ch, dir, idx, mask, val) \
-       snd_hdac_regmap_update_amp(&(codec)->core, nid, ch, dir, idx, mask, val)
+int snd_hda_codec_amp_update(struct hda_codec *codec, hda_nid_t nid,
+                            int ch, int dir, int idx, int mask, int val);
 int snd_hda_codec_amp_stereo(struct hda_codec *codec, hda_nid_t nid,
                             int dir, int idx, int mask, int val);
 int snd_hda_codec_amp_init(struct hda_codec *codec, hda_nid_t nid, int ch,
index f8f0dfbef1494538f2d2a370dc792cf860e68868..78b719b5b34dd4959b6c3e755f3625cef61b022d 100644 (file)
@@ -968,6 +968,14 @@ static const struct hda_codec_preset snd_hda_preset_conexant[] = {
          .patch = patch_conexant_auto },
        { .id = 0x14f150b9, .name = "CX20665",
          .patch = patch_conexant_auto },
+       { .id = 0x14f150f1, .name = "CX20721",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f150f2, .name = "CX20722",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f150f3, .name = "CX20723",
+         .patch = patch_conexant_auto },
+       { .id = 0x14f150f4, .name = "CX20724",
+         .patch = patch_conexant_auto },
        { .id = 0x14f1510f, .name = "CX20751/2",
          .patch = patch_conexant_auto },
        { .id = 0x14f15110, .name = "CX20751/2",
@@ -1002,6 +1010,10 @@ MODULE_ALIAS("snd-hda-codec-id:14f150ab");
 MODULE_ALIAS("snd-hda-codec-id:14f150ac");
 MODULE_ALIAS("snd-hda-codec-id:14f150b8");
 MODULE_ALIAS("snd-hda-codec-id:14f150b9");
+MODULE_ALIAS("snd-hda-codec-id:14f150f1");
+MODULE_ALIAS("snd-hda-codec-id:14f150f2");
+MODULE_ALIAS("snd-hda-codec-id:14f150f3");
+MODULE_ALIAS("snd-hda-codec-id:14f150f4");
 MODULE_ALIAS("snd-hda-codec-id:14f1510f");
 MODULE_ALIAS("snd-hda-codec-id:14f15110");
 MODULE_ALIAS("snd-hda-codec-id:14f15111");
index e2afd53cc14c7356b84d80e3c2aaed30d3c8ab49..6d010452c1f5c5d131c0ac0a4ae3b2c539e56ad9 100644 (file)
@@ -883,6 +883,8 @@ static struct alc_codec_rename_pci_table rename_pci_tbl[] = {
        { 0x10ec0668, 0x1028, 0, "ALC3661" },
        { 0x10ec0275, 0x1028, 0, "ALC3260" },
        { 0x10ec0899, 0x1028, 0, "ALC3861" },
+       { 0x10ec0298, 0x1028, 0, "ALC3266" },
+       { 0x10ec0256, 0x1028, 0, "ALC3246" },
        { 0x10ec0670, 0x1025, 0, "ALC669X" },
        { 0x10ec0676, 0x1025, 0, "ALC679X" },
        { 0x10ec0282, 0x1043, 0, "ALC3229" },
@@ -2166,6 +2168,7 @@ static const struct hda_fixup alc882_fixups[] = {
 static const struct snd_pci_quirk alc882_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x006c, "Acer Aspire 9810", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x0090, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
+       SND_PCI_QUIRK(0x1025, 0x0107, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x010a, "Acer Ferrari 5000", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x0110, "Acer Aspire", ALC883_FIXUP_ACER_EAPD),
        SND_PCI_QUIRK(0x1025, 0x0112, "Acer Aspire 9303", ALC883_FIXUP_ACER_EAPD),
@@ -3673,6 +3676,10 @@ static void alc_headset_mode_mic_in(struct hda_codec *codec, hda_nid_t hp_pin,
                alc_process_coef_fw(codec, coef0293);
                snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
                break;
+       case 0x10ec0662:
+               snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
+               snd_hda_set_pin_ctl_cache(codec, mic_pin, PIN_VREF50);
+               break;
        case 0x10ec0668:
                alc_write_coef_idx(codec, 0x11, 0x0001);
                snd_hda_set_pin_ctl_cache(codec, hp_pin, 0);
@@ -3738,7 +3745,6 @@ static void alc_headset_mode_default(struct hda_codec *codec)
        case 0x10ec0288:
                alc_process_coef_fw(codec, coef0288);
                break;
-               break;
        case 0x10ec0292:
                alc_process_coef_fw(codec, coef0292);
                break;
@@ -4012,7 +4018,7 @@ static void alc_update_headset_mode(struct hda_codec *codec)
        if (new_headset_mode != ALC_HEADSET_MODE_MIC) {
                snd_hda_set_pin_ctl_cache(codec, hp_pin,
                                          AC_PINCTL_OUT_EN | AC_PINCTL_HP_EN);
-               if (spec->headphone_mic_pin)
+               if (spec->headphone_mic_pin && spec->headphone_mic_pin != hp_pin)
                        snd_hda_set_pin_ctl_cache(codec, spec->headphone_mic_pin,
                                                  PIN_VREFHIZ);
        }
@@ -4215,6 +4221,23 @@ static void alc_fixup_dell_xps13(struct hda_codec *codec,
        }
 }
 
+static void alc_fixup_headset_mode_alc662(struct hda_codec *codec,
+                               const struct hda_fixup *fix, int action)
+{
+       struct alc_spec *spec = codec->spec;
+
+       if (action == HDA_FIXUP_ACT_PRE_PROBE) {
+               spec->parse_flags |= HDA_PINCFG_HEADSET_MIC;
+               spec->gen.hp_mic = 1; /* Mic-in is same pin as headphone */
+
+               /* Disable boost for mic-in permanently. (This code is only called
+                  from quirks that guarantee that the headphone is at NID 0x1b.) */
+               snd_hda_codec_write(codec, 0x1b, 0, AC_VERB_SET_AMP_GAIN_MUTE, 0x7000);
+               snd_hda_override_wcaps(codec, 0x1b, get_wcaps(codec, 0x1b) & ~AC_WCAP_IN_AMP);
+       } else
+               alc_fixup_headset_mode(codec, fix, action);
+}
+
 static void alc_fixup_headset_mode_alc668(struct hda_codec *codec,
                                const struct hda_fixup *fix, int action)
 {
@@ -4492,6 +4515,8 @@ enum {
        ALC288_FIXUP_DELL_HEADSET_MODE,
        ALC288_FIXUP_DELL1_MIC_NO_PRESENCE,
        ALC288_FIXUP_DELL_XPS_13_GPIO6,
+       ALC292_FIXUP_DELL_E7X,
+       ALC292_FIXUP_DISABLE_AAMIX,
 };
 
 static const struct hda_fixup alc269_fixups[] = {
@@ -5014,6 +5039,16 @@ static const struct hda_fixup alc269_fixups[] = {
                .chained = true,
                .chain_id = ALC288_FIXUP_DELL1_MIC_NO_PRESENCE
        },
+       [ALC292_FIXUP_DISABLE_AAMIX] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_disable_aamix,
+       },
+       [ALC292_FIXUP_DELL_E7X] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_dell_xps13,
+               .chained = true,
+               .chain_id = ALC292_FIXUP_DISABLE_AAMIX
+       },
 };
 
 static const struct snd_pci_quirk alc269_fixup_tbl[] = {
@@ -5026,6 +5061,8 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1025, 0x0775, "Acer Aspire E1-572", ALC271_FIXUP_HP_GATE_MIC_JACK_E1_572),
        SND_PCI_QUIRK(0x1025, 0x079b, "Acer Aspire V5-573G", ALC282_FIXUP_ASPIRE_V5_PINS),
        SND_PCI_QUIRK(0x1028, 0x0470, "Dell M101z", ALC269_FIXUP_DELL_M101Z),
+       SND_PCI_QUIRK(0x1028, 0x05ca, "Dell Latitude E7240", ALC292_FIXUP_DELL_E7X),
+       SND_PCI_QUIRK(0x1028, 0x05cb, "Dell Latitude E7440", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x05da, "Dell Vostro 5460", ALC290_FIXUP_SUBWOOFER),
        SND_PCI_QUIRK(0x1028, 0x05f4, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x05f5, "Dell", ALC269_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5035,6 +5072,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x1028, 0x0638, "Dell Inspiron 5439", ALC290_FIXUP_MONO_SPEAKERS_HSJACK),
        SND_PCI_QUIRK(0x1028, 0x064a, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x064b, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
+       SND_PCI_QUIRK(0x1028, 0x0665, "Dell XPS 13", ALC292_FIXUP_DELL_E7X),
        SND_PCI_QUIRK(0x1028, 0x06c7, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06d9, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
        SND_PCI_QUIRK(0x1028, 0x06da, "Dell", ALC293_FIXUP_DELL1_MIC_NO_PRESENCE),
@@ -5119,6 +5157,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x104d, 0x9099, "Sony VAIO S13", ALC275_FIXUP_SONY_DISABLE_AAMIX),
        SND_PCI_QUIRK(0x10cf, 0x1475, "Lifebook", ALC269_FIXUP_LIFEBOOK),
        SND_PCI_QUIRK(0x10cf, 0x15dc, "Lifebook T731", ALC269_FIXUP_LIFEBOOK_HP_PIN),
+       SND_PCI_QUIRK(0x10cf, 0x1757, "Lifebook E752", ALC269_FIXUP_LIFEBOOK_HP_PIN),
        SND_PCI_QUIRK(0x10cf, 0x1845, "Lifebook U904", ALC269_FIXUP_LIFEBOOK_EXTMIC),
        SND_PCI_QUIRK(0x144d, 0xc109, "Samsung Ativ book 9 (NP900X3G)", ALC269_FIXUP_INV_DMIC),
        SND_PCI_QUIRK(0x1458, 0xfa53, "Gigabyte BXBT-2807", ALC283_FIXUP_BXBT2807_MIC),
@@ -5148,6 +5187,7 @@ static const struct snd_pci_quirk alc269_fixup_tbl[] = {
        SND_PCI_QUIRK(0x17aa, 0x5026, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x5034, "Thinkpad T450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5036, "Thinkpad T450s", ALC292_FIXUP_TPT440_DOCK),
+       SND_PCI_QUIRK(0x17aa, 0x503c, "Thinkpad L450", ALC292_FIXUP_TPT440_DOCK),
        SND_PCI_QUIRK(0x17aa, 0x5109, "Thinkpad", ALC269_FIXUP_LIMIT_INT_MIC_BOOST),
        SND_PCI_QUIRK(0x17aa, 0x3bf8, "Quanta FL1", ALC269_FIXUP_PCM_44K),
        SND_PCI_QUIRK(0x17aa, 0x9e54, "LENOVO NB", ALC269_FIXUP_LENOVO_EAPD),
@@ -5345,6 +5385,20 @@ static const struct snd_hda_pin_quirk alc269_pin_fixup_tbl[] = {
                {0x17, 0x40000000},
                {0x1d, 0x40700001},
                {0x21, 0x02211050}),
+       SND_HDA_PIN_QUIRK(0x10ec0255, 0x1028, "Dell Inspiron 5548", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC255_STANDARD_PINS,
+               {0x12, 0x90a60180},
+               {0x14, 0x90170130},
+               {0x17, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x21, 0x02211040}),
+       SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
+               ALC255_STANDARD_PINS,
+               {0x12, 0x90a60160},
+               {0x14, 0x90170120},
+               {0x17, 0x40000000},
+               {0x1d, 0x40700001},
+               {0x21, 0x02211030}),
        SND_HDA_PIN_QUIRK(0x10ec0256, 0x1028, "Dell", ALC255_FIXUP_DELL1_MIC_NO_PRESENCE,
                ALC256_STANDARD_PINS,
                {0x13, 0x40000000}),
@@ -6079,7 +6133,9 @@ enum {
        ALC662_FIXUP_NO_JACK_DETECT,
        ALC662_FIXUP_ZOTAC_Z68,
        ALC662_FIXUP_INV_DMIC,
+       ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
        ALC668_FIXUP_DELL_MIC_NO_PRESENCE,
+       ALC662_FIXUP_HEADSET_MODE,
        ALC668_FIXUP_HEADSET_MODE,
        ALC662_FIXUP_BASS_MODE4_CHMAP,
        ALC662_FIXUP_BASS_16,
@@ -6272,6 +6328,20 @@ static const struct hda_fixup alc662_fixups[] = {
                .chained = true,
                .chain_id = ALC668_FIXUP_DELL_MIC_NO_PRESENCE
        },
+       [ALC662_FIXUP_DELL_MIC_NO_PRESENCE] = {
+               .type = HDA_FIXUP_PINS,
+               .v.pins = (const struct hda_pintbl[]) {
+                       { 0x19, 0x03a1113c }, /* use as headset mic, without its own jack detect */
+                       /* headphone mic by setting pin control of 0x1b (headphone out) to in + vref_50 */
+                       { }
+               },
+               .chained = true,
+               .chain_id = ALC662_FIXUP_HEADSET_MODE
+       },
+       [ALC662_FIXUP_HEADSET_MODE] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = alc_fixup_headset_mode_alc662,
+       },
        [ALC668_FIXUP_DELL_MIC_NO_PRESENCE] = {
                .type = HDA_FIXUP_PINS,
                .v.pins = (const struct hda_pintbl[]) {
@@ -6423,6 +6493,18 @@ static const struct hda_model_fixup alc662_fixup_models[] = {
 };
 
 static const struct snd_hda_pin_quirk alc662_pin_fixup_tbl[] = {
+       SND_HDA_PIN_QUIRK(0x10ec0662, 0x1028, "Dell", ALC662_FIXUP_DELL_MIC_NO_PRESENCE,
+               {0x12, 0x4004c000},
+               {0x14, 0x01014010},
+               {0x15, 0x411111f0},
+               {0x16, 0x411111f0},
+               {0x18, 0x01a19020},
+               {0x19, 0x411111f0},
+               {0x1a, 0x0181302f},
+               {0x1b, 0x0221401f},
+               {0x1c, 0x411111f0},
+               {0x1d, 0x4054c601},
+               {0x1e, 0x411111f0}),
        SND_HDA_PIN_QUIRK(0x10ec0668, 0x1028, "Dell", ALC668_FIXUP_AUTO_MUTE,
                {0x12, 0x99a30130},
                {0x14, 0x90170110},
index 43c99ce4a520c3fc24a720f6c76ba4222403ad3b..6c66d7e164391b7e824e2c72b5fa2e5ae889f16d 100644 (file)
@@ -100,6 +100,7 @@ enum {
        STAC_HP_ENVY_BASS,
        STAC_HP_BNB13_EQ,
        STAC_HP_ENVY_TS_BASS,
+       STAC_HP_ENVY_TS_DAC_BIND,
        STAC_92HD83XXX_GPIO10_EAPD,
        STAC_92HD83XXX_MODELS
 };
@@ -2171,6 +2172,22 @@ static void stac92hd83xxx_fixup_gpio10_eapd(struct hda_codec *codec,
        spec->eapd_switch = 0;
 }
 
+static void hp_envy_ts_fixup_dac_bind(struct hda_codec *codec,
+                                           const struct hda_fixup *fix,
+                                           int action)
+{
+       struct sigmatel_spec *spec = codec->spec;
+       static hda_nid_t preferred_pairs[] = {
+               0xd, 0x13,
+               0
+       };
+
+       if (action != HDA_FIXUP_ACT_PRE_PROBE)
+               return;
+
+       spec->gen.preferred_dacs = preferred_pairs;
+}
+
 static const struct hda_verb hp_bnb13_eq_verbs[] = {
        /* 44.1KHz base */
        { 0x22, 0x7A6, 0x3E },
@@ -2686,6 +2703,12 @@ static const struct hda_fixup stac92hd83xxx_fixups[] = {
                        {}
                },
        },
+       [STAC_HP_ENVY_TS_DAC_BIND] = {
+               .type = HDA_FIXUP_FUNC,
+               .v.func = hp_envy_ts_fixup_dac_bind,
+               .chained = true,
+               .chain_id = STAC_HP_ENVY_TS_BASS,
+       },
        [STAC_92HD83XXX_GPIO10_EAPD] = {
                .type = HDA_FIXUP_FUNC,
                .v.func = stac92hd83xxx_fixup_gpio10_eapd,
@@ -2764,6 +2787,8 @@ static const struct snd_pci_quirk stac92hd83xxx_fixup_tbl[] = {
                          "HP bNB13", STAC_HP_BNB13_EQ),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x190e,
                          "HP ENVY TS", STAC_HP_ENVY_TS_BASS),
+       SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1967,
+                         "HP ENVY TS", STAC_HP_ENVY_TS_DAC_BIND),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1940,
                          "HP bNB13", STAC_HP_BNB13_EQ),
        SND_PCI_QUIRK(PCI_VENDOR_ID_HP, 0x1941,
@@ -4403,7 +4428,6 @@ static const struct hda_codec_ops stac_patch_ops = {
 #ifdef CONFIG_PM
        .suspend = stac_suspend,
 #endif
-       .stream_pm = snd_hda_gen_stream_pm,
        .reboot_notify = stac_shutup,
 };
 
@@ -4697,7 +4721,8 @@ static int patch_stac92hd71bxx(struct hda_codec *codec)
                return err;
 
        spec = codec->spec;
-       codec->power_save_node = 1;
+       /* disabled power_save_node since it causes noise on a Dell machine */
+       /* codec->power_save_node = 1; */
        spec->linear_tone_beep = 0;
        spec->gen.own_eapd_ctl = 1;
        spec->gen.power_down_unused = 1;
index 31a95cca015d4d1c34a1facff2e226b6821a5203..bab6c04932aa050ff63f054bf172acf288f5ee5e 100644 (file)
@@ -449,6 +449,15 @@ static int via_suspend(struct hda_codec *codec)
 
        return 0;
 }
+
+static int via_resume(struct hda_codec *codec)
+{
+       /* some delay here to make jack detection work (bko#98921) */
+       msleep(10);
+       codec->patch_ops.init(codec);
+       regcache_sync(codec->core.regmap);
+       return 0;
+}
 #endif
 
 #ifdef CONFIG_PM
@@ -475,6 +484,7 @@ static const struct hda_codec_ops via_patch_ops = {
        .stream_pm = snd_hda_gen_stream_pm,
 #ifdef CONFIG_PM
        .suspend = via_suspend,
+       .resume = via_resume,
        .check_power_status = via_check_power_status,
 #endif
 };
index d51703e305238700bce9da8184971ffa516247ca..0a4ad5feb82e7817f7036f86c973d02b1dfecb9b 100644 (file)
@@ -72,7 +72,6 @@ static void hda_fixup_thinkpad_acpi(struct hda_codec *codec,
                if (led_set_func(TPACPI_LED_MUTE, false) >= 0) {
                        old_vmaster_hook = spec->vmaster_mute.hook;
                        spec->vmaster_mute.hook = update_tpacpi_mute_led;
-                       spec->vmaster_mute_enum = 1;
                        removefunc = false;
                }
                if (led_set_func(TPACPI_LED_MICMUTE, false) >= 0) {
index 2ffb9a0570dc8db3625cc8224f034995ea88d7dd..3d44fc50e4d0c112d35627d01491e2f07674c459 100644 (file)
@@ -623,14 +623,14 @@ static int mc13783_probe(struct snd_soc_codec *codec)
                                AUDIO_SSI_SEL, 0);
        else
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_CODEC,
-                               0, AUDIO_SSI_SEL);
+                               AUDIO_SSI_SEL, AUDIO_SSI_SEL);
 
        if (priv->dac_ssi_port == MC13783_SSI1_PORT)
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
                                AUDIO_SSI_SEL, 0);
        else
                mc13xxx_reg_rmw(priv->mc13xxx, MC13783_AUDIO_DAC,
-                               0, AUDIO_SSI_SEL);
+                               AUDIO_SSI_SEL, AUDIO_SSI_SEL);
 
        return 0;
 }
index dc7778b6dd7f6fa40b76b598e3441aa5c2c5d571..c3c33bd0df1c5c2c4d94bdcf93a498b62744d23e 100644 (file)
@@ -437,7 +437,7 @@ static int uda1380_set_dai_fmt_both(struct snd_soc_dai *codec_dai,
        if ((fmt & SND_SOC_DAIFMT_MASTER_MASK) != SND_SOC_DAIFMT_CBS_CFS)
                return -EINVAL;
 
-       uda1380_write(codec, UDA1380_IFACE, iface);
+       uda1380_write_reg_cache(codec, UDA1380_IFACE, iface);
 
        return 0;
 }
index 3035d98564156746bc4e814f744e1ea0f236564a..e97a7615df85059a120f19857babe4d77df0cb46 100644 (file)
@@ -395,7 +395,7 @@ static const struct snd_soc_dapm_route audio_paths[] = {
        { "Right Input Mixer", "Boost Switch", "Right Boost Mixer", },
        { "Right Input Mixer", NULL, "RINPUT1", },  /* Really Boost Switch */
        { "Right Input Mixer", NULL, "RINPUT2" },
-       { "Right Input Mixer", NULL, "LINPUT3" },
+       { "Right Input Mixer", NULL, "RINPUT3" },
 
        { "Left ADC", NULL, "Left Input Mixer" },
        { "Right ADC", NULL, "Right Input Mixer" },
index 4fbc7689339a8903f724fa9aefcb6feb31dad54f..a1c04dab668469f08161c086f138e562c81f4d0f 100644 (file)
@@ -2754,7 +2754,7 @@ static struct {
 };
 
 static int fs_ratios[] = {
-       64, 128, 192, 256, 348, 512, 768, 1024, 1408, 1536
+       64, 128, 192, 256, 384, 512, 768, 1024, 1408, 1536
 };
 
 static int bclk_divs[] = {
index bb4b78eada586df9b210ff03981f44149e85f25c..23c91fa65ab8f55bc8efea265a1569e838bfc674 100644 (file)
@@ -1247,7 +1247,7 @@ static int davinci_mcasp_suspend(struct snd_soc_dai *dai)
        u32 reg;
        int i;
 
-       context->pm_state = pm_runtime_enabled(mcasp->dev);
+       context->pm_state = pm_runtime_active(mcasp->dev);
        if (!context->pm_state)
                pm_runtime_get_sync(mcasp->dev);
 
index defe0f0082b5e8877d6ae092d53395c6387fbaff..158204d08924972aee5843fcdc54735e8c0509bd 100644 (file)
@@ -3100,11 +3100,16 @@ snd_soc_dapm_new_control(struct snd_soc_dapm_context *dapm,
        }
 
        prefix = soc_dapm_prefix(dapm);
-       if (prefix)
+       if (prefix) {
                w->name = kasprintf(GFP_KERNEL, "%s %s", prefix, widget->name);
-       else
+               if (widget->sname)
+                       w->sname = kasprintf(GFP_KERNEL, "%s %s", prefix,
+                                            widget->sname);
+       } else {
                w->name = kasprintf(GFP_KERNEL, "%s", widget->name);
-
+               if (widget->sname)
+                       w->sname = kasprintf(GFP_KERNEL, "%s", widget->sname);
+       }
        if (w->name == NULL) {
                kfree(w);
                return NULL;
index 3e2ef61c627b831bfec65724cc7166db051f5099..8b7e391dd0b80193d49f8634bb69fa45814593f0 100644 (file)
@@ -918,6 +918,7 @@ static void volume_control_quirks(struct usb_mixer_elem_info *cval,
        case USB_ID(0x046d, 0x081d): /* HD Webcam c510 */
        case USB_ID(0x046d, 0x0825): /* HD Webcam c270 */
        case USB_ID(0x046d, 0x0826): /* HD Webcam c525 */
+       case USB_ID(0x046d, 0x08ca): /* Logitech Quickcam Fusion */
        case USB_ID(0x046d, 0x0991):
        /* Most audio usb devices lie about volume resolution.
         * Most Logitech webcams have res = 384.
@@ -1582,12 +1583,6 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                              unitid);
                return -EINVAL;
        }
-       /* no bmControls field (e.g. Maya44) -> ignore */
-       if (desc->bLength <= 10 + input_pins) {
-               usb_audio_dbg(state->chip, "MU %d has no bmControls field\n",
-                             unitid);
-               return 0;
-       }
 
        num_ins = 0;
        ich = 0;
@@ -1595,6 +1590,9 @@ static int parse_audio_mixer_unit(struct mixer_build *state, int unitid,
                err = parse_audio_unit(state, desc->baSourceID[pin]);
                if (err < 0)
                        continue;
+               /* no bmControls field (e.g. Maya44) -> ignore */
+               if (desc->bLength <= 10 + input_pins)
+                       continue;
                err = check_input_term(state, desc->baSourceID[pin], &iterm);
                if (err < 0)
                        return err;
index b703cb3cda1993402d60efc03e9e7d840cb68f72..e5000da9e9d7093f6e287194665de2d63f046e93 100644 (file)
@@ -436,6 +436,11 @@ static struct usbmix_ctl_map usbmix_ctl_maps[] = {
                .id = USB_ID(0x200c, 0x1018),
                .map = ebox44_map,
        },
+       {
+               /* MAYA44 USB+ */
+               .id = USB_ID(0x2573, 0x0008),
+               .map = maya44_map,
+       },
        {
                /* KEF X300A */
                .id = USB_ID(0x27ac, 0x1000),
index 7c5a701392785daf7b112443986e5527e63a0324..754e689596a21b43f3b3a45b8f3062ec29b74099 100644 (file)
@@ -1117,7 +1117,10 @@ bool snd_usb_get_sample_rate_quirk(struct snd_usb_audio *chip)
        switch (chip->usb_id) {
        case USB_ID(0x045E, 0x075D): /* MS Lifecam Cinema  */
        case USB_ID(0x045E, 0x076D): /* MS Lifecam HD-5000 */
+       case USB_ID(0x045E, 0x0772): /* MS Lifecam Studio */
+       case USB_ID(0x045E, 0x0779): /* MS Lifecam HD-3000 */
        case USB_ID(0x04D8, 0xFEEA): /* Benchmark DAC1 Pre */
+       case USB_ID(0x074D, 0x3553): /* Outlaw RR2150 (Micronas UAC3553B) */
                return true;
        }
        return false;
@@ -1264,8 +1267,9 @@ u64 snd_usb_interface_dsd_format_quirks(struct snd_usb_audio *chip,
                if (fp->altsetting == 2)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
-       /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
-       case USB_ID(0x20b1, 0x2009):
+
+       case USB_ID(0x20b1, 0x2009): /* DIYINHK DSD DXD 384kHz USB to I2S/DSD */
+       case USB_ID(0x20b1, 0x2023): /* JLsounds I2SoverUSB */
                if (fp->altsetting == 3)
                        return SNDRV_PCM_FMTBIT_DSD_U32_BE;
                break;
index 9a617adc6675dc06552de428c93b3c611599900b..b35102721cbbc82d1560ae657d41cebebe2fa723 100644 (file)
@@ -1,3 +1,8 @@
+# Some of the tools (perf) use the same make variables
+# as the kernel build does.
+export srctree=
+export objtree=
+
 include scripts/Makefile.include
 
 help:
@@ -47,11 +52,16 @@ cgroup firewire hv guest usb virtio vm net: FORCE
 liblockdep: FORCE
        $(call descend,lib/lockdep)
 
-libapikfs: FORCE
+libapi: FORCE
        $(call descend,lib/api)
 
-perf: libapikfs FORCE
-       $(call descend,$@)
+# The perf build does not follow the descend function setup,
+# invoking it via its own make rule.
+PERF_O   = $(if $(O),$(O)/tools/perf,)
+
+perf: FORCE
+       $(Q)mkdir -p $(PERF_O) .
+       $(Q)$(MAKE) --no-print-directory -C perf O=$(PERF_O) subdir=
 
 selftests: FORCE
        $(call descend,testing/$@)
@@ -97,10 +107,10 @@ cgroup_clean hv_clean firewire_clean lguest_clean usb_clean virtio_clean vm_clea
 liblockdep_clean:
        $(call descend,lib/lockdep,clean)
 
-libapikfs_clean:
+libapi_clean:
        $(call descend,lib/api,clean)
 
-perf_clean: libapikfs_clean
+perf_clean:
        $(call descend,$(@:_clean=),clean)
 
 selftests_clean:
diff --git a/tools/arch/alpha/include/asm/barrier.h b/tools/arch/alpha/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..95df19c
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
+#define __TOOLS_LINUX_ASM_ALPHA_BARRIER_H
+
+#define mb()   __asm__ __volatile__("mb": : :"memory")
+#define rmb()  __asm__ __volatile__("mb": : :"memory")
+#define wmb()  __asm__ __volatile__("wmb": : :"memory")
+
+#endif         /* __TOOLS_LINUX_ASM_ALPHA_BARRIER_H */
diff --git a/tools/arch/arm/include/asm/barrier.h b/tools/arch/arm/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..005c618
--- /dev/null
@@ -0,0 +1,12 @@
+#ifndef _TOOLS_LINUX_ASM_ARM_BARRIER_H
+#define _TOOLS_LINUX_ASM_ARM_BARRIER_H
+
+/*
+ * Use the __kuser_memory_barrier helper in the CPU helper page. See
+ * arch/arm/kernel/entry-armv.S in the kernel source for details.
+ */
+#define mb()           ((void(*)(void))0xffff0fa0)()
+#define wmb()          ((void(*)(void))0xffff0fa0)()
+#define rmb()          ((void(*)(void))0xffff0fa0)()
+
+#endif /* _TOOLS_LINUX_ASM_ARM_BARRIER_H */
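
(A hedged illustration, not part of the patch: 0xffff0fa0 is the fixed address
of the kernel's __kuser_memory_barrier helper in the ARM kuser helper page, so
each macro above simply calls through that address. Open-coded, the same call
looks roughly like this; the variable name is invented.)

	void (*kuser_mb)(void) = (void (*)(void))0xffff0fa0; /* kernel helper */
	kuser_mb();	/* executes a full memory barrier in the helper page */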
diff --git a/tools/arch/arm64/include/asm/barrier.h b/tools/arch/arm64/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..a0483c8
--- /dev/null
@@ -0,0 +1,16 @@
+#ifndef _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
+#define _TOOLS_LINUX_ASM_AARCH64_BARRIER_H
+
+/*
+ * From tools/perf/perf-sys.h, last modified in:
+ * f428ebd184c82a7914b2aa7e9f868918aaf7ea78 perf tools: Fix AAAAARGH64 memory barriers
+ *
+ * XXX: arch/arm64/include/asm/barrier.h in the kernel sources uses dsb; is
+ * this a case like arm32, where we do things differently in userspace?
+ */
+
+#define mb()           asm volatile("dmb ish" ::: "memory")
+#define wmb()          asm volatile("dmb ishst" ::: "memory")
+#define rmb()          asm volatile("dmb ishld" ::: "memory")
+
+#endif /* _TOOLS_LINUX_ASM_AARCH64_BARRIER_H */
diff --git a/tools/arch/ia64/include/asm/barrier.h b/tools/arch/ia64/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..e4422b4
--- /dev/null
@@ -0,0 +1,48 @@
+/*
+ * Copied from the kernel sources to tools/:
+ *
+ * Memory barrier definitions.  This is based on information published
+ * in the Processor Abstraction Layer and the System Abstraction Layer
+ * manual.
+ *
+ * Copyright (C) 1998-2003 Hewlett-Packard Co
+ *     David Mosberger-Tang <davidm@hpl.hp.com>
+ * Copyright (C) 1999 Asit Mallick <asit.k.mallick@intel.com>
+ * Copyright (C) 1999 Don Dugger <don.dugger@intel.com>
+ */
+#ifndef _TOOLS_LINUX_ASM_IA64_BARRIER_H
+#define _TOOLS_LINUX_ASM_IA64_BARRIER_H
+
+#include <linux/compiler.h>
+
+/*
+ * Macros to force memory ordering.  In these descriptions, "previous"
+ * and "subsequent" refer to program order; "visible" means that all
+ * architecturally visible effects of a memory access have occurred
+ * (at a minimum, this means the memory has been read or written).
+ *
+ *   wmb():    Guarantees that all preceding stores to memory-
+ *             like regions are visible before any subsequent
+ *             stores and that all following stores will be
+ *             visible only after all previous stores.
+ *   rmb():    Like wmb(), but for reads.
+ *   mb():     wmb()/rmb() combo, i.e., all previous memory
+ *             accesses are visible before all subsequent
+ *             accesses and vice versa.  This is also known as
+ *             a "fence."
+ *
+ * Note: "mb()" and its variants cannot be used as a fence to order
+ * accesses to memory mapped I/O registers.  For that, mf.a needs to
+ * be used.  However, we don't want to always use mf.a because (a)
+ * it's (presumably) much slower than mf and (b) mf.a is supported for
+ * sequential memory pages only.
+ */
+
+/* XXX From arch/ia64/include/uapi/asm/gcc_intrin.h */
+#define ia64_mf()       asm volatile ("mf" ::: "memory")
+
+#define mb()           ia64_mf()
+#define rmb()          mb()
+#define wmb()          mb()
+
+#endif /* _TOOLS_LINUX_ASM_IA64_BARRIER_H */
diff --git a/tools/arch/mips/include/asm/barrier.h b/tools/arch/mips/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..80f96f7
--- /dev/null
@@ -0,0 +1,20 @@
+#ifndef _TOOLS_LINUX_ASM_MIPS_BARRIER_H
+#define _TOOLS_LINUX_ASM_MIPS_BARRIER_H
+/*
+ * FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
+ * in c1e028ef40b8d6943b767028ba17d4f2ba020edb; more work is needed to make it
+ * follow the Linux kernel arch/mips/include/asm/barrier.h file more closely,
+ * probably once tools/ Kconfig support lands and all the CONFIG_ symbols
+ * needed to do that properly are available.
+ */
+#define mb()           asm volatile(                                   \
+                               ".set   mips2\n\t"                      \
+                               "sync\n\t"                              \
+                               ".set   mips0"                          \
+                               : /* no output */                       \
+                               : /* no input */                        \
+                               : "memory")
+#define wmb()  mb()
+#define rmb()  mb()
+
+#endif /* _TOOLS_LINUX_ASM_MIPS_BARRIER_H */
diff --git a/tools/arch/powerpc/include/asm/barrier.h b/tools/arch/powerpc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..b23aee8
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copied from the kernel sources:
+ *
+ * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu>
+ */
+#ifndef _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
+#define _TOOLS_LINUX_ASM_POWERPC_BARRIER_H
+
+/*
+ * Memory barrier.
+ * The sync instruction guarantees that all memory accesses initiated
+ * by this processor have been performed (with respect to all other
+ * mechanisms that access memory).  The eieio instruction is a barrier
+ * providing an ordering (separately) for (a) cacheable stores and (b)
+ * loads and stores to non-cacheable memory (e.g. I/O devices).
+ *
+ * mb() prevents loads and stores being reordered across this point.
+ * rmb() prevents loads being reordered across this point.
+ * wmb() prevents stores being reordered across this point.
+ *
+ * *mb() variants without smp_ prefix must order all types of memory
+ * operations with one another. sync is the only instruction sufficient
+ * to do this.
+ */
+#define mb()   __asm__ __volatile__ ("sync" : : : "memory")
+#define rmb()  __asm__ __volatile__ ("sync" : : : "memory")
+#define wmb()  __asm__ __volatile__ ("sync" : : : "memory")
+
+#endif /* _TOOLS_LINUX_ASM_POWERPC_BARRIER_H */
diff --git a/tools/arch/s390/include/asm/barrier.h b/tools/arch/s390/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..f851412
--- /dev/null
@@ -0,0 +1,30 @@
+/*
+ * Copied from the kernel sources:
+ *
+ * Copyright IBM Corp. 1999, 2009
+ *
+ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com>
+ */
+
+#ifndef __TOOLS_LINUX_ASM_BARRIER_H
+#define __TOOLS_LINUX_ASM_BARRIER_H
+
+/*
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#ifdef CONFIG_HAVE_MARCH_Z196_FEATURES
+/* Fast-BCR without checkpoint synchronization */
+#define __ASM_BARRIER "bcr 14,0\n"
+#else
+#define __ASM_BARRIER "bcr 15,0\n"
+#endif
+
+#define mb() do {  asm volatile(__ASM_BARRIER : : : "memory"); } while (0)
+
+#define rmb()                          mb()
+#define wmb()                          mb()
+
+#endif /* __TOOLS_LINUX_ASM_BARRIER_H */
diff --git a/tools/arch/sh/include/asm/barrier.h b/tools/arch/sh/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..c18fd75
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copied from the kernel sources:
+ *
+ * Copyright (C) 1999, 2000  Niibe Yutaka  &  Kaz Kojima
+ * Copyright (C) 2002 Paul Mundt
+ */
+#ifndef __TOOLS_LINUX_ASM_SH_BARRIER_H
+#define __TOOLS_LINUX_ASM_SH_BARRIER_H
+
+/*
+ * A brief note on ctrl_barrier(), the control register write barrier.
+ *
+ * Legacy SH cores typically require a sequence of 8 nops after
+ * modification of a control register in order for the changes to take
+ * effect. On newer cores (like the sh4a and sh5) this is accomplished
+ * with icbi.
+ *
+ * Also note that on sh4a in the icbi case we can forego a synco for the
+ * write barrier, as it's not necessary for control registers.
+ *
+ * Historically we have only done this type of barrier for the MMUCR, but
+ * it's also necessary for the CCR, so we make it generic here instead.
+ */
+#if defined(__SH4A__) || defined(__SH5__)
+#define mb()           __asm__ __volatile__ ("synco": : :"memory")
+#define rmb()          mb()
+#define wmb()          mb()
+#endif
+
+#include <asm-generic/barrier.h>
+
+#endif /* __TOOLS_LINUX_ASM_SH_BARRIER_H */
diff --git a/tools/arch/sparc/include/asm/barrier.h b/tools/arch/sparc/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..8c017b3
--- /dev/null
@@ -0,0 +1,8 @@
+#ifndef ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
+#define ___TOOLS_LINUX_ASM_SPARC_BARRIER_H
+#if defined(__sparc__) && defined(__arch64__)
+#include "barrier_64.h"
+#else
+#include "barrier_32.h"
+#endif
+#endif
diff --git a/tools/arch/sparc/include/asm/barrier_32.h b/tools/arch/sparc/include/asm/barrier_32.h
new file mode 100644 (file)
index 0000000..c5eadd0
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __TOOLS_PERF_SPARC_BARRIER_H
+#define __TOOLS_PERF_SPARC_BARRIER_H
+
+#include <asm-generic/barrier.h>
+
+#endif /* !(__TOOLS_PERF_SPARC_BARRIER_H) */
diff --git a/tools/arch/sparc/include/asm/barrier_64.h b/tools/arch/sparc/include/asm/barrier_64.h
new file mode 100644 (file)
index 0000000..9a7d732
--- /dev/null
@@ -0,0 +1,42 @@
+#ifndef __TOOLS_LINUX_SPARC64_BARRIER_H
+#define __TOOLS_LINUX_SPARC64_BARRIER_H
+
+/* Copied from the kernel sources to tools/:
+ *
+ * These are here in an effort to more fully work around Spitfire Errata
+ * #51.  Essentially, if a memory barrier occurs soon after a mispredicted
+ * branch, the chip can stop executing instructions until a trap occurs.
+ * Therefore, if interrupts are disabled, the chip can hang forever.
+ *
+ * It used to be believed that the memory barrier had to be right in the
+ * delay slot, but a case has been traced recently wherein the memory barrier
+ * was one instruction after the branch delay slot and the chip still hung.
+ * The offending sequence was the following in sym_wakeup_done() of the
+ * sym53c8xx_2 driver:
+ *
+ *     call    sym_ccb_from_dsa, 0
+ *      movge  %icc, 0, %l0
+ *     brz,pn  %o0, .LL1303
+ *      mov    %o0, %l2
+ *     membar  #LoadLoad
+ *
+ * The branch has to be mispredicted for the bug to occur.  Therefore, we put
+ * the memory barrier explicitly into a "branch always, predicted taken"
+ * delay slot to avoid the problem case.
+ */
+#define membar_safe(type) \
+do {   __asm__ __volatile__("ba,pt     %%xcc, 1f\n\t" \
+                            " membar   " type "\n" \
+                            "1:\n" \
+                            : : : "memory"); \
+} while (0)
+
+/* The kernel always executes in TSO memory model these days,
+ * and furthermore most sparc64 chips implement more stringent
+ * memory ordering than required by the specifications.
+ */
+#define mb()   membar_safe("#StoreLoad")
+#define rmb()  __asm__ __volatile__("":::"memory")
+#define wmb()  __asm__ __volatile__("":::"memory")
+
+#endif /* !(__TOOLS_LINUX_SPARC64_BARRIER_H) */
diff --git a/tools/arch/tile/include/asm/barrier.h b/tools/arch/tile/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..7d3692c
--- /dev/null
@@ -0,0 +1,15 @@
+#ifndef _TOOLS_LINUX_ASM_TILE_BARRIER_H
+#define _TOOLS_LINUX_ASM_TILE_BARRIER_H
+/*
+ * FIXME: This came from tools/perf/perf-sys.h, where it was first introduced
+ * in 620830b6954913647b7c7f68920cf48eddf6ad92; more work is needed to make it
+ * follow the Linux kernel arch/tile/include/asm/barrier.h file more closely,
+ * probably once tools/ Kconfig support lands and all the CONFIG_ symbols
+ * needed to do that properly are available.
+ */
+
+#define mb()           asm volatile ("mf" ::: "memory")
+#define wmb()          mb()
+#define rmb()          mb()
+
+#endif /* _TOOLS_LINUX_ASM_TILE_BARRIER_H */
diff --git a/tools/arch/x86/include/asm/atomic.h b/tools/arch/x86/include/asm/atomic.h
new file mode 100644 (file)
index 0000000..059e33e
--- /dev/null
@@ -0,0 +1,65 @@
+#ifndef _TOOLS_LINUX_ASM_X86_ATOMIC_H
+#define _TOOLS_LINUX_ASM_X86_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+#include "rmwcc.h"
+
+#define LOCK_PREFIX "\n\tlock; "
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read(const atomic_t *v)
+{
+       return ACCESS_ONCE((v)->counter);
+}
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+       v->counter = i;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc(atomic_t *v)
+{
+       asm volatile(LOCK_PREFIX "incl %0"
+                    : "+m" (v->counter));
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+       GEN_UNARY_RMWcc(LOCK_PREFIX "decl", v->counter, "%0", "e");
+}
+
+#endif /* _TOOLS_LINUX_ASM_X86_ATOMIC_H */
diff --git a/tools/arch/x86/include/asm/barrier.h b/tools/arch/x86/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..f366d8e
--- /dev/null
@@ -0,0 +1,28 @@
+#ifndef _TOOLS_LINUX_ASM_X86_BARRIER_H
+#define _TOOLS_LINUX_ASM_X86_BARRIER_H
+
+/*
+ * Copied from the Linux kernel sources, also moving code out of
+ * tools/perf/perf-sys.h so that it is located in a place similar
+ * to its location in the kernel sources.
+ *
+ * Force strict CPU ordering.
+ * And yes, this is required on UP too when we're talking
+ * to devices.
+ */
+
+#if defined(__i386__)
+/*
+ * Some non-Intel clones support out of order store. wmb() ceases to be a
+ * nop for these.
+ */
+#define mb()   asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define rmb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#define wmb()  asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
+#elif defined(__x86_64__)
+#define mb()   asm volatile("mfence":::"memory")
+#define rmb()  asm volatile("lfence":::"memory")
+#define wmb()  asm volatile("sfence" ::: "memory")
+#endif
+
+#endif /* _TOOLS_LINUX_ASM_X86_BARRIER_H */
diff --git a/tools/arch/x86/include/asm/rmwcc.h b/tools/arch/x86/include/asm/rmwcc.h
new file mode 100644 (file)
index 0000000..a6669bc
--- /dev/null
@@ -0,0 +1,41 @@
+#ifndef _TOOLS_LINUX_ASM_X86_RMWcc
+#define _TOOLS_LINUX_ASM_X86_RMWcc
+
+#ifdef CC_HAVE_ASM_GOTO
+
+#define __GEN_RMWcc(fullop, var, cc, ...)                              \
+do {                                                                   \
+       asm_volatile_goto (fullop "; j" cc " %l[cc_label]"              \
+                       : : "m" (var), ## __VA_ARGS__                   \
+                       : "memory" : cc_label);                         \
+       return 0;                                                       \
+cc_label:                                                              \
+       return 1;                                                       \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
+       __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
+       __GEN_RMWcc(op " %1, " arg0, var, cc, vcon (val))
+
+#else /* !CC_HAVE_ASM_GOTO */
+
+#define __GEN_RMWcc(fullop, var, cc, ...)                              \
+do {                                                                   \
+       char c;                                                         \
+       asm volatile (fullop "; set" cc " %1"                           \
+                       : "+m" (var), "=qm" (c)                         \
+                       : __VA_ARGS__ : "memory");                      \
+       return c != 0;                                                  \
+} while (0)
+
+#define GEN_UNARY_RMWcc(op, var, arg0, cc)                             \
+       __GEN_RMWcc(op " " arg0, var, cc)
+
+#define GEN_BINARY_RMWcc(op, var, vcon, val, arg0, cc)                 \
+       __GEN_RMWcc(op " %2, " arg0, var, cc, vcon (val))
+
+#endif /* CC_HAVE_ASM_GOTO */
+
+#endif /* _TOOLS_LINUX_ASM_X86_RMWcc */
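
(For illustration: with the macros above, atomic_dec_and_test() from the x86
atomic.h earlier in this diff expands on the !CC_HAVE_ASM_GOTO fallback path
to roughly the following; the function name is hypothetical.)

	static inline int atomic_dec_and_test_sketch(atomic_t *v)
	{
		char c;
		/* fullop = LOCK_PREFIX "decl %0", cc = "e" */
		asm volatile(LOCK_PREFIX "decl %0; sete %1"
			     : "+m" (v->counter), "=qm" (c)
			     : : "memory");
		return c != 0;
	}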
diff --git a/tools/arch/xtensa/include/asm/barrier.h b/tools/arch/xtensa/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..583800b
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Copied from the kernel sources to tools/:
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License.  See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001 - 2012 Tensilica Inc.
+ */
+
+#ifndef _TOOLS_LINUX_XTENSA_SYSTEM_H
+#define _TOOLS_LINUX_XTENSA_SYSTEM_H
+
+#define mb()  ({ __asm__ __volatile__("memw" : : : "memory"); })
+#define rmb() barrier()
+#define wmb() mb()
+
+#endif /* _TOOLS_LINUX_XTENSA_SYSTEM_H */
index 10df57237a66d26913a4baabd5949f3459f0a534..a51244a8022f91c26591b9553d16e0020c9f9d12 100644 (file)
@@ -37,7 +37,7 @@ subdir-obj-y :=
 
 # Build definitions
 build-file := $(dir)/Build
-include $(build-file)
+-include $(build-file)
 
 quiet_cmd_flex  = FLEX     $@
 quiet_cmd_bison = BISON    $@
@@ -94,12 +94,12 @@ obj-y        := $(patsubst %/, %/$(obj)-in.o, $(obj-y))
 subdir-obj-y := $(filter %/$(obj)-in.o, $(obj-y))
 
 # '$(OUTPUT)/dir' prefix to all objects
-prefix       := $(subst ./,,$(OUTPUT)$(dir)/)
-obj-y        := $(addprefix $(prefix),$(obj-y))
-subdir-obj-y := $(addprefix $(prefix),$(subdir-obj-y))
+objprefix    := $(subst ./,,$(OUTPUT)$(dir)/)
+obj-y        := $(addprefix $(objprefix),$(obj-y))
+subdir-obj-y := $(addprefix $(objprefix),$(subdir-obj-y))
 
 # Final '$(obj)-in.o' object
-in-target := $(prefix)$(obj)-in.o
+in-target := $(objprefix)$(obj)-in.o
 
 PHONY += $(subdir-y)
 
index 3a0b0ca2a28c1b0bcc6a568d6c7d3ee6ced8c627..2975632d51e2341e7e1a60286e0fa822cbec0279 100644 (file)
@@ -27,7 +27,7 @@ endef
 #   the rule that uses them - an example for that is the 'bionic'
 #   feature check. ]
 #
-FEATURE_TESTS                        \
+FEATURE_TESTS ?=                       \
        backtrace                       \
        dwarf                           \
        fortify-source                  \
@@ -53,7 +53,7 @@ FEATURE_TESTS =                       \
        zlib                            \
        lzma
 
-FEATURE_DISPLAY                      \
+FEATURE_DISPLAY ?=                     \
        dwarf                           \
        glibc                           \
        gtk2                            \
index 0e6c3e6767e6c553c34bff9ad6db7b440429d515..70d876237c5709f36a985c4f8c52a56a063167f7 100644 (file)
@@ -2,6 +2,7 @@ ex-y += ex.o
 ex-y += a.o
 ex-y += b.o
 ex-y += empty/
+ex-y += empty2/
 
 libex-y += c.o
 libex-y += d.o
diff --git a/tools/build/tests/ex/empty2/README b/tools/build/tests/ex/empty2/README
new file mode 100644 (file)
index 0000000..2107cc5
--- /dev/null
@@ -0,0 +1,2 @@
+This directory is left intentionally without Build file
+to test proper nesting into Build-less directories.
diff --git a/tools/include/asm-generic/atomic-gcc.h b/tools/include/asm-generic/atomic-gcc.h
new file mode 100644 (file)
index 0000000..2ba78c9
--- /dev/null
@@ -0,0 +1,63 @@
+#ifndef __TOOLS_ASM_GENERIC_ATOMIC_H
+#define __TOOLS_ASM_GENERIC_ATOMIC_H
+
+#include <linux/compiler.h>
+#include <linux/types.h>
+
+/*
+ * Atomic operations that C can't guarantee us.  Useful for
+ * resource counting etc..
+ *
+ * Excerpts obtained from the Linux kernel sources.
+ */
+
+#define ATOMIC_INIT(i) { (i) }
+
+/**
+ * atomic_read - read atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically reads the value of @v.
+ */
+static inline int atomic_read(const atomic_t *v)
+{
+       return ACCESS_ONCE((v)->counter);
+}
+
+/**
+ * atomic_set - set atomic variable
+ * @v: pointer of type atomic_t
+ * @i: required value
+ *
+ * Atomically sets the value of @v to @i.
+ */
+static inline void atomic_set(atomic_t *v, int i)
+{
+        v->counter = i;
+}
+
+/**
+ * atomic_inc - increment atomic variable
+ * @v: pointer of type atomic_t
+ *
+ * Atomically increments @v by 1.
+ */
+static inline void atomic_inc(atomic_t *v)
+{
+       __sync_add_and_fetch(&v->counter, 1);
+}
+
+/**
+ * atomic_dec_and_test - decrement and test
+ * @v: pointer of type atomic_t
+ *
+ * Atomically decrements @v by 1 and
+ * returns true if the result is 0, or false for all other
+ * cases.
+ */
+static inline int atomic_dec_and_test(atomic_t *v)
+{
+       return __sync_sub_and_fetch(&v->counter, 1) == 0;
+}
+
+#endif /* __TOOLS_ASM_GENERIC_ATOMIC_H */
diff --git a/tools/include/asm-generic/barrier.h b/tools/include/asm-generic/barrier.h
new file mode 100644 (file)
index 0000000..47b9339
--- /dev/null
@@ -0,0 +1,44 @@
+/*
+ * Copied from the kernel sources to tools/perf/:
+ *
+ * Generic barrier definitions, originally based on MN10300 definitions.
+ *
+ * It should be possible to use these on really simple architectures,
+ * but they serve more as a starting point for new ports.
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+#ifndef __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
+#define __TOOLS_LINUX_ASM_GENERIC_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/compiler.h>
+
+/*
+ * Force strict CPU ordering. And yes, this is required on UP too when we're
+ * talking to devices.
+ *
+ * Fall back to compiler barriers if nothing better is provided.
+ */
+
+#ifndef mb
+#define mb()   barrier()
+#endif
+
+#ifndef rmb
+#define rmb()  mb()
+#endif
+
+#ifndef wmb
+#define wmb()  mb()
+#endif
+
+#endif /* !__ASSEMBLY__ */
+#endif /* __TOOLS_LINUX_ASM_GENERIC_BARRIER_H */
diff --git a/tools/include/asm/atomic.h b/tools/include/asm/atomic.h
new file mode 100644 (file)
index 0000000..70794f5
--- /dev/null
@@ -0,0 +1,10 @@
+#ifndef __TOOLS_LINUX_ASM_ATOMIC_H
+#define __TOOLS_LINUX_ASM_ATOMIC_H
+
+#if defined(__i386__) || defined(__x86_64__)
+#include "../../arch/x86/include/asm/atomic.h"
+#else
+#include <asm-generic/atomic-gcc.h>
+#endif
+
+#endif /* __TOOLS_LINUX_ASM_ATOMIC_H */
diff --git a/tools/include/asm/barrier.h b/tools/include/asm/barrier.h
new file mode 100644 (file)
index 0000000..ac66ac5
--- /dev/null
@@ -0,0 +1,27 @@
+#if defined(__i386__) || defined(__x86_64__)
+#include "../../arch/x86/include/asm/barrier.h"
+#elif defined(__arm__)
+#include "../../arch/arm/include/asm/barrier.h"
+#elif defined(__aarch64__)
+#include "../../arch/arm64/include/asm/barrier.h"
+#elif defined(__powerpc__)
+#include "../../arch/powerpc/include/asm/barrier.h"
+#elif defined(__s390__)
+#include "../../arch/s390/include/asm/barrier.h"
+#elif defined(__sh__)
+#include "../../arch/sh/include/asm/barrier.h"
+#elif defined(__sparc__)
+#include "../../arch/sparc/include/asm/barrier.h"
+#elif defined(__tile__)
+#include "../../arch/tile/include/asm/barrier.h"
+#elif defined(__alpha__)
+#include "../../arch/alpha/include/asm/barrier.h"
+#elif defined(__mips__)
+#include "../../arch/mips/include/asm/barrier.h"
+#elif defined(__ia64__)
+#include "../../arch/ia64/include/asm/barrier.h"
+#elif defined(__xtensa__)
+#include "../../arch/xtensa/include/asm/barrier.h"
+#else
+#include <asm-generic/barrier.h>
+#endif
diff --git a/tools/include/linux/atomic.h b/tools/include/linux/atomic.h
new file mode 100644 (file)
index 0000000..4e3d3d1
--- /dev/null
@@ -0,0 +1,6 @@
+#ifndef __TOOLS_LINUX_ATOMIC_H
+#define __TOOLS_LINUX_ATOMIC_H
+
+#include <asm/atomic.h>
+
+#endif /* __TOOLS_LINUX_ATOMIC_H */
index 88461f09cc860b0d17ab87c3d4a629ee44214af7..f0e72674c52d2c9b88b46cb281db39fe8fb68d8d 100644 (file)
@@ -1,6 +1,10 @@
 #ifndef _TOOLS_LINUX_COMPILER_H_
 #define _TOOLS_LINUX_COMPILER_H_
 
+/* Optimization barrier */
+/* The "volatile" is due to gcc bugs */
+#define barrier() __asm__ __volatile__("": : :"memory")
+
 #ifndef __always_inline
 # define __always_inline       inline __attribute__((always_inline))
 #endif
diff --git a/tools/include/linux/kernel.h b/tools/include/linux/kernel.h
new file mode 100644 (file)
index 0000000..76df535
--- /dev/null
@@ -0,0 +1,107 @@
+#ifndef __TOOLS_LINUX_KERNEL_H
+#define __TOOLS_LINUX_KERNEL_H
+
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+
+#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
+
+#define PERF_ALIGN(x, a)       __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
+#define __PERF_ALIGN_MASK(x, mask)     (((x)+(mask))&~(mask))
+
+#ifndef offsetof
+#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
+#endif
+
+#ifndef container_of
+/**
+ * container_of - cast a member of a structure out to the containing structure
+ * @ptr:       the pointer to the member.
+ * @type:      the type of the container struct this is embedded in.
+ * @member:    the name of the member within the struct.
+ *
+ */
+#define container_of(ptr, type, member) ({                     \
+       const typeof(((type *)0)->member) * __mptr = (ptr);     \
+       (type *)((char *)__mptr - offsetof(type, member)); })
+#endif
+
+#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
+
+#ifndef max
+#define max(x, y) ({                           \
+       typeof(x) _max1 = (x);                  \
+       typeof(y) _max2 = (y);                  \
+       (void) (&_max1 == &_max2);              \
+       _max1 > _max2 ? _max1 : _max2; })
+#endif
+
+#ifndef min
+#define min(x, y) ({                           \
+       typeof(x) _min1 = (x);                  \
+       typeof(y) _min2 = (y);                  \
+       (void) (&_min1 == &_min2);              \
+       _min1 < _min2 ? _min1 : _min2; })
+#endif
+
+#ifndef roundup
+#define roundup(x, y) (                                \
+{                                                      \
+       const typeof(y) __y = y;                       \
+       (((x) + (__y - 1)) / __y) * __y;               \
+}                                                      \
+)
+#endif
+
+#ifndef BUG_ON
+#ifdef NDEBUG
+#define BUG_ON(cond) do { if (cond) {} } while (0)
+#else
+#define BUG_ON(cond) assert(!(cond))
+#endif
+#endif
+
+/*
+ * Both need more care to handle endianness
+ * (Don't use bitmap_copy_le() for now)
+ */
+#define cpu_to_le64(x) (x)
+#define cpu_to_le32(x) (x)
+
+static inline int
+vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
+{
+       int i;
+       ssize_t ssize = size;
+
+       i = vsnprintf(buf, size, fmt, args);
+
+       return (i >= ssize) ? (ssize - 1) : i;
+}
+
+static inline int scnprintf(char *buf, size_t size, const char *fmt, ...)
+{
+       va_list args;
+       ssize_t ssize = size;
+       int i;
+
+       va_start(args, fmt);
+       i = vsnprintf(buf, size, fmt, args);
+       va_end(args);
+
+       return (i >= ssize) ? (ssize - 1) : i;
+}
+
+/*
+ * This looks more complex than it should be. But we need to
+ * get the type for the ~ right in round_down (it needs to be
+ * as wide as the result!), and we want to evaluate the macro
+ * arguments just once each.
+ */
+#define __round_mask(x, y) ((__typeof__(x))((y)-1))
+#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
+#define round_down(x, y) ((x) & ~__round_mask(x, y))
+
+#endif
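
(A few worked values for the rounding macros above; y must be a power of two
for the mask trick to hold, and the numbers are purely illustrative.)

	/* round_up(13, 8)  : ((13 - 1) | 7) + 1 = (12 | 7) + 1 = 15 + 1 = 16
	 * round_down(13, 8): 13 & ~7 = 8
	 * round_up(16, 8)  : ((16 - 1) | 7) + 1 = 15 + 1 = 16, already aligned
	 */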
diff --git a/tools/include/linux/list.h b/tools/include/linux/list.h
new file mode 100644 (file)
index 0000000..76b014c
--- /dev/null
@@ -0,0 +1,29 @@
+#include <linux/kernel.h>
+#include <linux/types.h>
+
+#include "../../../include/linux/list.h"
+
+#ifndef TOOLS_LIST_H
+#define TOOLS_LIST_H
+/**
+ * list_del_range - deletes range of entries from list.
+ * @begin: first element in the range to delete from the list.
+ * @end: last element in the range to delete from the list.
+ * Note: list_empty on the range of entries does not return true after this;
+ * the entries are in an undefined state.
+ */
+static inline void list_del_range(struct list_head *begin,
+                                 struct list_head *end)
+{
+       begin->prev->next = end->next;
+       end->next->prev = begin->prev;
+}
+
+/**
+ * list_for_each_from  -       iterate over a list from one of its nodes
+ * @pos:  the &struct list_head to use as a loop cursor, from where to start
+ * @head: the head for your list.
+ */
+#define list_for_each_from(pos, head) \
+       for (; pos != (head); pos = pos->next)
+#endif
diff --git a/tools/include/linux/poison.h b/tools/include/linux/poison.h
new file mode 100644 (file)
index 0000000..0c27bdf
--- /dev/null
@@ -0,0 +1 @@
+#include "../../../include/linux/poison.h"
index b5cf25e05df2ecf945097b09199fbd28148a1b08..8ebf6278b2ef23d7cd45daf7b5f6f14bde675aa2 100644 (file)
@@ -60,6 +60,14 @@ typedef __u32 __bitwise __be32;
 typedef __u64 __bitwise __le64;
 typedef __u64 __bitwise __be64;
 
+typedef struct {
+       int counter;
+} atomic_t;
+
+#ifndef __aligned_u64
+# define __aligned_u64 __u64 __attribute__((aligned(8)))
+#endif
+
 struct list_head {
        struct list_head *next, *prev;
 };
index 0c356fb650220c6cd5d452d68a22eeb286d40d13..18ffccf004264d202632990d40227c27c41d27ad 100644 (file)
@@ -14,9 +14,10 @@ define allow-override
     $(eval $(1) = $(2)))
 endef
 
-# Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
+# Allow setting CC and AR and LD, or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,LD,$(CROSS_COMPILE)ld)
 
 INSTALL = install
 
index a11e3c357be7f8fb00720694324f26adae0402c5..cd2cc59a5da7900e53084fe19e650438e13af0a2 100644 (file)
@@ -28,6 +28,9 @@
 #define __init
 #define noinline
 #define list_add_tail_rcu list_add_tail
+#define list_for_each_entry_rcu list_for_each_entry
+#define barrier()
+#define synchronize_sched()
 
 #ifndef CALLER_ADDR0
 #define CALLER_ADDR0 ((unsigned long)__builtin_return_address(0))
index 35f56be5a4cdb9805ecb8e0b8190346ff804baf5..3c60335fe7be7c5bb35a8d5a64f59795a888e768 100644 (file)
@@ -1 +1,2 @@
 TRACEEVENT-CFLAGS
+libtraceevent-dynamic-list
index d410da335e3daeaeac8da1dbdfdabdc7e23fc38f..6daaff652affdde16240c91330781864aaedfd35 100644 (file)
@@ -23,6 +23,7 @@ endef
 # Allow setting CC and AR, or setting CROSS_COMPILE as a prefix.
 $(call allow-override,CC,$(CROSS_COMPILE)gcc)
 $(call allow-override,AR,$(CROSS_COMPILE)ar)
+$(call allow-override,NM,$(CROSS_COMPILE)nm)
 
 EXT = -std=gnu99
 INSTALL = install
@@ -34,9 +35,15 @@ INSTALL = install
 DESTDIR ?=
 DESTDIR_SQ = '$(subst ','\'',$(DESTDIR))'
 
+LP64 := $(shell echo __LP64__ | ${CC} ${CFLAGS} -E -x c - | tail -n 1)
+ifeq ($(LP64), 1)
+  libdir_relative = lib64
+else
+  libdir_relative = lib
+endif
+
 prefix ?= /usr/local
-bindir_relative = bin
-bindir = $(prefix)/$(bindir_relative)
+libdir = $(prefix)/$(libdir_relative)
 man_dir = $(prefix)/share/man
 man_dir_SQ = '$(subst ','\'',$(man_dir))'
 
@@ -58,7 +65,7 @@ ifeq ($(prefix),$(HOME))
 override plugin_dir = $(HOME)/.traceevent/plugins
 set_plugin_dir := 0
 else
-override plugin_dir = $(prefix)/lib/traceevent/plugins
+override plugin_dir = $(libdir)/traceevent/plugins
 endif
 endif
 
@@ -85,11 +92,11 @@ srctree := $(patsubst %/,%,$(dir $(srctree)))
 #$(info Determined 'srctree' to be $(srctree))
 endif
 
-export prefix bindir src obj
+export prefix libdir src obj
 
 # Shell quotes
-bindir_SQ = $(subst ','\'',$(bindir))
-bindir_relative_SQ = $(subst ','\'',$(bindir_relative))
+libdir_SQ = $(subst ','\'',$(libdir))
+libdir_relative_SQ = $(subst ','\'',$(libdir_relative))
 plugin_dir_SQ = $(subst ','\'',$(plugin_dir))
 
 LIB_FILE = libtraceevent.a libtraceevent.so
@@ -151,8 +158,9 @@ PLUGINS_IN := $(PLUGINS:.so=-in.o)
 
 TE_IN    := $(OUTPUT)libtraceevent-in.o
 LIB_FILE := $(addprefix $(OUTPUT),$(LIB_FILE))
+DYNAMIC_LIST_FILE := $(OUTPUT)libtraceevent-dynamic-list
 
-CMD_TARGETS = $(LIB_FILE) $(PLUGINS)
+CMD_TARGETS = $(LIB_FILE) $(PLUGINS) $(DYNAMIC_LIST_FILE)
 
 TARGETS = $(CMD_TARGETS)
 
@@ -169,6 +177,9 @@ $(OUTPUT)libtraceevent.so: $(TE_IN)
 $(OUTPUT)libtraceevent.a: $(TE_IN)
        $(QUIET_LINK)$(RM) $@; $(AR) rcs $@ $^
 
+$(OUTPUT)libtraceevent-dynamic-list: $(PLUGINS)
+       $(QUIET_GEN)$(call do_generate_dynamic_list_file, $(PLUGINS), $@)
+
 plugins: $(PLUGINS)
 
 __plugin_obj = $(notdir $@)
@@ -238,9 +249,16 @@ define do_install_plugins
        done
 endef
 
+define do_generate_dynamic_list_file
+       (echo '{';                                                      \
+       $(NM) -u -D $1 | awk 'NF>1 {print "\t"$$2";"}' | sort -u;       \
+       echo '};';                                                      \
+       ) > $2
+endef
+
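(For context: the recipe above emits a linker dynamic-list script naming the
symbols the plugins leave undefined, so the final binary keeps them exported.
A sketch of the generated file follows; the symbol names are illustrative.)

	{
		pevent_register_comm;
		trace_seq_printf;
	};
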
 install_lib: all_cmd install_plugins
        $(call QUIET_INSTALL, $(LIB_FILE)) \
-               $(call do_install,$(LIB_FILE),$(bindir_SQ))
+               $(call do_install,$(LIB_FILE),$(libdir_SQ))
 
 install_plugins: $(PLUGINS)
        $(call QUIET_INSTALL, trace_plugins) \
index 29f94f6f0d9e9e2510d38471724e686a9a21167c..cc25f059ab3dfcc5368804597120f96d20dabfdf 100644 (file)
@@ -1387,7 +1387,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                        do_warning_event(event, "%s: no type found", __func__);
                        goto fail;
                }
-               field->name = last_token;
+               field->name = field->alias = last_token;
 
                if (test_type(type, EVENT_OP))
                        goto fail;
@@ -1469,7 +1469,7 @@ static int event_read_fields(struct event_format *event, struct format_field **f
                                size_dynamic = type_size(field->name);
                                free_token(field->name);
                                strcat(field->type, brackets);
-                               field->name = token;
+                               field->name = field->alias = token;
                                type = read_token(&token);
                        } else {
                                char *new_type;
@@ -6444,6 +6444,8 @@ void pevent_ref(struct pevent *pevent)
 void pevent_free_format_field(struct format_field *field)
 {
        free(field->type);
+       if (field->alias != field->name)
+               free(field->alias);
        free(field->name);
        free(field);
 }
index 86a5839fb048e87d2f76982ab51231ef4646dfc5..063b1971eb35288ae1a8d72cab18a5df815126a4 100644 (file)
@@ -191,6 +191,7 @@ struct format_field {
        struct event_format     *event;
        char                    *type;
        char                    *name;
+       char                    *alias;
        int                     offset;
        int                     size;
        unsigned int            arraylen;
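
The alias field added above starts out sharing the name's storage
(field->name = field->alias = token) and only receives its own allocation if
it is rewritten later, which is why pevent_free_format_field() compares the
two pointers before freeing. A hedged, self-contained sketch of that
ownership pattern (struct field and the helpers are illustrative, not
libtraceevent's API):

------------------------------------------
#include <stdlib.h>
#include <string.h>

struct field {
	char *name;
	char *alias;
};

static void field__set_name(struct field *f, char *token)
{
	f->name = f->alias = token;	/* both point at the same buffer */
}

static void field__free(struct field *f)
{
	if (f->alias != f->name)	/* free alias only if it was split off */
		free(f->alias);
	free(f->name);
}

int main(void)
{
	struct field f;

	field__set_name(&f, strdup("common_pid"));
	f.alias = strdup("pid");	/* alias now owns a separate buffer */
	field__free(&f);
	return 0;
}
------------------------------------------
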
index 4592d84383188e70d1c59619a7f3b642d49f861e..ec57d0c1fbc2b679a82baf35219ea6cfa523354d 100644 (file)
@@ -4,6 +4,19 @@
 #include <endian.h>
 #include "event-parse.h"
 
+/*
+ * From glibc endian.h, for older systems where it is not present, e.g. RHEL5,
+ * Fedora6.
+ */
+#ifndef le16toh
+# if __BYTE_ORDER == __LITTLE_ENDIAN
+#  define le16toh(x) (x)
+# else
+#  define le16toh(x) __bswap_16 (x)
+# endif
+#endif
+
+
 static unsigned long long
 process___le16_to_cpup(struct trace_seq *s, unsigned long long *args)
 {
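
le16toh() converts a 16-bit value read from little-endian data into host byte
order: a no-op on little-endian hosts, a byte swap on big-endian ones, exactly
as the fallback above defines it. A small hedged usage sketch, assuming a
system whose endian.h provides le16toh (the fallback supplies it otherwise):

------------------------------------------
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char buf[2] = { 0x34, 0x12 };	/* 0x1234 stored little-endian */
	uint16_t raw;

	memcpy(&raw, buf, sizeof(raw));		/* raw bytes as a host integer */
	printf("0x%04x\n", le16toh(raw));	/* prints 0x1234 on any host */
	return 0;
}
------------------------------------------
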
index c5baf9c591b7bb5a2c280e173f4e0a2b561285fa..618c2bcd4eabc6143b0e7f0431f57b8620101fe5 100644 (file)
@@ -123,6 +123,8 @@ static int get_last_jit_image(char *haystack, size_t hlen,
        assert(ret == 0);
 
        ptr = haystack;
+       memset(pmatch, 0, sizeof(pmatch));
+
        while (1) {
                ret = regexec(&regex, ptr, 1, pmatch, 0);
                if (ret == 0) {
index 812f904193e8869bf0bd179d5dd36e42d51c35c8..09db62ba5786673d2029d8c6b6f96122daa878a1 100644 (file)
@@ -28,3 +28,4 @@ config.mak.autogen
 *-flex.*
 *.pyc
 *.pyo
+.config-detected
diff --git a/tools/perf/Documentation/callchain-overhead-calculation.txt b/tools/perf/Documentation/callchain-overhead-calculation.txt
new file mode 100644 (file)
index 0000000..1a75792
--- /dev/null
@@ -0,0 +1,108 @@
+Overhead calculation
+--------------------
+The overhead can be shown in two columns as 'Children' and 'Self' when
+perf collects callchains.  The 'self' overhead is simply calculated by
+adding all period values of the entry - usually a function (symbol).
+This is the value that perf shows traditionally, and the sum of all the
+'self' overhead values should be 100%.
+
+The 'children' overhead is calculated by adding all period values of
+the child functions so that it can show the total overhead of the
+higher level functions even if they don't directly execute much.
+'Children' here means functions that are called from another (parent)
+function.
+
+It might be confusing that the sum of all the 'children' overhead
+values exceeds 100% since each of them is already an accumulation of
+'self' overhead of its child functions.  But with this enabled, users
+can find which function has the most overhead even if samples are
+spread over the children.
+
+Consider the following example; there are three functions like below.
+
+-----------------------
+void foo(void) {
+    /* do something */
+}
+
+void bar(void) {
+    /* do something */
+    foo();
+}
+
+int main(void) {
+    bar();
+    return 0;
+}
+-----------------------
+
+In this case 'foo' is a child of 'bar', and 'bar' is an immediate
+child of 'main' so 'foo' also is a child of 'main'.  In other words,
+'main' is a parent of 'foo' and 'bar', and 'bar' is a parent of 'foo'.
+
+Suppose all samples are recorded in 'foo' and 'bar' only.  When it's
+recorded with callchains, the output will show something like the following
+in the usual (self-overhead-only) output of perf report:
+
+----------------------------------
+Overhead  Symbol
+........  .....................
+  60.00%  foo
+          |
+          --- foo
+              bar
+              main
+              __libc_start_main
+
+  40.00%  bar
+          |
+          --- bar
+              main
+              __libc_start_main
+----------------------------------
+
+When the --children option is enabled, the 'self' overhead values of
+child functions (i.e. 'foo' and 'bar') are added to the parents to
+calculate the 'children' overhead.  In this case the report could be
+displayed as:
+
+-------------------------------------------
+Children      Self  Symbol
+........  ........  ....................
+ 100.00%     0.00%  __libc_start_main
+          |
+          --- __libc_start_main
+
+ 100.00%     0.00%  main
+          |
+          --- main
+              __libc_start_main
+
+ 100.00%    40.00%  bar
+          |
+          --- bar
+              main
+              __libc_start_main
+
+  60.00%    60.00%  foo
+          |
+          --- foo
+              bar
+              main
+              __libc_start_main
+-------------------------------------------
+
+In the above output, the 'self' overhead of 'foo' (60%) was added to the
+'children' overhead of 'bar', 'main' and '\_\_libc_start_main'.
+Likewise, the 'self' overhead of 'bar' (40%) was added to the
+'children' overhead of 'main' and '\_\_libc_start_main'.
+
+So '\_\_libc_start_main' and 'main' are shown first since they have
+the same (100%) 'children' overhead (even though they have zero 'self'
+overhead) and they are the parents of 'foo' and 'bar'.
+
+Since v3.16 the 'children' overhead is shown by default and the output
+is sorted by its values. The 'children' overhead can be disabled by
+specifying the --no-children option on the command line, or by adding
+'report.children = false' or 'top.children = false' to the perf config
+file.
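
To make the accumulation described above concrete, here is a hedged,
self-contained C sketch (struct sample, sym_idx() and the fixed-size tables
are illustrative, not perf's data structures): each sample credits its period
to the 'self' total of its leaf symbol and to the 'children' total of every
symbol on its callchain. Fed the two samples from the example, it reproduces
the 60%/40% 'self' and 60%/100%/100%/100% 'children' split; note that real
perf credits a symbol only once per chain even when it recurs, and sorts the
output by the 'children' column.

------------------------------------------
#include <stdio.h>
#include <string.h>

#define MAX_DEPTH 8
#define MAX_SYMS  16

struct sample {
	unsigned long long period;
	const char *chain[MAX_DEPTH];	/* leaf first, root last */
	int depth;
};

static const char *names[MAX_SYMS];
static unsigned long long self[MAX_SYMS], children[MAX_SYMS];
static int nsyms;

static int sym_idx(const char *name)
{
	int i;

	for (i = 0; i < nsyms; i++)
		if (!strcmp(names[i], name))
			return i;
	names[nsyms] = name;
	return nsyms++;
}

static void account(const struct sample *s)
{
	int i;

	self[sym_idx(s->chain[0])] += s->period;	/* leaf gets 'self' */
	for (i = 0; i < s->depth; i++)			/* whole chain gets 'children' */
		children[sym_idx(s->chain[i])] += s->period;
}

int main(void)
{
	struct sample samples[] = {
		{ 60, { "foo", "bar", "main", "__libc_start_main" }, 4 },
		{ 40, { "bar", "main", "__libc_start_main" }, 3 },
	};
	unsigned long long total = 100;
	int i;

	for (i = 0; i < 2; i++)
		account(&samples[i]);

	printf("Children      Self  Symbol\n");
	for (i = 0; i < nsyms; i++)
		printf("%7.2f%% %8.2f%%  %s\n",
		       100.0 * children[i] / total,
		       100.0 * self[i] / total, names[i]);
	return 0;
}
------------------------------------------
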
index f6480cbf309b40fee97e86112033c1a0cf04236a..bf3d0644bf1066677ae6ced215f82ecbc55f409d 100644 (file)
@@ -210,6 +210,9 @@ Suite for evaluating hash tables.
 *wake*::
 Suite for evaluating wake calls.
 
+*wake-parallel*::
+Suite for evaluating parallel wake calls.
+
 *requeue*::
 Suite for evaluating requeue calls.
 
index dc7442cf3d7f80920b8ba36f00fb8c0da972534b..b876ae312699b9388f6565eec18e239305309f31 100644 (file)
@@ -44,6 +44,33 @@ OPTIONS
 --kallsyms=<file>::
        kallsyms pathname
 
+--itrace::
+       Decode Instruction Tracing data, replacing it with synthesized events.
+       Options are:
+
+               i       synthesize instructions events
+               b       synthesize branches events
+               c       synthesize branches events (calls only)
+               r       synthesize branches events (returns only)
+               x       synthesize transactions events
+               e       synthesize error events
+               d       create a debug log
+               g       synthesize a call chain (use with i or x)
+
+       The default is all events, i.e. the same as --itrace=ibxe
+
+       In addition, the period (default 100000) for instructions events
+       can be specified in units of:
+
+               i       instructions
+               t       ticks
+               ms      milliseconds
+               us      microseconds
+               ns      nanoseconds (default)
+
+       Also the call chain size (default 16, max. 1024) for instructions or
+       transactions events can be specified.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-report[1], linkperf:perf-archive[1]
index 23219c65c16f77892a30b9b85f38609f1b1dc1f5..ff0f433b3fce1ee402a375b62703d7034c468e34 100644 (file)
@@ -37,7 +37,11 @@ OPTIONS
 
 -s <key[,key2...]>::
 --sort=<key[,key2...]>::
-       Sort the output (default: frag,hit,bytes)
+       Sort the output (default: 'frag,hit,bytes' for slab and 'bytes,hit'
+       for page).  Available sort keys are 'ptr, callsite, bytes, hit,
+       pingpong, frag' for slab and 'page, callsite, bytes, hit, order,
+       migtype, gfp' for page.  This option should be preceded by one of the
+       mode selection options, i.e. --slab, --page, --alloc and/or --caller.
 
 -l <num>::
 --line=<num>::
@@ -52,6 +56,11 @@ OPTIONS
 --page::
        Analyze page allocator events
 
+--live::
+       Show live page stats.  perf kmem shows total allocation stats by
+       default, but this option shows live (currently allocated) pages
+       instead.  (This option works with the --page option only.)
+
 SEE ALSO
 --------
 linkperf:perf-record[1]
index 6252e776009c022dda55970dad2aa72e13610cf4..6a5bb2b170391da59b572f8c4230209fb04199e2 100644 (file)
@@ -151,6 +151,12 @@ STAT LIVE OPTIONS
        Show events other than HLT (x86 only) or Wait state (s390 only)
        that take longer than duration usecs.
 
+--proc-map-timeout::
+       Processing a pre-existing thread's /proc/XXX/mmap file may take
+       a long time, because the file may be huge.  A timeout is needed
+       in such cases.
+       This option sets the timeout limit.  The default value is 500 ms.
+
 SEE ALSO
 --------
 linkperf:perf-top[1], linkperf:perf-record[1], linkperf:perf-report[1],
index 239609c09f83a10ebde91c8254ab94c9b4d15354..3a8a9ba2b0412aba28f1796e283106f84ec63266 100644 (file)
@@ -14,11 +14,13 @@ or
 or
 'perf probe' [options] --del='[GROUP:]EVENT' [...]
 or
-'perf probe' --list
+'perf probe' --list[=[GROUP:]EVENT]
 or
 'perf probe' [options] --line='LINE'
 or
 'perf probe' [options] --vars='PROBEPOINT'
+or
+'perf probe' [options] --funcs
 
 DESCRIPTION
 -----------
@@ -64,8 +66,8 @@ OPTIONS
        classes(e.g. [a-z], [!A-Z]).
 
 -l::
---list::
-       List up current probe events.
+--list[=[GROUP:]EVENT]::
+       List current probe events.  This can also accept filtering patterns of event names.
 
 -L::
 --line=::
@@ -81,10 +83,15 @@ OPTIONS
        (Only for --vars) Show external defined variables in addition to local
        variables.
 
+--no-inlines::
+       (Only for --add) Search only for non-inlined functions.  Functions
+       which have no instances are ignored.
+
 -F::
---funcs::
+--funcs[=FILTER]::
        Show available functions in given module or kernel. With -x/--exec,
        can also list functions in a user space executable / shared library.
+       This can also accept a FILTER rule argument.
 
 --filter=FILTER::
        (Only for --vars and --funcs) Set filter. FILTER is a combination of glob
@@ -148,7 +155,7 @@ Each probe argument follows below syntax.
  [NAME=]LOCALVAR|$retval|%REG|@SYMBOL[:TYPE]
 
 'NAME' specifies the name of this argument (optional). You can use the name of local variable, local data structure member (e.g. var->field, var.field2), local array with fixed index (e.g. array[1], var->array[0], var->pointer[2]), or kprobe-tracer argument format (e.g. $retval, %ax, etc). Note that the name of this argument will be set as the last member name if you specify a local data structure member (e.g. field2 for 'var->field1.field2'.)
-'$vars' special argument is also available for NAME, it is expanded to the local variables which can access at given probe point.
+The '$vars' and '$params' special arguments are also available for NAME: '$vars' is expanded to the local variables (including function parameters) which can be accessed at the given probe point, while '$params' is expanded to the function parameters only.
 'TYPE' casts the type of this argument (optional). If omitted, perf probe automatically set the type based on debuginfo. You can specify 'string' type only for the local variable or structure member which is an array of or a pointer to 'char' or 'unsigned char' type.
 
 On x86 systems %REG is always the short form of the register: for example %AX. %RAX or %EAX is not valid.
index 4847a793de6516df66dc609478c36724772bb941..9b9d9d086680ae82885d2603d2b90435132234bc 100644 (file)
@@ -108,6 +108,8 @@ OPTIONS
        Number of mmap data pages (must be a power of two) or size
        specification with appended unit character - B/K/M/G. The
        size is rounded up to have nearest pages power of two value.
+       Also, by adding a comma, the number of mmap pages for AUX
+       area tracing can be specified.
 
 --group::
        Put all events in a single event group.  This precedes the --event
@@ -145,16 +147,21 @@ OPTIONS
 
 -s::
 --stat::
-       Per thread counts.
+       Record per-thread event counts.  Use it with 'perf report -T' to see
+       the values.
 
 -d::
 --data::
-       Sample addresses.
+       Record the sample addresses.
 
 -T::
 --timestamp::
-       Sample timestamps. Use it with 'perf report -D' to see the timestamps,
-       for instance.
+       Record the sample timestamps. Use it with 'perf report -D' to see the
+       timestamps, for instance.
+
+-P::
+--period::
+       Record the sample period.
 
 -n::
 --no-samples::
@@ -257,6 +264,18 @@ records. See clock_gettime(). In particular CLOCK_MONOTONIC and
 CLOCK_MONOTONIC_RAW are supported, some events might also allow
 CLOCK_BOOTTIME, CLOCK_REALTIME and CLOCK_TAI.
 
+-S::
+--snapshot::
+Select AUX area tracing Snapshot Mode. This option is valid only with an
+AUX area tracing event. Optionally the number of bytes to capture per
+snapshot can be specified. In Snapshot Mode, trace data is captured only when
+signal SIGUSR2 is received.
+
+--proc-map-timeout::
+Processing a pre-existing thread's /proc/XXX/mmap file may take a long time,
+because the file may be huge.  A timeout is needed in such cases.
+This option sets the timeout limit.  The default value is 500 ms.
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-list[1]
index 4879cf63882482155ffad05a3bc2abc8dff90f14..c33b69f3374fda01b6a9ee7dcca6b7bbb2f74e8c 100644 (file)
@@ -34,7 +34,8 @@ OPTIONS
 
 -T::
 --threads::
-       Show per-thread event counters
+       Show per-thread event counters.  The input data file should be recorded
+       with the -s option.
 -c::
 --comms=::
        Only consider symbols in these comms. CSV that understands
@@ -193,6 +194,7 @@ OPTIONS
        Accumulate callchain of children to parent entry so that they can
        show up in the output.  The output will have a new "Children" column
        and will be sorted on the data.  It requires callchains are recorded.
+       See the `overhead calculation' section for more details.
 
 --max-stack::
        Set the stack depth limit when parsing the callchain, anything
@@ -323,6 +325,37 @@ OPTIONS
 --header-only::
        Show only perf.data header (forces --stdio).
 
+--itrace::
+       Options for decoding instruction tracing data. The options are:
+
+               i       synthesize instructions events
+               b       synthesize branches events
+               c       synthesize branches events (calls only)
+               r       synthesize branches events (returns only)
+               x       synthesize transactions events
+               e       synthesize error events
+               d       create a debug log
+               g       synthesize a call chain (use with i or x)
+
+       The default is all events, i.e. the same as --itrace=ibxe
+
+       In addition, the period (default 100000) for instructions events
+       can be specified in units of:
+
+               i       instructions
+               t       ticks
+               ms      milliseconds
+               us      microseconds
+               ns      nanoseconds (default)
+
+       Also the call chain size (default 16, max. 1024) for instructions or
+       transactions events can be specified.
+
+       To disable decoding entirely, use --no-itrace.
+
+
+include::callchain-overhead-calculation.txt[]
+
 SEE ALSO
 --------
 linkperf:perf-stat[1], linkperf:perf-annotate[1]
index 79445750fcb322fb323c38b6e9a921500f5241c6..c82df572fac2ed4b4285487485d0ed33492e36c5 100644 (file)
@@ -115,7 +115,8 @@ OPTIONS
 -f::
 --fields::
         Comma separated list of fields to print. Options are:
-        comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff, srcline, period.
+        comm, tid, pid, time, cpu, event, trace, ip, sym, dso, addr, symoff,
+       srcline, period, flags.
         Field list can be prepended with the type, trace, sw or hw,
         to indicate to which event type the field list applies.
         e.g., -f sw:comm,tid,time,ip,sym  and -f trace:time,cpu,trace
@@ -165,6 +166,12 @@ OPTIONS
 
        At this point usage is displayed, and perf-script exits.
 
+       The flags field is synthesized and may have a value when Instruction
+       Trace decoding is used. The flags are "bcrosyiABEx", which stand for branch,
+       call, return, conditional, system, asynchronous, interrupt,
+       transaction abort, trace begin, trace end, and in transaction,
+       respectively.
+
        Finally, a user may not set fields to none for all event types.
        i.e., -f "" is not allowed.
 
@@ -221,6 +228,34 @@ OPTIONS
 --header-only
        Show only perf.data header.
 
+--itrace::
+       Options for decoding instruction tracing data. The options are:
+
+               i       synthesize instructions events
+               b       synthesize branches events
+               c       synthesize branches events (calls only)
+               r       synthesize branches events (returns only)
+               x       synthesize transactions events
+               e       synthesize error events
+               d       create a debug log
+               g       synthesize a call chain (use with i or x)
+
+       The default is all events, i.e. the same as --itrace=ibxe
+
+       In addition, the period (default 100000) for instructions events
+       can be specified in units of:
+
+               i       instructions
+               t       ticks
+               ms      milliseconds
+               us      microseconds
+               ns      nanoseconds (default)
+
+       Also the call chain size (default 16, max. 1024) for instructions or
+       transactions events can be specified.
+
+       To disable decoding entirely, use --no-itrace.
+
 SEE ALSO
 --------
 linkperf:perf-record[1], linkperf:perf-script-perl[1],
index 3265b10705188027ab30256f7475d218d78f6fbb..776aec4d092771ed8ea7c68c7ce205d0b7578aaa 100644 (file)
@@ -168,7 +168,7 @@ Default is to monitor all CPUS.
        Accumulate callchain of children to parent entry so that they can
        show up in the output.  The output will have a new "Children" column
        and will be sorted on the data.  It requires -g/--call-graph option
-       enabled.
+       enabled.  See the `overhead calculation' section for more details.
 
 --max-stack::
        Set the stack depth limit when parsing the callchain, anything
@@ -201,6 +201,12 @@ Default is to monitor all CPUS.
        Force each column width to the provided list, for large terminal
        readability.  0 means no limit (default behavior).
 
+--proc-map-timeout::
+       Processing a pre-existing thread's /proc/XXX/mmap file may take
+       a long time, because the file may be huge.  A timeout is needed
+       in such cases.
+       This option sets the timeout limit.  The default value is 500 ms.
+
 
 INTERACTIVE PROMPTING KEYS
 --------------------------
@@ -234,6 +240,7 @@ INTERACTIVE PROMPTING KEYS
 
 Pressing any unmapped key displays a menu, and prompts for input.
 
+include::callchain-overhead-calculation.txt[]
 
 SEE ALSO
 --------
index ba03fd5d1a5476ce218e6b5c7d36720a9ec26294..7ea078658a875029fb0ad03101bacdbe45e31564 100644 (file)
@@ -35,7 +35,7 @@ OPTIONS
 
 -e::
 --expr::
-       List of events to show, currently only syscall names.
+       List of syscalls to show, currently only syscall names.
        Prefixing with ! shows all syscalls but the ones specified.  You may
        need to escape it.
 
@@ -121,6 +121,11 @@ the thread executes on the designated CPUs. Default is to monitor all CPUs.
 --event::
        Trace other events, see 'perf list' for a complete list.
 
+--proc-map-timeout::
+       Processing a pre-existing thread's /proc/XXX/mmap file may take a long time,
+       because the file may be huge.  A timeout is needed in such cases.
+       This option sets the timeout limit.  The default value is 500 ms.
+
 PAGEFAULTS
 ----------
 
index 11ccbb22ea2b8f1538f07450504c35b50c10399f..fe50a1b34aa0035dec38a07a752ae33df9ad0e5d 100644 (file)
@@ -1,12 +1,30 @@
 tools/perf
+tools/arch/alpha/include/asm/barrier.h
+tools/arch/arm/include/asm/barrier.h
+tools/arch/ia64/include/asm/barrier.h
+tools/arch/mips/include/asm/barrier.h
+tools/arch/powerpc/include/asm/barrier.h
+tools/arch/s390/include/asm/barrier.h
+tools/arch/sh/include/asm/barrier.h
+tools/arch/sparc/include/asm/barrier.h
+tools/arch/sparc/include/asm/barrier_32.h
+tools/arch/sparc/include/asm/barrier_64.h
+tools/arch/tile/include/asm/barrier.h
+tools/arch/x86/include/asm/barrier.h
+tools/arch/xtensa/include/asm/barrier.h
 tools/scripts
 tools/build
+tools/arch/x86/include/asm/atomic.h
+tools/arch/x86/include/asm/rmwcc.h
 tools/lib/traceevent
 tools/lib/api
 tools/lib/symbol/kallsyms.c
 tools/lib/symbol/kallsyms.h
 tools/lib/util/find_next_bit.c
+tools/include/asm/atomic.h
+tools/include/asm/barrier.h
 tools/include/asm/bug.h
+tools/include/asm-generic/barrier.h
 tools/include/asm-generic/bitops/arch_hweight.h
 tools/include/asm-generic/bitops/atomic.h
 tools/include/asm-generic/bitops/const_hweight.h
@@ -17,35 +35,35 @@ tools/include/asm-generic/bitops/fls64.h
 tools/include/asm-generic/bitops/fls.h
 tools/include/asm-generic/bitops/hweight.h
 tools/include/asm-generic/bitops.h
+tools/include/linux/atomic.h
 tools/include/linux/bitops.h
 tools/include/linux/compiler.h
 tools/include/linux/export.h
 tools/include/linux/hash.h
+tools/include/linux/kernel.h
+tools/include/linux/list.h
 tools/include/linux/log2.h
+tools/include/linux/poison.h
 tools/include/linux/types.h
 include/asm-generic/bitops/arch_hweight.h
 include/asm-generic/bitops/const_hweight.h
 include/asm-generic/bitops/fls64.h
 include/asm-generic/bitops/__fls.h
 include/asm-generic/bitops/fls.h
-include/linux/const.h
 include/linux/perf_event.h
 include/linux/rbtree.h
 include/linux/list.h
 include/linux/hash.h
 include/linux/stringify.h
-lib/find_next_bit.c
 lib/hweight.c
 lib/rbtree.c
 include/linux/swab.h
 arch/*/include/asm/unistd*.h
-arch/*/include/asm/perf_regs.h
 arch/*/include/uapi/asm/unistd*.h
 arch/*/include/uapi/asm/perf_regs.h
 arch/*/lib/memcpy*.S
 arch/*/lib/memset*.S
 include/linux/poison.h
-include/linux/magic.h
 include/linux/hw_breakpoint.h
 include/linux/rbtree_augmented.h
 include/uapi/linux/perf_event.h
index c699dc35eef9cbd1bc96427f453acc23a2e9c103..d31a7bbd7cee8610db236c7842cfb5ec63dc56b0 100644 (file)
@@ -24,7 +24,7 @@ unexport MAKEFLAGS
 # (To override it, run 'make JOBS=1' and similar.)
 #
 ifeq ($(JOBS),)
-  JOBS := $(shell egrep -c '^processor|^CPU' /proc/cpuinfo 2>/dev/null)
+  JOBS := $(shell (getconf _NPROCESSORS_ONLN || egrep -c '^processor|^CPU[0-9]' /proc/cpuinfo) 2>/dev/null)
   ifeq ($(JOBS),0)
     JOBS := 1
   endif
index c43a2051759157dd6b65118b34b342b149f58861..1af0cfeb7a57824980ef64fdf4d26f643fa7ab6c 100644 (file)
@@ -73,6 +73,8 @@ include config/utilities.mak
 # for CTF data format.
 #
 # Define NO_LZMA if you do not want to support compressed (xz) kernel modules
+#
+# Define NO_AUXTRACE if you do not want AUX area tracing support
 
 ifeq ($(srctree),)
 srctree := $(patsubst %/,%,$(dir $(shell pwd)))
@@ -171,6 +173,9 @@ endif
 LIBTRACEEVENT = $(TE_PATH)libtraceevent.a
 export LIBTRACEEVENT
 
+LIBTRACEEVENT_DYNAMIC_LIST = $(TE_PATH)libtraceevent-dynamic-list
+LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS = -Xlinker --dynamic-list=$(LIBTRACEEVENT_DYNAMIC_LIST)
+
 LIBAPI = $(LIB_PATH)libapi.a
 export LIBAPI
 
@@ -185,8 +190,9 @@ python-clean := $(call QUIET_CLEAN, python) $(RM) -r $(PYTHON_EXTBUILD) $(OUTPUT
 PYTHON_EXT_SRCS := $(shell grep -v ^\# util/python-ext-sources)
 PYTHON_EXT_DEPS := util/python-ext-sources util/setup.py $(LIBTRACEEVENT) $(LIBAPI)
 
-$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS)
-       $(QUIET_GEN)CFLAGS='$(CFLAGS)' $(PYTHON_WORD) util/setup.py \
+$(OUTPUT)python/perf.so: $(PYTHON_EXT_SRCS) $(PYTHON_EXT_DEPS) $(LIBTRACEEVENT_DYNAMIC_LIST)
+       $(QUIET_GEN)CFLAGS='$(CFLAGS)' LDFLAGS='$(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS)' \
+         $(PYTHON_WORD) util/setup.py \
          --quiet build_ext; \
        mkdir -p $(OUTPUT)python && \
        cp $(PYTHON_EXTBUILD_LIB)perf.so $(OUTPUT)python/
@@ -276,8 +282,9 @@ build := -f $(srctree)/tools/build/Makefile.build dir=. obj
 $(PERF_IN): $(OUTPUT)PERF-VERSION-FILE $(OUTPUT)common-cmds.h FORCE
        $(Q)$(MAKE) $(build)=perf
 
-$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN)
-       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(PERF_IN) $(LIBS) -o $@
+$(OUTPUT)perf: $(PERFLIBS) $(PERF_IN) $(LIBTRACEEVENT_DYNAMIC_LIST)
+       $(QUIET_LINK)$(CC) $(CFLAGS) $(LDFLAGS) $(LIBTRACEEVENT_DYNAMIC_LIST_LDFLAGS) \
+               $(PERF_IN) $(LIBS) -o $@
 
 $(GTK_IN): FORCE
        $(Q)$(MAKE) $(build)=gtk
@@ -371,7 +378,13 @@ $(LIB_FILE): $(LIBPERF_IN)
 LIBTRACEEVENT_FLAGS += plugin_dir=$(plugindir_SQ)
 
 $(LIBTRACEEVENT): FORCE
-       $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a plugins
+       $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent.a
+
+libtraceevent_plugins: FORCE
+       $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) plugins
+
+$(LIBTRACEEVENT_DYNAMIC_LIST): libtraceevent_plugins
+       $(Q)$(MAKE) -C $(TRACE_EVENT_DIR) $(LIBTRACEEVENT_FLAGS) O=$(OUTPUT) $(OUTPUT)libtraceevent-dynamic-list
 
 $(LIBTRACEEVENT)-clean:
        $(call QUIET_CLEAN, libtraceevent)
@@ -462,7 +475,7 @@ check: $(OUTPUT)common-cmds.h
 
 install-gtk:
 
-install-bin: all install-gtk
+install-tools: all install-gtk
        $(call QUIET_INSTALL, binaries) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(bindir_SQ)'; \
                $(INSTALL) $(OUTPUT)perf '$(DESTDIR_SQ)$(bindir_SQ)'; \
@@ -500,12 +513,16 @@ endif
        $(call QUIET_INSTALL, perf_completion-script) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d'; \
                $(INSTALL) perf-completion.sh '$(DESTDIR_SQ)$(sysconfdir_SQ)/bash_completion.d/perf'
+
+install-tests: all install-gtk
        $(call QUIET_INSTALL, tests) \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
                $(INSTALL) tests/attr.py '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests'; \
                $(INSTALL) -d -m 755 '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'; \
                $(INSTALL) tests/attr/* '$(DESTDIR_SQ)$(perfexec_instdir_SQ)/tests/attr'
 
+install-bin: install-tools install-tests
+
 install: install-bin try-install-man install-traceevent-plugins
 
 install-python_ext:
@@ -549,4 +566,5 @@ FORCE:
 .PHONY: all install clean config-clean strip install-gtk
 .PHONY: shell_compatibility_test please_set_SHELL_PATH_to_a_more_modern_shell
 .PHONY: $(GIT-HEAD-PHONY) TAGS tags cscope FORCE single_dep
+.PHONY: libtraceevent_plugins
 
index 54afe4a467e7d9a3f505cad40650378bdcd27296..41bf61da476a4ce3150fc0184c84718d807be894 100644 (file)
@@ -1 +1,2 @@
 libperf-y += util/
+libperf-$(CONFIG_DWARF_UNWIND) += tests/
index 1d3f39c3aa564fd2e85a950fddcb4bba18f5e8db..4e5af27e3fbfa5f42d9d474d8ff7dc237722206b 100644 (file)
@@ -5,8 +5,11 @@
 #include <linux/types.h>
 #include <asm/perf_regs.h>
 
+void perf_regs_load(u64 *regs);
+
 #define PERF_REGS_MASK ((1ULL << PERF_REG_ARM64_MAX) - 1)
 #define PERF_REGS_MAX  PERF_REG_ARM64_MAX
+#define PERF_SAMPLE_REGS_ABI   PERF_SAMPLE_REGS_ABI_64
 
 #define PERF_REG_IP    PERF_REG_ARM64_PC
 #define PERF_REG_SP    PERF_REG_ARM64_SP
diff --git a/tools/perf/arch/arm64/tests/Build b/tools/perf/arch/arm64/tests/Build
new file mode 100644 (file)
index 0000000..b30eff9
--- /dev/null
@@ -0,0 +1,2 @@
+libperf-y += regs_load.o
+libperf-y += dwarf-unwind.o
diff --git a/tools/perf/arch/arm64/tests/dwarf-unwind.c b/tools/perf/arch/arm64/tests/dwarf-unwind.c
new file mode 100644 (file)
index 0000000..cf04a4c
--- /dev/null
@@ -0,0 +1,61 @@
+#include <string.h>
+#include "perf_regs.h"
+#include "thread.h"
+#include "map.h"
+#include "event.h"
+#include "debug.h"
+#include "tests/tests.h"
+
+#define STACK_SIZE 8192
+
+static int sample_ustack(struct perf_sample *sample,
+               struct thread *thread, u64 *regs)
+{
+       struct stack_dump *stack = &sample->user_stack;
+       struct map *map;
+       unsigned long sp;
+       u64 stack_size, *buf;
+
+       buf = malloc(STACK_SIZE);
+       if (!buf) {
+               pr_debug("failed to allocate sample uregs data\n");
+               return -1;
+       }
+
+       sp = (unsigned long) regs[PERF_REG_ARM64_SP];
+
+       map = map_groups__find(thread->mg, MAP__VARIABLE, (u64) sp);
+       if (!map) {
+               pr_debug("failed to get stack map\n");
+               free(buf);
+               return -1;
+       }
+
+       stack_size = map->end - sp;
+       stack_size = stack_size > STACK_SIZE ? STACK_SIZE : stack_size;
+
+       memcpy(buf, (void *) sp, stack_size);
+       stack->data = (char *) buf;
+       stack->size = stack_size;
+       return 0;
+}
+
+int test__arch_unwind_sample(struct perf_sample *sample,
+               struct thread *thread)
+{
+       struct regs_dump *regs = &sample->user_regs;
+       u64 *buf;
+
+       buf = calloc(1, sizeof(u64) * PERF_REGS_MAX);
+       if (!buf) {
+               pr_debug("failed to allocate sample uregs data\n");
+               return -1;
+       }
+
+       perf_regs_load(buf);
+       regs->abi  = PERF_SAMPLE_REGS_ABI;
+       regs->regs = buf;
+       regs->mask = PERF_REGS_MASK;
+
+       return sample_ustack(sample, thread, buf);
+}
diff --git a/tools/perf/arch/arm64/tests/regs_load.S b/tools/perf/arch/arm64/tests/regs_load.S
new file mode 100644 (file)
index 0000000..025b46e
--- /dev/null
@@ -0,0 +1,46 @@
+#include <linux/linkage.h>
+
+.text
+.type perf_regs_load,%function
+#define STR_REG(r)     str x##r, [x0, 8 * r]
+#define LDR_REG(r)     ldr x##r, [x0, 8 * r]
+#define SP     (8 * 31)
+#define PC     (8 * 32)
+ENTRY(perf_regs_load)
+       STR_REG(0)
+       STR_REG(1)
+       STR_REG(2)
+       STR_REG(3)
+       STR_REG(4)
+       STR_REG(5)
+       STR_REG(6)
+       STR_REG(7)
+       STR_REG(8)
+       STR_REG(9)
+       STR_REG(10)
+       STR_REG(11)
+       STR_REG(12)
+       STR_REG(13)
+       STR_REG(14)
+       STR_REG(15)
+       STR_REG(16)
+       STR_REG(17)
+       STR_REG(18)
+       STR_REG(19)
+       STR_REG(20)
+       STR_REG(21)
+       STR_REG(22)
+       STR_REG(23)
+       STR_REG(24)
+       STR_REG(25)
+       STR_REG(26)
+       STR_REG(27)
+       STR_REG(28)
+       STR_REG(29)
+       STR_REG(30)
+       mov x1, sp
+       str x1, [x0, #SP]
+       str x30, [x0, #PC]
+       LDR_REG(1)
+       ret
+ENDPROC(perf_regs_load)
index 49776f190abfab295920534840aa1df479a4c094..b7bb42c4469401d76527264f5e538b363b1d4bf9 100644 (file)
@@ -61,7 +61,7 @@ const char *const mips_triplets[] = {
 static bool lookup_path(char *name)
 {
        bool found = false;
-       char *path, *tmp;
+       char *path, *tmp = NULL;
        char buf[PATH_MAX];
        char *env = getenv("PATH");
 
index 0af6e9b3f72857af68ddef33862f00d884a5da1e..7b8b0d1a1b626065e0b414f42a7a998723e0e57f 100644 (file)
@@ -1,4 +1,5 @@
 libperf-y += header.o
+libperf-y += sym-handling.o
 
 libperf-$(CONFIG_DWARF) += dwarf-regs.o
 libperf-$(CONFIG_DWARF) += skip-callchain-idx.o
diff --git a/tools/perf/arch/powerpc/util/sym-handling.c b/tools/perf/arch/powerpc/util/sym-handling.c
new file mode 100644 (file)
index 0000000..bbc1a50
--- /dev/null
@@ -0,0 +1,82 @@
+/*
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, version 2, as
+ * published by the Free Software Foundation.
+ *
+ * Copyright (C) 2015 Naveen N. Rao, IBM Corporation
+ */
+
+#include "debug.h"
+#include "symbol.h"
+#include "map.h"
+#include "probe-event.h"
+
+#ifdef HAVE_LIBELF_SUPPORT
+bool elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+{
+       return ehdr.e_type == ET_EXEC ||
+              ehdr.e_type == ET_REL ||
+              ehdr.e_type == ET_DYN;
+}
+
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+void arch__elf_sym_adjust(GElf_Sym *sym)
+{
+       sym->st_value += PPC64_LOCAL_ENTRY_OFFSET(sym->st_other);
+}
+#endif
+#endif
+
+#if !defined(_CALL_ELF) || _CALL_ELF != 2
+int arch__choose_best_symbol(struct symbol *syma,
+                            struct symbol *symb __maybe_unused)
+{
+       char *sym = syma->name;
+
+       /* Skip over any initial dot */
+       if (*sym == '.')
+               sym++;
+
+       /* Avoid "SyS" kernel syscall aliases */
+       if (strlen(sym) >= 3 && !strncmp(sym, "SyS", 3))
+               return SYMBOL_B;
+       if (strlen(sym) >= 10 && !strncmp(sym, "compat_SyS", 10))
+               return SYMBOL_B;
+
+       return SYMBOL_A;
+}
+
+/* Allow matching against dot variants */
+int arch__compare_symbol_names(const char *namea, const char *nameb)
+{
+       /* Skip over initial dot */
+       if (*namea == '.')
+               namea++;
+       if (*nameb == '.')
+               nameb++;
+
+       return strcmp(namea, nameb);
+}
+#endif
+
+#if defined(_CALL_ELF) && _CALL_ELF == 2
+bool arch__prefers_symtab(void)
+{
+       return true;
+}
+
+#define PPC64LE_LEP_OFFSET     8
+
+void arch__fix_tev_from_maps(struct perf_probe_event *pev,
+                            struct probe_trace_event *tev, struct map *map)
+{
+       /*
+        * ppc64 ABIv2 local entry point is currently always 2 instructions
+        * (8 bytes) after the global entry point.
+        */
+       if (!pev->uprobes && map->dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS) {
+               tev->point.address += PPC64LE_LEP_OFFSET;
+               tev->point.offset += PPC64LE_LEP_OFFSET;
+       }
+}
+#endif
index 5ce98023d518fce2d51ee11e35b0a0f2df12d8de..c3ab760e06b4d7627896723dc9fe2b15c5327a0b 100644 (file)
@@ -3,6 +3,7 @@ perf-y += sched-pipe.o
 perf-y += mem-memcpy.o
 perf-y += futex-hash.o
 perf-y += futex-wake.o
+perf-y += futex-wake-parallel.o
 perf-y += futex-requeue.o
 
 perf-$(CONFIG_X86_64) += mem-memcpy-x86-64-asm.o
index 3c4dd44d45cb7b668bd22822b6f105c1de9eaf86..70b2f718cc217976ee73bd656d66b9aa884dfded 100644 (file)
@@ -33,6 +33,8 @@ extern int bench_mem_memcpy(int argc, const char **argv,
 extern int bench_mem_memset(int argc, const char **argv, const char *prefix);
 extern int bench_futex_hash(int argc, const char **argv, const char *prefix);
 extern int bench_futex_wake(int argc, const char **argv, const char *prefix);
+extern int bench_futex_wake_parallel(int argc, const char **argv,
+                                    const char *prefix);
 extern int bench_futex_requeue(int argc, const char **argv, const char *prefix);
 
 #define BENCH_FORMAT_DEFAULT_STR       "default"
diff --git a/tools/perf/bench/futex-wake-parallel.c b/tools/perf/bench/futex-wake-parallel.c
new file mode 100644 (file)
index 0000000..6d8c9fa
--- /dev/null
@@ -0,0 +1,294 @@
+/*
+ * Copyright (C) 2015 Davidlohr Bueso.
+ *
+ * Block a bunch of threads and let parallel waker threads wake up an
+ * equal number of them. The program output reflects the avg latency
+ * for each individual thread to service its share of work. Ultimately
+ * it can be used to measure futex_wake() changes.
+ */
+
+#include "../perf.h"
+#include "../util/util.h"
+#include "../util/stat.h"
+#include "../util/parse-options.h"
+#include "../util/header.h"
+#include "bench.h"
+#include "futex.h"
+
+#include <err.h>
+#include <stdlib.h>
+#include <sys/time.h>
+#include <pthread.h>
+
+struct thread_data {
+       pthread_t worker;
+       unsigned int nwoken;
+       struct timeval runtime;
+};
+
+static unsigned int nwakes = 1;
+
+/* all threads will block on the same futex -- hash bucket chaos ;) */
+static u_int32_t futex = 0;
+
+static pthread_t *blocked_worker;
+static bool done = false, silent = false, fshared = false;
+static unsigned int nblocked_threads = 0, nwaking_threads = 0;
+static pthread_mutex_t thread_lock;
+static pthread_cond_t thread_parent, thread_worker;
+static struct stats waketime_stats, wakeup_stats;
+static unsigned int ncpus, threads_starting;
+static int futex_flag = 0;
+
+static const struct option options[] = {
+       OPT_UINTEGER('t', "threads", &nblocked_threads, "Specify amount of threads"),
+       OPT_UINTEGER('w', "nwakers", &nwaking_threads, "Specify amount of waking threads"),
+       OPT_BOOLEAN( 's', "silent",  &silent,   "Silent mode: do not display data/details"),
+       OPT_BOOLEAN( 'S', "shared",  &fshared,  "Use shared futexes instead of private ones"),
+       OPT_END()
+};
+
+static const char * const bench_futex_wake_parallel_usage[] = {
+       "perf bench futex wake-parallel <options>",
+       NULL
+};
+
+static void *waking_workerfn(void *arg)
+{
+       struct thread_data *waker = (struct thread_data *) arg;
+       struct timeval start, end;
+
+       gettimeofday(&start, NULL);
+
+       waker->nwoken = futex_wake(&futex, nwakes, futex_flag);
+       if (waker->nwoken != nwakes)
+               warnx("couldn't wakeup all tasks (%d/%d)",
+                     waker->nwoken, nwakes);
+
+       gettimeofday(&end, NULL);
+       timersub(&end, &start, &waker->runtime);
+
+       pthread_exit(NULL);
+       return NULL;
+}
+
+static void wakeup_threads(struct thread_data *td, pthread_attr_t thread_attr)
+{
+       unsigned int i;
+
+       pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
+
+       /* create the waker threads */
+       for (i = 0; i < nwaking_threads; i++) {
+               /*
+                * Thread creation order will impact per-thread latency
+                * as it will affect the order to acquire the hb spinlock.
+                * For now let the scheduler decide.
+                */
+               if (pthread_create(&td[i].worker, &thread_attr,
+                                  waking_workerfn, (void *)&td[i]))
+                       err(EXIT_FAILURE, "pthread_create");
+       }
+
+       for (i = 0; i < nwaking_threads; i++)
+               if (pthread_join(td[i].worker, NULL))
+                       err(EXIT_FAILURE, "pthread_join");
+}
+
+static void *blocked_workerfn(void *arg __maybe_unused)
+{
+       pthread_mutex_lock(&thread_lock);
+       threads_starting--;
+       if (!threads_starting)
+               pthread_cond_signal(&thread_parent);
+       pthread_cond_wait(&thread_worker, &thread_lock);
+       pthread_mutex_unlock(&thread_lock);
+
+       while (1) { /* handle spurious wakeups */
+               if (futex_wait(&futex, 0, NULL, futex_flag) != EINTR)
+                       break;
+       }
+
+       pthread_exit(NULL);
+       return NULL;
+}
+
+static void block_threads(pthread_t *w, pthread_attr_t thread_attr)
+{
+       cpu_set_t cpu;
+       unsigned int i;
+
+       threads_starting = nblocked_threads;
+
+       /* create and block all threads */
+       for (i = 0; i < nblocked_threads; i++) {
+               CPU_ZERO(&cpu);
+               CPU_SET(i % ncpus, &cpu);
+
+               if (pthread_attr_setaffinity_np(&thread_attr, sizeof(cpu_set_t), &cpu))
+                       err(EXIT_FAILURE, "pthread_attr_setaffinity_np");
+
+               if (pthread_create(&w[i], &thread_attr, blocked_workerfn, NULL))
+                       err(EXIT_FAILURE, "pthread_create");
+       }
+}
+
+static void print_run(struct thread_data *waking_worker, unsigned int run_num)
+{
+       unsigned int i, wakeup_avg;
+       double waketime_avg, waketime_stddev;
+       struct stats __waketime_stats, __wakeup_stats;
+
+       init_stats(&__wakeup_stats);
+       init_stats(&__waketime_stats);
+
+       for (i = 0; i < nwaking_threads; i++) {
+               update_stats(&__waketime_stats, waking_worker[i].runtime.tv_usec);
+               update_stats(&__wakeup_stats, waking_worker[i].nwoken);
+       }
+
+       waketime_avg = avg_stats(&__waketime_stats);
+       waketime_stddev = stddev_stats(&__waketime_stats);
+       wakeup_avg = avg_stats(&__wakeup_stats);
+
+       printf("[Run %d]: Avg per-thread latency (waking %d/%d threads) "
+              "in %.4f ms (+-%.2f%%)\n", run_num + 1, wakeup_avg,
+              nblocked_threads, waketime_avg/1e3,
+              rel_stddev_stats(waketime_stddev, waketime_avg));
+}
+
+static void print_summary(void)
+{
+       unsigned int wakeup_avg;
+       double waketime_avg, waketime_stddev;
+
+       waketime_avg = avg_stats(&waketime_stats);
+       waketime_stddev = stddev_stats(&waketime_stats);
+       wakeup_avg = avg_stats(&wakeup_stats);
+
+       printf("Avg per-thread latency (waking %d/%d threads) in %.4f ms (+-%.2f%%)\n",
+              wakeup_avg,
+              nblocked_threads,
+              waketime_avg/1e3,
+              rel_stddev_stats(waketime_stddev, waketime_avg));
+}
+
+
+static void do_run_stats(struct thread_data *waking_worker)
+{
+       unsigned int i;
+
+       for (i = 0; i < nwaking_threads; i++) {
+               update_stats(&waketime_stats, waking_worker[i].runtime.tv_usec);
+               update_stats(&wakeup_stats, waking_worker[i].nwoken);
+       }
+
+}
+
+static void toggle_done(int sig __maybe_unused,
+                       siginfo_t *info __maybe_unused,
+                       void *uc __maybe_unused)
+{
+       done = true;
+}
+
+int bench_futex_wake_parallel(int argc, const char **argv,
+                             const char *prefix __maybe_unused)
+{
+       int ret = 0;
+       unsigned int i, j;
+       struct sigaction act;
+       pthread_attr_t thread_attr;
+       struct thread_data *waking_worker;
+
+       argc = parse_options(argc, argv, options,
+                            bench_futex_wake_parallel_usage, 0);
+       if (argc) {
+               usage_with_options(bench_futex_wake_parallel_usage, options);
+               exit(EXIT_FAILURE);
+       }
+
+       sigfillset(&act.sa_mask);
+       act.sa_sigaction = toggle_done;
+       sigaction(SIGINT, &act, NULL);
+
+       ncpus = sysconf(_SC_NPROCESSORS_ONLN);
+       if (!nblocked_threads)
+               nblocked_threads = ncpus;
+
+       /* some sanity checks */
+       if (nwaking_threads > nblocked_threads || !nwaking_threads)
+               nwaking_threads = nblocked_threads;
+
+       if (nblocked_threads % nwaking_threads)
+               errx(EXIT_FAILURE, "Must be perfectly divisible");
+       /*
+        * Each waking thread will wake up nwakes tasks in
+        * a single futex_wake call.
+        */
+       nwakes = nblocked_threads/nwaking_threads;
+
+       blocked_worker = calloc(nblocked_threads, sizeof(*blocked_worker));
+       if (!blocked_worker)
+               err(EXIT_FAILURE, "calloc");
+
+       if (!fshared)
+               futex_flag = FUTEX_PRIVATE_FLAG;
+
+       printf("Run summary [PID %d]: blocking on %d threads (at [%s] "
+              "futex %p), %d threads waking up %d at a time.\n\n",
+              getpid(), nblocked_threads, fshared ? "shared":"private",
+              &futex, nwaking_threads, nwakes);
+
+       init_stats(&wakeup_stats);
+       init_stats(&waketime_stats);
+
+       pthread_attr_init(&thread_attr);
+       pthread_mutex_init(&thread_lock, NULL);
+       pthread_cond_init(&thread_parent, NULL);
+       pthread_cond_init(&thread_worker, NULL);
+
+       for (j = 0; j < bench_repeat && !done; j++) {
+               waking_worker = calloc(nwaking_threads, sizeof(*waking_worker));
+               if (!waking_worker)
+                       err(EXIT_FAILURE, "calloc");
+
+               /* create, launch & block all threads */
+               block_threads(blocked_worker, thread_attr);
+
+               /* make sure all threads are already blocked */
+               pthread_mutex_lock(&thread_lock);
+               while (threads_starting)
+                       pthread_cond_wait(&thread_parent, &thread_lock);
+               pthread_cond_broadcast(&thread_worker);
+               pthread_mutex_unlock(&thread_lock);
+
+               usleep(100000);
+
+               /* Ok, all threads are patiently blocked, start waking folks up */
+               wakeup_threads(waking_worker, thread_attr);
+
+               for (i = 0; i < nblocked_threads; i++) {
+                       ret = pthread_join(blocked_worker[i], NULL);
+                       if (ret)
+                               err(EXIT_FAILURE, "pthread_join");
+               }
+
+               do_run_stats(waking_worker);
+               if (!silent)
+                       print_run(waking_worker, j);
+
+               free(waking_worker);
+       }
+
+       /* cleanup & report results */
+       pthread_cond_destroy(&thread_parent);
+       pthread_cond_destroy(&thread_worker);
+       pthread_mutex_destroy(&thread_lock);
+       pthread_attr_destroy(&thread_attr);
+
+       print_summary();
+
+       free(blocked_worker);
+       return ret;
+}
index 929f762be47e9735058f5c57bd394c4f09360c45..e5e41d3bdce724230c16a3df2a199914b9cab29d 100644 (file)
@@ -60,7 +60,12 @@ static void *workerfn(void *arg __maybe_unused)
        pthread_cond_wait(&thread_worker, &thread_lock);
        pthread_mutex_unlock(&thread_lock);
 
-       futex_wait(&futex1, 0, NULL, futex_flag);
+       while (1) {
+               if (futex_wait(&futex1, 0, NULL, futex_flag) != EINTR)
+                       break;
+       }
+
+       pthread_exit(NULL);
        return NULL;
 }
 
index ba5efa4710b558239ff79c08b025ddc2da06efc5..870b7e665a203264c1b7b27684a860cbe147c450 100644 (file)
@@ -8,6 +8,7 @@
 #include "../builtin.h"
 #include "../util/util.h"
 #include "../util/parse-options.h"
+#include "../util/cloexec.h"
 
 #include "bench.h"
 
@@ -23,6 +24,7 @@
 #include <pthread.h>
 #include <sys/mman.h>
 #include <sys/time.h>
+#include <sys/resource.h>
 #include <sys/wait.h>
 #include <sys/prctl.h>
 #include <sys/types.h>
@@ -51,6 +53,9 @@ struct thread_data {
        unsigned int            loops_done;
        u64                     val;
        u64                     runtime_ns;
+       u64                     system_time_ns;
+       u64                     user_time_ns;
+       double                  speed_gbs;
        pthread_mutex_t         *process_lock;
 };
 
@@ -1042,6 +1047,7 @@ static void *worker_thread(void *__tdata)
        u64 bytes_done;
        long work_done;
        u32 l;
+       struct rusage rusage;
 
        bind_to_cpumask(td->bind_cpumask);
        bind_to_memnode(td->bind_node);
@@ -1194,6 +1200,13 @@ static void *worker_thread(void *__tdata)
        timersub(&stop, &start0, &diff);
        td->runtime_ns = diff.tv_sec * 1000000000ULL;
        td->runtime_ns += diff.tv_usec * 1000ULL;
+       td->speed_gbs = bytes_done / (td->runtime_ns / 1e9) / 1e9;
+
+       getrusage(RUSAGE_THREAD, &rusage);
+       td->system_time_ns = rusage.ru_stime.tv_sec * 1000000000ULL;
+       td->system_time_ns += rusage.ru_stime.tv_usec * 1000ULL;
+       td->user_time_ns = rusage.ru_utime.tv_sec * 1000000000ULL;
+       td->user_time_ns += rusage.ru_utime.tv_usec * 1000ULL;
 
        free_data(thread_data, g->p.bytes_thread);
 
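A worked instance of the speed calculation added above, as a hedged standalone
snippet (the numbers are made up): bytes done divided by elapsed seconds,
scaled to GB (1e9 bytes) per second.

------------------------------------------
#include <stdio.h>

int main(void)
{
	unsigned long long bytes_done = 3000000000ULL;	/* 3 GB moved */
	unsigned long long runtime_ns = 1500000000ULL;	/* in 1.5 seconds */
	double speed_gbs = bytes_done / (runtime_ns / 1e9) / 1e9;

	printf("%.2f GB/sec\n", speed_gbs);		/* prints 2.00 GB/sec */
	return 0;
}
------------------------------------------
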
@@ -1420,7 +1433,7 @@ static int __bench_numa(const char *name)
        double runtime_sec_min;
        int wait_stat;
        double bytes;
-       int i, t;
+       int i, t, p;
 
        if (init())
                return -1;
@@ -1556,6 +1569,24 @@ static int __bench_numa(const char *name)
        print_res(name, bytes / runtime_sec_max / 1e9,
                "GB/sec,", "total-speed",       "GB/sec total speed");
 
+       if (g->p.show_details >= 2) {
+               char tname[32];
+               struct thread_data *td;
+               for (p = 0; p < g->p.nr_proc; p++) {
+                       for (t = 0; t < g->p.nr_threads; t++) {
+                               memset(tname, 0, 32);
+                               td = g->threads + p*g->p.nr_threads + t;
+                               snprintf(tname, 32, "process%d:thread%d", p, t);
+                               print_res(tname, td->speed_gbs,
+                                       "GB/sec",       "thread-speed", "GB/sec/thread speed");
+                               print_res(tname, td->system_time_ns / 1e9,
+                                       "secs", "thread-system-time", "system CPU time/thread");
+                               print_res(tname, td->user_time_ns / 1e9,
+                                       "secs", "thread-user-time", "user CPU time/thread");
+                       }
+               }
+       }
+
        free(pids);
 
        deinit();
index 71bf7451c0cad1bf1f43b946631bf949eed96c3c..2c1bec39c30ea191fecb46654df14dc6229d27d5 100644 (file)
@@ -59,6 +59,10 @@ static int perf_evsel__add_sample(struct perf_evsel *evsel,
            (al->sym == NULL ||
             strcmp(ann->sym_hist_filter, al->sym->name) != 0)) {
                /* We're only interested in a symbol named sym_hist_filter */
+               /*
+                * FIXME: why isn't this done in the symbol_filter when loading
+                * the DSO?
+                */
                if (al->sym != NULL) {
                        rb_erase(&al->sym->rb_node,
                                 &al->map->dso->symbols[al->map->type]);
@@ -84,6 +88,7 @@ static int process_sample_event(struct perf_tool *tool,
 {
        struct perf_annotate *ann = container_of(tool, struct perf_annotate, tool);
        struct addr_location al;
+       int ret = 0;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
@@ -92,15 +97,16 @@ static int process_sample_event(struct perf_tool *tool,
        }
 
        if (ann->cpu_list && !test_bit(sample->cpu, ann->cpu_bitmap))
-               return 0;
+               goto out_put;
 
        if (!al.filtered && perf_evsel__add_sample(evsel, sample, &al, ann)) {
                pr_warning("problem incrementing symbol count, "
                           "skipping event\n");
-               return -1;
+               ret = -1;
        }
-
-       return 0;
+out_put:
+       addr_location__put(&al);
+       return ret;
 }
 
 static int hist_entry__tty_annotate(struct hist_entry *he,
@@ -283,7 +289,6 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
                },
        };
        struct perf_data_file file = {
-               .path  = input_name,
                .mode  = PERF_DATA_MODE_READ,
        };
        const struct option options[] = {
@@ -324,6 +329,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
                   "objdump binary to use for disassembly and annotations"),
        OPT_BOOLEAN(0, "group", &symbol_conf.event_group,
                    "Show event group information together"),
+       OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
+                   "Show a column with the sum of periods"),
        OPT_END()
        };
        int ret = hists__init();
@@ -340,6 +347,8 @@ int cmd_annotate(int argc, const char **argv, const char *prefix __maybe_unused)
        else if (annotate.use_gtk)
                use_browser = 2;
 
+       file.path  = input_name;
+
        setup_browser(true);
 
        annotate.session = perf_session__new(&file, false, &annotate.tool);
index b9a56fa8333065271a9242e98cf052d9c8100b87..b5314e452ec7f24a2e9e4e6b8473d39b8ed7a4a7 100644 (file)
@@ -58,6 +58,7 @@ static struct bench mem_benchmarks[] = {
 static struct bench futex_benchmarks[] = {
        { "hash",       "Benchmark for futex hash table",               bench_futex_hash        },
        { "wake",       "Benchmark for futex wake calls",               bench_futex_wake        },
+       { "wake-parallel", "Benchmark for parallel futex wake calls",   bench_futex_wake_parallel },
        { "requeue",    "Benchmark for futex requeue calls",            bench_futex_requeue     },
        { "all",        "Test all futex benchmarks",                    NULL                    },
        { NULL,         NULL,                                           NULL                    }
index feb420f74c2d9fd34778a20e93764c2e5eaf47c8..9fe93c8d4fcff11b3c2638d901d8c62da5b921d2 100644 (file)
@@ -69,6 +69,15 @@ static int perf_session__list_build_ids(bool force, bool with_hits)
        session = perf_session__new(&file, false, &build_id__mark_dso_hit_ops);
        if (session == NULL)
                return -1;
+
+       /*
+        * We take all buildids when the file contains AUX area tracing data
+        * because we do not decode the trace because it would take too long.
+        */
+       if (!perf_data_file__is_pipe(&file) &&
+           perf_header__has_feat(&session->header, HEADER_AUXTRACE))
+               with_hits = false;
+
        /*
         * in pipe-mode, the only way to get the buildids is to parse
         * the record stream. Buildids are stored as RECORD_HEADER_BUILD_ID
index df6307b4050aaa2ab7e51403d22563e0a1866279..daaa7dca9c3ba81e1e2a7c671dc7660fe34ba4a5 100644 (file)
@@ -328,6 +328,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
 {
        struct addr_location al;
        struct hists *hists = evsel__hists(evsel);
+       int ret = -1;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
                pr_warning("problem processing %d event, skipping it.\n",
@@ -338,7 +339,7 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
        if (hists__add_entry(hists, &al, sample->period,
                             sample->weight, sample->transaction)) {
                pr_warning("problem incrementing symbol period, skipping event\n");
-               return -1;
+               goto out_put;
        }
 
        /*
@@ -350,8 +351,10 @@ static int diff__process_sample_event(struct perf_tool *tool __maybe_unused,
        hists->stats.total_period += sample->period;
        if (!al.filtered)
                hists->stats.total_non_filtered_period += sample->period;
-
-       return 0;
+       ret = 0;
+out_put:
+       addr_location__put(&al);
+       return ret;
 }
 
 static struct perf_tool tool = {
index 40a33d7334cce224fb0ac8317554189f1821dfcf..52ec66b236076c46c1bd0666967d1d204d2c95c3 100644 (file)
@@ -16,6 +16,7 @@
 #include "util/debug.h"
 #include "util/build-id.h"
 #include "util/data.h"
+#include "util/auxtrace.h"
 
 #include "util/parse-options.h"
 
@@ -26,10 +27,12 @@ struct perf_inject {
        struct perf_session     *session;
        bool                    build_ids;
        bool                    sched_stat;
+       bool                    have_auxtrace;
        const char              *input_name;
        struct perf_data_file   output;
        u64                     bytes_written;
        struct list_head        samples;
+       struct itrace_synth_opts itrace_synth_opts;
 };
 
 struct event_entry {
@@ -38,14 +41,11 @@ struct event_entry {
        union perf_event event[0];
 };
 
-static int perf_event__repipe_synth(struct perf_tool *tool,
-                                   union perf_event *event)
+static int output_bytes(struct perf_inject *inject, void *buf, size_t sz)
 {
-       struct perf_inject *inject = container_of(tool, struct perf_inject, tool);
        ssize_t size;
 
-       size = perf_data_file__write(&inject->output, event,
-                                    event->header.size);
+       size = perf_data_file__write(&inject->output, buf, sz);
        if (size < 0)
                return -errno;
 
@@ -53,6 +53,15 @@ static int perf_event__repipe_synth(struct perf_tool *tool,
        return 0;
 }
 
+static int perf_event__repipe_synth(struct perf_tool *tool,
+                                   union perf_event *event)
+{
+       struct perf_inject *inject = container_of(tool, struct perf_inject,
+                                                 tool);
+
+       return output_bytes(inject, event, event->header.size);
+}
+
 static int perf_event__repipe_oe_synth(struct perf_tool *tool,
                                       union perf_event *event,
                                       struct ordered_events *oe __maybe_unused)
@@ -86,6 +95,79 @@ static int perf_event__repipe_attr(struct perf_tool *tool,
        return perf_event__repipe_synth(tool, event);
 }
 
+#ifdef HAVE_AUXTRACE_SUPPORT
+
+static int copy_bytes(struct perf_inject *inject, int fd, off_t size)
+{
+       char buf[4096];
+       ssize_t ssz;
+       int ret;
+
+       while (size > 0) {
+               ssz = read(fd, buf, min(size, (off_t)sizeof(buf)));
+               if (ssz < 0)
+                       return -errno;
+               ret = output_bytes(inject, buf, ssz);
+               if (ret)
+                       return ret;
+               size -= ssz;
+       }
+
+       return 0;
+}
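
copy_bytes() above streams an AUX payload from the input file descriptor to the output in buffer-sized chunks, and it tolerates short reads: each pass forwards exactly the ssz bytes actually read and decrements the remaining size. read() failures are returned as -errno, and output failures propagate through output_bytes()'s return value.
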
+
+static s64 perf_event__repipe_auxtrace(struct perf_tool *tool,
+                                      union perf_event *event,
+                                      struct perf_session *session
+                                      __maybe_unused)
+{
+       struct perf_inject *inject = container_of(tool, struct perf_inject,
+                                                 tool);
+       int ret;
+
+       inject->have_auxtrace = true;
+
+       if (!inject->output.is_pipe) {
+               off_t offset;
+
+               offset = lseek(inject->output.fd, 0, SEEK_CUR);
+               if (offset == -1)
+                       return -errno;
+               ret = auxtrace_index__auxtrace_event(&session->auxtrace_index,
+                                                    event, offset);
+               if (ret < 0)
+                       return ret;
+       }
+
+       if (perf_data_file__is_pipe(session->file) || !session->one_mmap) {
+               ret = output_bytes(inject, event, event->header.size);
+               if (ret < 0)
+                       return ret;
+               ret = copy_bytes(inject, perf_data_file__fd(session->file),
+                                event->auxtrace.size);
+       } else {
+               ret = output_bytes(inject, event,
+                                  event->header.size + event->auxtrace.size);
+       }
+       if (ret < 0)
+               return ret;
+
+       return event->auxtrace.size;
+}
+
+#else
+
+static s64
+perf_event__repipe_auxtrace(struct perf_tool *tool __maybe_unused,
+                           union perf_event *event __maybe_unused,
+                           struct perf_session *session __maybe_unused)
+{
+       pr_err("AUX area tracing not supported\n");
+       return -EINVAL;
+}
+
+#endif
+
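
perf_event__repipe_auxtrace() handles three situations: when the output is seekable, the current offset is first recorded in the session's auxtrace index so the payload can be located again later; when the input is a pipe (or not mapped as a single mmap), the payload is not addressable in memory, so the event header is written and the event->auxtrace.size trailing bytes are streamed with copy_bytes(); otherwise header and payload are contiguous in the mapping and a single output_bytes() call covers both. Returning event->auxtrace.size rather than 0 tells the session layer how many trailing payload bytes to skip in the input stream, and the have_auxtrace flag set here drives the dsos__hit_all() call in __cmd_inject() below.
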
 static int perf_event__repipe(struct perf_tool *tool,
                              union perf_event *event,
                              struct perf_sample *sample __maybe_unused,
@@ -155,6 +237,32 @@ static int perf_event__repipe_fork(struct perf_tool *tool,
        return err;
 }
 
+static int perf_event__repipe_comm(struct perf_tool *tool,
+                                  union perf_event *event,
+                                  struct perf_sample *sample,
+                                  struct machine *machine)
+{
+       int err;
+
+       err = perf_event__process_comm(tool, event, sample, machine);
+       perf_event__repipe(tool, event, sample, machine);
+
+       return err;
+}
+
+static int perf_event__repipe_exit(struct perf_tool *tool,
+                                  union perf_event *event,
+                                  struct perf_sample *sample,
+                                  struct machine *machine)
+{
+       int err;
+
+       err = perf_event__process_exit(tool, event, sample, machine);
+       perf_event__repipe(tool, event, sample, machine);
+
+       return err;
+}
+
 static int perf_event__repipe_tracing_data(struct perf_tool *tool,
                                           union perf_event *event,
                                           struct perf_session *session)
@@ -167,6 +275,18 @@ static int perf_event__repipe_tracing_data(struct perf_tool *tool,
        return err;
 }
 
+static int perf_event__repipe_id_index(struct perf_tool *tool,
+                                      union perf_event *event,
+                                      struct perf_session *session)
+{
+       int err;
+
+       perf_event__repipe_synth(tool, event);
+       err = perf_event__process_id_index(tool, event, session);
+
+       return err;
+}
+
 static int dso__read_build_id(struct dso *dso)
 {
        if (dso->has_build_id)
@@ -245,6 +365,7 @@ static int perf_event__inject_buildid(struct perf_tool *tool,
                }
        }
 
+       thread__put(thread);
 repipe:
        perf_event__repipe(tool, event, sample, machine);
        return 0;
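
The thread__put() added before the repipe: label pairs with the reference taken by machine__findnew_thread() earlier in the function: threads are now reference-counted. The same get/put discipline appears in the kmem, kvm and lock sample handlers below, where early returns are funneled through a single exit so the reference is dropped on every path.
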
@@ -351,16 +472,20 @@ static int __cmd_inject(struct perf_inject *inject)
        struct perf_session *session = inject->session;
        struct perf_data_file *file_out = &inject->output;
        int fd = perf_data_file__fd(file_out);
+       u64 output_data_offset;
 
        signal(SIGINT, sig_handler);
 
-       if (inject->build_ids || inject->sched_stat) {
+       if (inject->build_ids || inject->sched_stat ||
+           inject->itrace_synth_opts.set) {
                inject->tool.mmap         = perf_event__repipe_mmap;
                inject->tool.mmap2        = perf_event__repipe_mmap2;
                inject->tool.fork         = perf_event__repipe_fork;
                inject->tool.tracing_data = perf_event__repipe_tracing_data;
        }
 
+       output_data_offset = session->header.data_offset;
+
        if (inject->build_ids) {
                inject->tool.sample = perf_event__inject_buildid;
        } else if (inject->sched_stat) {
@@ -379,17 +504,43 @@ static int __cmd_inject(struct perf_inject *inject)
                        else if (!strncmp(name, "sched:sched_stat_", 17))
                                evsel->handler = perf_inject__sched_stat;
                }
+       } else if (inject->itrace_synth_opts.set) {
+               session->itrace_synth_opts = &inject->itrace_synth_opts;
+               inject->itrace_synth_opts.inject = true;
+               inject->tool.comm           = perf_event__repipe_comm;
+               inject->tool.exit           = perf_event__repipe_exit;
+               inject->tool.id_index       = perf_event__repipe_id_index;
+               inject->tool.auxtrace_info  = perf_event__process_auxtrace_info;
+               inject->tool.auxtrace       = perf_event__process_auxtrace;
+               inject->tool.ordered_events = true;
+               inject->tool.ordering_requires_timestamps = true;
+               /* Allow space in the header for new attributes */
+               output_data_offset = 4096;
        }
 
+       if (!inject->itrace_synth_opts.set)
+               auxtrace_index__free(&session->auxtrace_index);
+
        if (!file_out->is_pipe)
-               lseek(fd, session->header.data_offset, SEEK_SET);
+               lseek(fd, output_data_offset, SEEK_SET);
 
        ret = perf_session__process_events(session);
 
        if (!file_out->is_pipe) {
-               if (inject->build_ids)
+               if (inject->build_ids) {
                        perf_header__set_feat(&session->header,
                                              HEADER_BUILD_ID);
+                       if (inject->have_auxtrace)
+                               dsos__hit_all(session);
+               }
+               /*
+                * The AUX areas have been removed and replaced with
+                * synthesized hardware events, so clear the feature flag.
+                */
+               if (inject->itrace_synth_opts.set)
+                       perf_header__clear_feat(&session->header,
+                                               HEADER_AUXTRACE);
+               session->header.data_offset = output_data_offset;
                session->header.data_size = inject->bytes_written;
                perf_session__write_header(session, session->evlist, fd, true);
        }
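
Moving the data offset to 4096 reserves room at the front of the output file: injection can add attributes, so the header area may grow beyond its size in the input, and the finalized header is written into the reserved space only once the data size is known. A self-contained sketch of the same reserve-then-backfill layout (file name and sizes are illustrative):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	const char payload[] = "events...";
	char header[64];
	off_t data_offset = 4096;	/* room for attrs added by injection */
	int fd = open("demo.data", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return 1;

	lseek(fd, data_offset, SEEK_SET);	 /* leave the header hole */
	write(fd, payload, sizeof(payload) - 1); /* stream the data section */

	memset(header, 0, sizeof(header));	 /* now the sizes are known */
	pwrite(fd, header, sizeof(header), 0);	 /* back-fill the header */
	return close(fd);
}
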
@@ -408,11 +559,16 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                        .fork           = perf_event__repipe,
                        .exit           = perf_event__repipe,
                        .lost           = perf_event__repipe,
+                       .aux            = perf_event__repipe,
+                       .itrace_start   = perf_event__repipe,
                        .read           = perf_event__repipe_sample,
                        .throttle       = perf_event__repipe,
                        .unthrottle     = perf_event__repipe,
                        .attr           = perf_event__repipe_attr,
                        .tracing_data   = perf_event__repipe_op2_synth,
+                       .auxtrace_info  = perf_event__repipe_op2_synth,
+                       .auxtrace       = perf_event__repipe_auxtrace,
+                       .auxtrace_error = perf_event__repipe_op2_synth,
                        .finished_round = perf_event__repipe_oe_synth,
                        .build_id       = perf_event__repipe_op2_synth,
                        .id_index       = perf_event__repipe_op2_synth,
@@ -444,6 +600,9 @@ int cmd_inject(int argc, const char **argv, const char *prefix __maybe_unused)
                OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name, "file",
                           "kallsyms pathname"),
                OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+               OPT_CALLBACK_OPTARG(0, "itrace", &inject.itrace_synth_opts,
+                                   NULL, "opts", "Instruction Tracing options",
+                                   itrace_parse_synth_opts),
                OPT_END()
        };
        const char * const inject_usage[] = {
index 1634186d537cdc2eb2ee38b174891361ef13db9f..950f296dfcf7a402ebbad0df1edf16e4d62a52bd 100644 (file)
@@ -10,6 +10,7 @@
 #include "util/header.h"
 #include "util/session.h"
 #include "util/tool.h"
+#include "util/callchain.h"
 
 #include "util/parse-options.h"
 #include "util/trace-event.h"
 #include <linux/rbtree.h>
 #include <linux/string.h>
 #include <locale.h>
+#include <regex.h>
 
 static int     kmem_slab;
 static int     kmem_page;
 
 static long    kmem_page_size;
+static enum {
+       KMEM_SLAB,
+       KMEM_PAGE,
+} kmem_default = KMEM_SLAB;  /* for backward compatibility */
 
 struct alloc_stat;
-typedef int (*sort_fn_t)(struct alloc_stat *, struct alloc_stat *);
+typedef int (*sort_fn_t)(void *, void *);
 
 static int                     alloc_flag;
 static int                     caller_flag;
@@ -179,8 +185,8 @@ static int perf_evsel__process_alloc_node_event(struct perf_evsel *evsel,
        return ret;
 }
 
-static int ptr_cmp(struct alloc_stat *, struct alloc_stat *);
-static int callsite_cmp(struct alloc_stat *, struct alloc_stat *);
+static int ptr_cmp(void *, void *);
+static int slab_callsite_cmp(void *, void *);
 
 static struct alloc_stat *search_alloc_stat(unsigned long ptr,
                                            unsigned long call_site,
@@ -221,7 +227,8 @@ static int perf_evsel__process_free_event(struct perf_evsel *evsel,
                s_alloc->pingpong++;
 
                s_caller = search_alloc_stat(0, s_alloc->call_site,
-                                            &root_caller_stat, callsite_cmp);
+                                            &root_caller_stat,
+                                            slab_callsite_cmp);
                if (!s_caller)
                        return -1;
                s_caller->pingpong++;
@@ -241,6 +248,8 @@ static unsigned long nr_page_fails;
 static unsigned long nr_page_nomatch;
 
 static bool use_pfn;
+static bool live_page;
+static struct perf_session *kmem_session;
 
 #define MAX_MIGRATE_TYPES  6
 #define MAX_PAGE_ORDER     11
@@ -250,6 +259,7 @@ static int order_stats[MAX_PAGE_ORDER][MAX_MIGRATE_TYPES];
 struct page_stat {
        struct rb_node  node;
        u64             page;
+       u64             callsite;
        int             order;
        unsigned        gfp_flags;
        unsigned        migrate_type;
@@ -259,13 +269,158 @@ struct page_stat {
        int             nr_free;
 };
 
-static struct rb_root page_tree;
+static struct rb_root page_live_tree;
 static struct rb_root page_alloc_tree;
 static struct rb_root page_alloc_sorted;
+static struct rb_root page_caller_tree;
+static struct rb_root page_caller_sorted;
 
-static struct page_stat *search_page(unsigned long page, bool create)
+struct alloc_func {
+       u64 start;
+       u64 end;
+       char *name;
+};
+
+static int nr_alloc_funcs;
+static struct alloc_func *alloc_func_list;
+
+static int funcmp(const void *a, const void *b)
+{
+       const struct alloc_func *fa = a;
+       const struct alloc_func *fb = b;
+
+       if (fa->start > fb->start)
+               return 1;
+       else
+               return -1;
+}
+
+static int callcmp(const void *a, const void *b)
+{
+       const struct alloc_func *fa = a;
+       const struct alloc_func *fb = b;
+
+       if (fb->start <= fa->start && fa->end < fb->end)
+               return 0;
+
+       if (fa->start > fb->start)
+               return 1;
+       else
+               return -1;
+}
+
+static int build_alloc_func_list(void)
 {
-       struct rb_node **node = &page_tree.rb_node;
+       int ret;
+       struct map *kernel_map;
+       struct symbol *sym;
+       struct rb_node *node;
+       struct alloc_func *func;
+       struct machine *machine = &kmem_session->machines.host;
+       regex_t alloc_func_regex;
+       const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
+
+       ret = regcomp(&alloc_func_regex, pattern, REG_EXTENDED);
+       if (ret) {
+               char err[BUFSIZ];
+
+               regerror(ret, &alloc_func_regex, err, sizeof(err));
+               pr_err("Invalid regex: %s\n%s", pattern, err);
+               return -EINVAL;
+       }
+
+       kernel_map = machine->vmlinux_maps[MAP__FUNCTION];
+       if (map__load(kernel_map, NULL) < 0) {
+               pr_err("cannot load kernel map\n");
+               return -ENOENT;
+       }
+
+       map__for_each_symbol(kernel_map, sym, node) {
+               if (regexec(&alloc_func_regex, sym->name, 0, NULL, 0))
+                       continue;
+
+               func = realloc(alloc_func_list,
+                              (nr_alloc_funcs + 1) * sizeof(*func));
+               if (func == NULL)
+                       return -ENOMEM;
+
+               pr_debug("alloc func: %s\n", sym->name);
+               func[nr_alloc_funcs].start = sym->start;
+               func[nr_alloc_funcs].end   = sym->end;
+               func[nr_alloc_funcs].name  = sym->name;
+
+               alloc_func_list = func;
+               nr_alloc_funcs++;
+       }
+
+       qsort(alloc_func_list, nr_alloc_funcs, sizeof(*func), funcmp);
+
+       regfree(&alloc_func_regex);
+       return 0;
+}
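
build_alloc_func_list() collects every kernel symbol whose name matches the POSIX extended regex above and sorts the result by start address for later bsearch(). A standalone check of what the pattern accepts (the symbol names below are illustrative):

#include <regex.h>
#include <stdio.h>

int main(void)
{
	const char pattern[] = "^_?_?(alloc|get_free|get_zeroed)_pages?";
	const char *syms[] = {
		"__alloc_pages_nodemask",	/* matches */
		"alloc_pages_current",		/* matches */
		"__get_free_pages",		/* matches */
		"get_zeroed_page",		/* matches */
		"kmalloc",			/* no match: slab, not page */
	};
	regex_t re;
	unsigned int i;

	if (regcomp(&re, pattern, REG_EXTENDED))
		return 1;

	for (i = 0; i < sizeof(syms) / sizeof(syms[0]); i++)
		printf("%-24s %s\n", syms[i],
		       regexec(&re, syms[i], 0, NULL, 0) ? "no match" : "match");

	regfree(&re);
	return 0;
}
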
+
+/*
+ * Find the first non-memory-allocation function in the callchain.
+ * The known allocation functions are kept in 'alloc_func_list'.
+ */
+static u64 find_callsite(struct perf_evsel *evsel, struct perf_sample *sample)
+{
+       struct addr_location al;
+       struct machine *machine = &kmem_session->machines.host;
+       struct callchain_cursor_node *node;
+
+       if (alloc_func_list == NULL) {
+               if (build_alloc_func_list() < 0)
+                       goto out;
+       }
+
+       al.thread = machine__findnew_thread(machine, sample->pid, sample->tid);
+       sample__resolve_callchain(sample, NULL, evsel, &al, 16);
+
+       callchain_cursor_commit(&callchain_cursor);
+       while (true) {
+               struct alloc_func key, *caller;
+               u64 addr;
+
+               node = callchain_cursor_current(&callchain_cursor);
+               if (node == NULL)
+                       break;
+
+               key.start = key.end = node->ip;
+               caller = bsearch(&key, alloc_func_list, nr_alloc_funcs,
+                                sizeof(key), callcmp);
+               if (!caller) {
+                       /* found: first frame that is not an alloc function */
+                       if (node->map)
+                               addr = map__unmap_ip(node->map, node->ip);
+                       else
+                               addr = node->ip;
+
+                       return addr;
+               } else
+                       pr_debug3("skipping alloc function: %s\n", caller->name);
+
+               callchain_cursor_advance(&callchain_cursor);
+       }
+
+out:
+       pr_debug2("unknown callsite: %"PRIx64 "\n", sample->ip);
+       return sample->ip;
+}
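
find_callsite() walks the resolved callchain and, for each frame, runs bsearch() over the sorted function list with callcmp(), whose "equal" case is containment: the degenerate [ip, ip] key compares equal to any function whose [start, end) range contains it. Frames that hit an allocator are skipped; the first miss is reported as the callsite. A self-contained model of that comparator (addresses are made up):

#include <stdio.h>
#include <stdlib.h>

struct range { unsigned long start, end; };

static int rangecmp(const void *a, const void *b)
{
	const struct range *key = a, *r = b;

	if (r->start <= key->start && key->end < r->end)
		return 0;			/* containment counts as equal */
	return key->start > r->start ? 1 : -1;
}

int main(void)
{
	/* must be sorted by start address, as after qsort(..., funcmp) */
	struct range funcs[] = { {0x100, 0x180}, {0x200, 0x240}, {0x300, 0x3c0} };
	struct range key = { 0x210, 0x210 };	/* an instruction pointer */
	struct range *hit = bsearch(&key, funcs, 3, sizeof(key), rangecmp);

	if (hit)
		printf("ip 0x%lx is inside [0x%lx, 0x%lx)\n",
		       key.start, hit->start, hit->end);
	else
		printf("ip 0x%lx is not in any listed function\n", key.start);
	return 0;
}
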
+
+struct sort_dimension {
+       const char              name[20];
+       sort_fn_t               cmp;
+       struct list_head        list;
+};
+
+static LIST_HEAD(page_alloc_sort_input);
+static LIST_HEAD(page_caller_sort_input);
+
+static struct page_stat *
+__page_stat__findnew_page(struct page_stat *pstat, bool create)
+{
+       struct rb_node **node = &page_live_tree.rb_node;
        struct rb_node *parent = NULL;
        struct page_stat *data;
 
@@ -275,7 +430,7 @@ static struct page_stat *search_page(unsigned long page, bool create)
                parent = *node;
                data = rb_entry(*node, struct page_stat, node);
 
-               cmp = data->page - page;
+               cmp = data->page - pstat->page;
                if (cmp < 0)
                        node = &parent->rb_left;
                else if (cmp > 0)
@@ -289,49 +444,48 @@ static struct page_stat *search_page(unsigned long page, bool create)
 
        data = zalloc(sizeof(*data));
        if (data != NULL) {
-               data->page = page;
+               data->page = pstat->page;
+               data->order = pstat->order;
+               data->gfp_flags = pstat->gfp_flags;
+               data->migrate_type = pstat->migrate_type;
 
                rb_link_node(&data->node, parent, node);
-               rb_insert_color(&data->node, &page_tree);
+               rb_insert_color(&data->node, &page_live_tree);
        }
 
        return data;
 }
 
-static int page_stat_cmp(struct page_stat *a, struct page_stat *b)
+static struct page_stat *page_stat__find_page(struct page_stat *pstat)
 {
-       if (a->page > b->page)
-               return -1;
-       if (a->page < b->page)
-               return 1;
-       if (a->order > b->order)
-               return -1;
-       if (a->order < b->order)
-               return 1;
-       if (a->migrate_type > b->migrate_type)
-               return -1;
-       if (a->migrate_type < b->migrate_type)
-               return 1;
-       if (a->gfp_flags > b->gfp_flags)
-               return -1;
-       if (a->gfp_flags < b->gfp_flags)
-               return 1;
-       return 0;
+       return __page_stat__findnew_page(pstat, false);
+}
+
+static struct page_stat *page_stat__findnew_page(struct page_stat *pstat)
+{
+       return __page_stat__findnew_page(pstat, true);
 }
 
-static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool create)
+static struct page_stat *
+__page_stat__findnew_alloc(struct page_stat *pstat, bool create)
 {
        struct rb_node **node = &page_alloc_tree.rb_node;
        struct rb_node *parent = NULL;
        struct page_stat *data;
+       struct sort_dimension *sort;
 
        while (*node) {
-               s64 cmp;
+               int cmp = 0;
 
                parent = *node;
                data = rb_entry(*node, struct page_stat, node);
 
-               cmp = page_stat_cmp(data, pstat);
+               list_for_each_entry(sort, &page_alloc_sort_input, list) {
+                       cmp = sort->cmp(pstat, data);
+                       if (cmp)
+                               break;
+               }
+
                if (cmp < 0)
                        node = &parent->rb_left;
                else if (cmp > 0)
@@ -357,6 +511,71 @@ static struct page_stat *search_page_alloc_stat(struct page_stat *pstat, bool cr
        return data;
 }
 
+static struct page_stat *page_stat__find_alloc(struct page_stat *pstat)
+{
+       return __page_stat__findnew_alloc(pstat, false);
+}
+
+static struct page_stat *page_stat__findnew_alloc(struct page_stat *pstat)
+{
+       return __page_stat__findnew_alloc(pstat, true);
+}
+
+static struct page_stat *
+__page_stat__findnew_caller(struct page_stat *pstat, bool create)
+{
+       struct rb_node **node = &page_caller_tree.rb_node;
+       struct rb_node *parent = NULL;
+       struct page_stat *data;
+       struct sort_dimension *sort;
+
+       while (*node) {
+               int cmp = 0;
+
+               parent = *node;
+               data = rb_entry(*node, struct page_stat, node);
+
+               list_for_each_entry(sort, &page_caller_sort_input, list) {
+                       cmp = sort->cmp(pstat, data);
+                       if (cmp)
+                               break;
+               }
+
+               if (cmp < 0)
+                       node = &parent->rb_left;
+               else if (cmp > 0)
+                       node = &parent->rb_right;
+               else
+                       return data;
+       }
+
+       if (!create)
+               return NULL;
+
+       data = zalloc(sizeof(*data));
+       if (data != NULL) {
+               data->callsite = pstat->callsite;
+               data->order = pstat->order;
+               data->gfp_flags = pstat->gfp_flags;
+               data->migrate_type = pstat->migrate_type;
+
+               rb_link_node(&data->node, parent, node);
+               rb_insert_color(&data->node, &page_caller_tree);
+       }
+
+       return data;
+}
+
+static struct page_stat *page_stat__find_caller(struct page_stat *pstat)
+{
+       return __page_stat__findnew_caller(pstat, false);
+}
+
+static struct page_stat *page_stat__findnew_caller(struct page_stat *pstat)
+{
+       return __page_stat__findnew_caller(pstat, true);
+}
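
The three __page_stat__findnew_*() helpers share one shape: descend the rb-tree remembering the link pointer, return an existing node on compare-equal, and otherwise either fail (find) or allocate and splice a new node at the remembered hole (findnew). A self-contained model of the pattern on a plain binary tree, with tree__find()/tree__findnew() as the thin wrappers:

#include <stdio.h>
#include <stdlib.h>

struct node {
	long key;
	struct node *left, *right;
};

static struct node *root;

static struct node *__tree_findnew(long key, int create)
{
	struct node **p = &root, *n;

	while (*p) {
		n = *p;
		if (key < n->key)
			p = &n->left;
		else if (key > n->key)
			p = &n->right;
		else
			return n;	/* existing entry */
	}

	if (!create)
		return NULL;		/* pure lookup: miss */

	n = calloc(1, sizeof(*n));	/* insert at the hole *p */
	if (n) {
		n->key = key;
		*p = n;
	}
	return n;
}

static struct node *tree__find(long key)    { return __tree_findnew(key, 0); }
static struct node *tree__findnew(long key) { return __tree_findnew(key, 1); }

int main(void)
{
	tree__findnew(42);
	printf("find(42): %s\n", tree__find(42) ? "hit" : "miss");
	printf("find(7):  %s\n", tree__find(7) ? "hit" : "miss");
	return 0;
}
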
+
 static bool valid_page(u64 pfn_or_page)
 {
        if (use_pfn && pfn_or_page == -1UL)
@@ -366,6 +585,176 @@ static bool valid_page(u64 pfn_or_page)
        return true;
 }
 
+struct gfp_flag {
+       unsigned int flags;
+       char *compact_str;
+       char *human_readable;
+};
+
+static struct gfp_flag *gfps;
+static int nr_gfps;
+
+static int gfpcmp(const void *a, const void *b)
+{
+       const struct gfp_flag *fa = a;
+       const struct gfp_flag *fb = b;
+
+       return fa->flags - fb->flags;
+}
+
+/* see include/trace/events/gfpflags.h */
+static const struct {
+       const char *original;
+       const char *compact;
+} gfp_compact_table[] = {
+       { "GFP_TRANSHUGE",              "THP" },
+       { "GFP_HIGHUSER_MOVABLE",       "HUM" },
+       { "GFP_HIGHUSER",               "HU" },
+       { "GFP_USER",                   "U" },
+       { "GFP_TEMPORARY",              "TMP" },
+       { "GFP_KERNEL",                 "K" },
+       { "GFP_NOFS",                   "NF" },
+       { "GFP_ATOMIC",                 "A" },
+       { "GFP_NOIO",                   "NI" },
+       { "GFP_HIGH",                   "H" },
+       { "GFP_WAIT",                   "W" },
+       { "GFP_IO",                     "I" },
+       { "GFP_COLD",                   "CO" },
+       { "GFP_NOWARN",                 "NWR" },
+       { "GFP_REPEAT",                 "R" },
+       { "GFP_NOFAIL",                 "NF" },
+       { "GFP_NORETRY",                "NR" },
+       { "GFP_COMP",                   "C" },
+       { "GFP_ZERO",                   "Z" },
+       { "GFP_NOMEMALLOC",             "NMA" },
+       { "GFP_MEMALLOC",               "MA" },
+       { "GFP_HARDWALL",               "HW" },
+       { "GFP_THISNODE",               "TN" },
+       { "GFP_RECLAIMABLE",            "RC" },
+       { "GFP_MOVABLE",                "M" },
+       { "GFP_NOTRACK",                "NT" },
+       { "GFP_NO_KSWAPD",              "NK" },
+       { "GFP_OTHER_NODE",             "ON" },
+       { "GFP_NOWAIT",                 "NW" },
+};
+
+static size_t max_gfp_len;
+
+static char *compact_gfp_flags(char *gfp_flags)
+{
+       char *orig_flags = strdup(gfp_flags);
+       char *new_flags = NULL;
+       char *str, *pos = NULL;
+       size_t len = 0;
+
+       if (orig_flags == NULL)
+               return NULL;
+
+       str = strtok_r(orig_flags, "|", &pos);
+       while (str) {
+               size_t i;
+               char *new;
+               const char *cpt;
+
+               for (i = 0; i < ARRAY_SIZE(gfp_compact_table); i++) {
+                       if (strcmp(gfp_compact_table[i].original, str))
+                               continue;
+
+                       cpt = gfp_compact_table[i].compact;
+                       new = realloc(new_flags, len + strlen(cpt) + 2);
+                       if (new == NULL) {
+                               free(new_flags);
+                               return NULL;
+                       }
+
+                       new_flags = new;
+
+                       if (!len) {
+                               strcpy(new_flags, cpt);
+                       } else {
+                               strcat(new_flags, "|");
+                               strcat(new_flags, cpt);
+                               len++;
+                       }
+
+                       len += strlen(cpt);
+               }
+
+               str = strtok_r(NULL, "|", &pos);
+       }
+
+       if (max_gfp_len < len)
+               max_gfp_len = len;
+
+       free(orig_flags);
+       return new_flags;
+}
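
compact_gfp_flags() rewrites the pipe-separated flag string using the table above; per that table, "GFP_KERNEL|GFP_NOWARN|GFP_ZERO" becomes "K|NWR|Z". Tokens with no table entry are silently dropped, and max_gfp_len tracks the widest compacted string so the report columns can be aligned later.
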
+
+static char *compact_gfp_string(unsigned long gfp_flags)
+{
+       struct gfp_flag key = {
+               .flags = gfp_flags,
+       };
+       struct gfp_flag *gfp;
+
+       gfp = bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp);
+       if (gfp)
+               return gfp->compact_str;
+
+       return NULL;
+}
+
+static int parse_gfp_flags(struct perf_evsel *evsel, struct perf_sample *sample,
+                          unsigned int gfp_flags)
+{
+       struct pevent_record record = {
+               .cpu = sample->cpu,
+               .data = sample->raw_data,
+               .size = sample->raw_size,
+       };
+       struct trace_seq seq;
+       char *str, *pos = NULL;
+
+       if (nr_gfps) {
+               struct gfp_flag key = {
+                       .flags = gfp_flags,
+               };
+
+               if (bsearch(&key, gfps, nr_gfps, sizeof(*gfps), gfpcmp))
+                       return 0;
+       }
+
+       trace_seq_init(&seq);
+       pevent_event_info(&seq, evsel->tp_format, &record);
+
+       str = strtok_r(seq.buffer, " ", &pos);
+       while (str) {
+               if (!strncmp(str, "gfp_flags=", 10)) {
+                       struct gfp_flag *new;
+
+                       new = realloc(gfps, (nr_gfps + 1) * sizeof(*gfps));
+                       if (new == NULL)
+                               return -ENOMEM;
+
+                       gfps = new;
+                       new += nr_gfps++;
+
+                       new->flags = gfp_flags;
+                       new->human_readable = strdup(str + 10);
+                       new->compact_str = compact_gfp_flags(str + 10);
+                       if (!new->human_readable || !new->compact_str)
+                               return -ENOMEM;
+
+                       qsort(gfps, nr_gfps, sizeof(*gfps), gfpcmp);
+               }
+
+               str = strtok_r(NULL, " ", &pos);
+       }
+
+       trace_seq_destroy(&seq);
+       return 0;
+}
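
parse_gfp_flags() formats the raw tracepoint record only the first time a given gfp_flags value is seen; the human-readable and compacted strings are then cached in the 'gfps' array, which is kept sorted with qsort() so later events resolve with a cheap bsearch(). A self-contained model of that format-once cache (the formatting itself is faked with snprintf()):

#include <stdio.h>
#include <stdlib.h>

struct entry { unsigned int key; char str[32]; };

static struct entry *cache;
static int nr_entries;

static int entcmp(const void *a, const void *b)
{
	const struct entry *ea = a, *eb = b;

	return (ea->key > eb->key) - (ea->key < eb->key);
}

static const char *lookup(unsigned int key)
{
	struct entry probe = { .key = key };
	struct entry *e = NULL;

	if (nr_entries)
		e = bsearch(&probe, cache, nr_entries, sizeof(*cache), entcmp);
	if (e)
		return e->str;				/* cache hit */

	e = realloc(cache, (nr_entries + 1) * sizeof(*cache));
	if (e == NULL)
		return NULL;
	cache = e;

	e = &cache[nr_entries++];
	e->key = key;
	snprintf(e->str, sizeof(e->str), "flags:%x", key);  /* "format once" */

	qsort(cache, nr_entries, sizeof(*cache), entcmp);   /* keep sorted */
	return lookup(key);	/* re-search: qsort() may have moved it */
}

int main(void)
{
	printf("%s\n", lookup(0xd0));	/* miss: formats and caches */
	printf("%s\n", lookup(0xd0));	/* hit: bsearch() only */
	return 0;
}
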
+
 static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
                                                struct perf_sample *sample)
 {
@@ -375,6 +764,7 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
        unsigned int migrate_type = perf_evsel__intval(evsel, sample,
                                                       "migratetype");
        u64 bytes = kmem_page_size << order;
+       u64 callsite;
        struct page_stat *pstat;
        struct page_stat this = {
                .order = order,
@@ -397,20 +787,36 @@ static int perf_evsel__process_page_alloc_event(struct perf_evsel *evsel,
                return 0;
        }
 
+       if (parse_gfp_flags(evsel, sample, gfp_flags) < 0)
+               return -1;
+
+       callsite = find_callsite(evsel, sample);
+
        /*
         * Track the page in the live tree so that the matching free
         * event can recover its gfp flags and migrate type.
         */
-       pstat = search_page(page, true);
+       this.page = page;
+       pstat = page_stat__findnew_page(&this);
        if (pstat == NULL)
                return -ENOMEM;
 
-       pstat->order = order;
-       pstat->gfp_flags = gfp_flags;
-       pstat->migrate_type = migrate_type;
+       pstat->nr_alloc++;
+       pstat->alloc_bytes += bytes;
+       pstat->callsite = callsite;
+
+       if (!live_page) {
+               pstat = page_stat__findnew_alloc(&this);
+               if (pstat == NULL)
+                       return -ENOMEM;
 
-       this.page = page;
-       pstat = search_page_alloc_stat(&this, true);
+               pstat->nr_alloc++;
+               pstat->alloc_bytes += bytes;
+               pstat->callsite = callsite;
+       }
+
+       this.callsite = callsite;
+       pstat = page_stat__findnew_caller(&this);
        if (pstat == NULL)
                return -ENOMEM;
 
@@ -441,7 +847,8 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
        nr_page_frees++;
        total_page_free_bytes += bytes;
 
-       pstat = search_page(page, false);
+       this.page = page;
+       pstat = page_stat__find_page(&this);
        if (pstat == NULL) {
                pr_debug2("missing free at page %"PRIx64" (order: %d)\n",
                          page, order);
@@ -452,20 +859,41 @@ static int perf_evsel__process_page_free_event(struct perf_evsel *evsel,
                return 0;
        }
 
-       this.page = page;
        this.gfp_flags = pstat->gfp_flags;
        this.migrate_type = pstat->migrate_type;
+       this.callsite = pstat->callsite;
 
-       rb_erase(&pstat->node, &page_tree);
+       rb_erase(&pstat->node, &page_live_tree);
        free(pstat);
 
-       pstat = search_page_alloc_stat(&this, false);
+       if (live_page) {
+               order_stats[this.order][this.migrate_type]--;
+       } else {
+               pstat = page_stat__find_alloc(&this);
+               if (pstat == NULL)
+                       return -ENOMEM;
+
+               pstat->nr_free++;
+               pstat->free_bytes += bytes;
+       }
+
+       pstat = page_stat__find_caller(&this);
        if (pstat == NULL)
                return -ENOENT;
 
        pstat->nr_free++;
        pstat->free_bytes += bytes;
 
+       if (live_page) {
+               pstat->nr_alloc--;
+               pstat->alloc_bytes -= bytes;
+
+               if (pstat->nr_alloc == 0) {
+                       rb_erase(&pstat->node, &page_caller_tree);
+                       free(pstat);
+               }
+       }
+
        return 0;
 }
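
With --live, the free path above decrements the caller entry's allocation counters and erases it once nr_alloc reaches zero, so the final report covers only pages still outstanding when tracing stopped; without --live, the alloc and caller trees keep cumulative totals and frees are accumulated separately in nr_free/free_bytes. Typical usage would be 'perf kmem record --page' followed by 'perf kmem stat --page --live'.
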
 
@@ -478,6 +906,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                struct perf_evsel *evsel,
                                struct machine *machine)
 {
+       int err = 0;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);
 
@@ -491,10 +920,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;
-               return f(evsel, sample);
+               err = f(evsel, sample);
        }
 
-       return 0;
+       thread__put(thread);
+
+       return err;
 }
 
 static struct perf_tool perf_kmem = {
@@ -576,41 +1007,111 @@ static const char * const migrate_type_str[] = {
        "UNKNOWN",
 };
 
-static void __print_page_result(struct rb_root *root,
-                               struct perf_session *session __maybe_unused,
-                               int n_lines)
+static void __print_page_alloc_result(struct perf_session *session, int n_lines)
 {
-       struct rb_node *next = rb_first(root);
+       struct rb_node *next = rb_first(&page_alloc_sorted);
+       struct machine *machine = &session->machines.host;
        const char *format;
+       int gfp_len = max(strlen("GFP flags"), max_gfp_len);
 
-       printf("\n%.80s\n", graph_dotted_line);
-       printf(" %-16s | Total alloc (KB) | Hits      | Order | Mig.type | GFP flags\n",
-              use_pfn ? "PFN" : "Page");
-       printf("%.80s\n", graph_dotted_line);
+       printf("\n%.105s\n", graph_dotted_line);
+       printf(" %-16s | %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
+              use_pfn ? "PFN" : "Page", live_page ? "Live" : "Total",
+              gfp_len, "GFP flags");
+       printf("%.105s\n", graph_dotted_line);
 
        if (use_pfn)
-               format = " %16llu | %'16llu | %'9d | %5d | %8s |  %08lx\n";
+               format = " %16llu | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
        else
-               format = " %016llx | %'16llu | %'9d | %5d | %8s |  %08lx\n";
+               format = " %016llx | %'16llu | %'9d | %5d | %8s | %-*s | %s\n";
 
        while (next && n_lines--) {
                struct page_stat *data;
+               struct symbol *sym;
+               struct map *map;
+               char buf[32];
+               char *caller = buf;
 
                data = rb_entry(next, struct page_stat, node);
+               sym = machine__find_kernel_function(machine, data->callsite,
+                                                   &map, NULL);
+               if (sym && sym->name)
+                       caller = sym->name;
+               else
+                       scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
 
                printf(format, (unsigned long long)data->page,
                       (unsigned long long)data->alloc_bytes / 1024,
                       data->nr_alloc, data->order,
                       migrate_type_str[data->migrate_type],
-                      (unsigned long)data->gfp_flags);
+                      gfp_len, compact_gfp_string(data->gfp_flags), caller);
 
                next = rb_next(next);
        }
 
-       if (n_lines == -1)
-               printf(" ...              | ...              | ...       | ...   | ...      | ...     \n");
+       if (n_lines == -1) {
+               printf(" ...              | ...              | ...       | ...   | ...      | %-*s | ...\n",
+                      gfp_len, "...");
+       }
+
+       printf("%.105s\n", graph_dotted_line);
+}
+
+static void __print_page_caller_result(struct perf_session *session, int n_lines)
+{
+       struct rb_node *next = rb_first(&page_caller_sorted);
+       struct machine *machine = &session->machines.host;
+       int gfp_len = max(strlen("GFP flags"), max_gfp_len);
+
+       printf("\n%.105s\n", graph_dotted_line);
+       printf(" %5s alloc (KB) | Hits      | Order | Mig.type | %-*s | Callsite\n",
+              live_page ? "Live" : "Total", gfp_len, "GFP flags");
+       printf("%.105s\n", graph_dotted_line);
+
+       while (next && n_lines--) {
+               struct page_stat *data;
+               struct symbol *sym;
+               struct map *map;
+               char buf[32];
+               char *caller = buf;
+
+               data = rb_entry(next, struct page_stat, node);
+               sym = machine__find_kernel_function(machine, data->callsite,
+                                                   &map, NULL);
+               if (sym && sym->name)
+                       caller = sym->name;
+               else
+                       scnprintf(buf, sizeof(buf), "%"PRIx64, data->callsite);
+
+               printf(" %'16llu | %'9d | %5d | %8s | %-*s | %s\n",
+                      (unsigned long long)data->alloc_bytes / 1024,
+                      data->nr_alloc, data->order,
+                      migrate_type_str[data->migrate_type],
+                      gfp_len, compact_gfp_string(data->gfp_flags), caller);
+
+               next = rb_next(next);
+       }
+
+       if (n_lines == -1) {
+               printf(" ...              | ...       | ...   | ...      | %-*s | ...\n",
+                      gfp_len, "...");
+       }
 
-       printf("%.80s\n", graph_dotted_line);
+       printf("%.105s\n", graph_dotted_line);
+}
+
+static void print_gfp_flags(void)
+{
+       int i;
+
+       printf("#\n");
+       printf("# GFP flags\n");
+       printf("# ---------\n");
+       for (i = 0; i < nr_gfps; i++) {
+               printf("# %08x: %*s: %s\n", gfps[i].flags,
+                      (int) max_gfp_len, gfps[i].compact_str,
+                      gfps[i].human_readable);
+       }
 }
 
 static void print_slab_summary(void)
@@ -682,8 +1183,12 @@ static void print_slab_result(struct perf_session *session)
 
 static void print_page_result(struct perf_session *session)
 {
+       if (caller_flag || alloc_flag)
+               print_gfp_flags();
+       if (caller_flag)
+               __print_page_caller_result(session, caller_lines);
        if (alloc_flag)
-               __print_page_result(&page_alloc_sorted, session, alloc_lines);
+               __print_page_alloc_result(session, alloc_lines);
        print_page_summary();
 }
 
@@ -695,14 +1200,10 @@ static void print_result(struct perf_session *session)
                print_page_result(session);
 }
 
-struct sort_dimension {
-       const char              name[20];
-       sort_fn_t               cmp;
-       struct list_head        list;
-};
-
-static LIST_HEAD(caller_sort);
-static LIST_HEAD(alloc_sort);
+static LIST_HEAD(slab_caller_sort);
+static LIST_HEAD(slab_alloc_sort);
+static LIST_HEAD(page_caller_sort);
+static LIST_HEAD(page_alloc_sort);
 
 static void sort_slab_insert(struct rb_root *root, struct alloc_stat *data,
                             struct list_head *sort_list)
@@ -751,10 +1252,12 @@ static void __sort_slab_result(struct rb_root *root, struct rb_root *root_sorted
        }
 }
 
-static void sort_page_insert(struct rb_root *root, struct page_stat *data)
+static void sort_page_insert(struct rb_root *root, struct page_stat *data,
+                            struct list_head *sort_list)
 {
        struct rb_node **new = &root->rb_node;
        struct rb_node *parent = NULL;
+       struct sort_dimension *sort;
 
        while (*new) {
                struct page_stat *this;
@@ -763,8 +1266,11 @@ static void sort_page_insert(struct rb_root *root, struct page_stat *data)
                this = rb_entry(*new, struct page_stat, node);
                parent = *new;
 
-               /* TODO: support more sort key */
-               cmp = data->alloc_bytes - this->alloc_bytes;
+               list_for_each_entry(sort, sort_list, list) {
+                       cmp = sort->cmp(data, this);
+                       if (cmp)
+                               break;
+               }
 
                if (cmp > 0)
                        new = &parent->rb_left;
@@ -776,7 +1282,8 @@ static void sort_page_insert(struct rb_root *root, struct page_stat *data)
        rb_insert_color(&data->node, root);
 }
 
-static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted)
+static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted,
+                              struct list_head *sort_list)
 {
        struct rb_node *node;
        struct page_stat *data;
@@ -788,7 +1295,7 @@ static void __sort_page_result(struct rb_root *root, struct rb_root *root_sorted
 
                rb_erase(node, root);
                data = rb_entry(node, struct page_stat, node);
-               sort_page_insert(root_sorted, data);
+               sort_page_insert(root_sorted, data, sort_list);
        }
 }
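
sort_page_insert() now orders nodes by walking a list of sort_dimension comparators and taking the first non-zero result, so a key string like "bytes,hit" sorts by bytes and breaks ties by hit count. A self-contained model of that first-decisive-key chain:

#include <stdio.h>

struct rec { int bytes, hits; };

static int cmp_bytes(const struct rec *a, const struct rec *b)
{ return (a->bytes > b->bytes) - (a->bytes < b->bytes); }

static int cmp_hits(const struct rec *a, const struct rec *b)
{ return (a->hits > b->hits) - (a->hits < b->hits); }

typedef int (*cmp_fn)(const struct rec *, const struct rec *);

static int chain_cmp(const struct rec *a, const struct rec *b,
		     cmp_fn *keys, int nr_keys)
{
	int i, cmp = 0;

	for (i = 0; i < nr_keys && !cmp; i++)
		cmp = keys[i](a, b);	/* first decisive key wins */
	return cmp;
}

int main(void)
{
	cmp_fn keys[] = { cmp_bytes, cmp_hits };	/* like "bytes,hit" */
	struct rec x = { 4096, 3 }, y = { 4096, 7 };

	printf("%d\n", chain_cmp(&x, &y, keys, 2));	/* bytes tie, hits: -1 */
	return 0;
}
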
 
@@ -796,12 +1303,20 @@ static void sort_result(void)
 {
        if (kmem_slab) {
                __sort_slab_result(&root_alloc_stat, &root_alloc_sorted,
-                                  &alloc_sort);
+                                  &slab_alloc_sort);
                __sort_slab_result(&root_caller_stat, &root_caller_sorted,
-                                  &caller_sort);
+                                  &slab_caller_sort);
        }
        if (kmem_page) {
-               __sort_page_result(&page_alloc_tree, &page_alloc_sorted);
+               if (live_page)
+                       __sort_page_result(&page_live_tree, &page_alloc_sorted,
+                                          &page_alloc_sort);
+               else
+                       __sort_page_result(&page_alloc_tree, &page_alloc_sorted,
+                                          &page_alloc_sort);
+
+               __sort_page_result(&page_caller_tree, &page_caller_sorted,
+                                  &page_caller_sort);
        }
 }
 
@@ -850,8 +1365,12 @@ out:
        return err;
 }
 
-static int ptr_cmp(struct alloc_stat *l, struct alloc_stat *r)
+/* slab sort keys */
+static int ptr_cmp(void *a, void *b)
 {
+       struct alloc_stat *l = a;
+       struct alloc_stat *r = b;
+
        if (l->ptr < r->ptr)
                return -1;
        else if (l->ptr > r->ptr)
@@ -864,8 +1383,11 @@ static struct sort_dimension ptr_sort_dimension = {
        .cmp    = ptr_cmp,
 };
 
-static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
+static int slab_callsite_cmp(void *a, void *b)
 {
+       struct alloc_stat *l = a;
+       struct alloc_stat *r = b;
+
        if (l->call_site < r->call_site)
                return -1;
        else if (l->call_site > r->call_site)
@@ -875,11 +1397,14 @@ static int callsite_cmp(struct alloc_stat *l, struct alloc_stat *r)
 
 static struct sort_dimension callsite_sort_dimension = {
        .name   = "callsite",
-       .cmp    = callsite_cmp,
+       .cmp    = slab_callsite_cmp,
 };
 
-static int hit_cmp(struct alloc_stat *l, struct alloc_stat *r)
+static int hit_cmp(void *a, void *b)
 {
+       struct alloc_stat *l = a;
+       struct alloc_stat *r = b;
+
        if (l->hit < r->hit)
                return -1;
        else if (l->hit > r->hit)
@@ -892,8 +1417,11 @@ static struct sort_dimension hit_sort_dimension = {
        .cmp    = hit_cmp,
 };
 
-static int bytes_cmp(struct alloc_stat *l, struct alloc_stat *r)
+static int bytes_cmp(void *a, void *b)
 {
+       struct alloc_stat *l = a;
+       struct alloc_stat *r = b;
+
        if (l->bytes_alloc < r->bytes_alloc)
                return -1;
        else if (l->bytes_alloc > r->bytes_alloc)
@@ -906,9 +1434,11 @@ static struct sort_dimension bytes_sort_dimension = {
        .cmp    = bytes_cmp,
 };
 
-static int frag_cmp(struct alloc_stat *l, struct alloc_stat *r)
+static int frag_cmp(void *a, void *b)
 {
        double x, y;
+       struct alloc_stat *l = a;
+       struct alloc_stat *r = b;
 
        x = fragmentation(l->bytes_req, l->bytes_alloc);
        y = fragmentation(r->bytes_req, r->bytes_alloc);
@@ -925,8 +1455,11 @@ static struct sort_dimension frag_sort_dimension = {
        .cmp    = frag_cmp,
 };
 
-static int pingpong_cmp(struct alloc_stat *l, struct alloc_stat *r)
+static int pingpong_cmp(void *a, void *b)
 {
+       struct alloc_stat *l = a;
+       struct alloc_stat *r = b;
+
        if (l->pingpong < r->pingpong)
                return -1;
        else if (l->pingpong > r->pingpong)
@@ -939,7 +1472,135 @@ static struct sort_dimension pingpong_sort_dimension = {
        .cmp    = pingpong_cmp,
 };
 
-static struct sort_dimension *avail_sorts[] = {
+/* page sort keys */
+static int page_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       if (l->page < r->page)
+               return -1;
+       else if (l->page > r->page)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension page_sort_dimension = {
+       .name   = "page",
+       .cmp    = page_cmp,
+};
+
+static int page_callsite_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       if (l->callsite < r->callsite)
+               return -1;
+       else if (l->callsite > r->callsite)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension page_callsite_sort_dimension = {
+       .name   = "callsite",
+       .cmp    = page_callsite_cmp,
+};
+
+static int page_hit_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       if (l->nr_alloc < r->nr_alloc)
+               return -1;
+       else if (l->nr_alloc > r->nr_alloc)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension page_hit_sort_dimension = {
+       .name   = "hit",
+       .cmp    = page_hit_cmp,
+};
+
+static int page_bytes_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       if (l->alloc_bytes < r->alloc_bytes)
+               return -1;
+       else if (l->alloc_bytes > r->alloc_bytes)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension page_bytes_sort_dimension = {
+       .name   = "bytes",
+       .cmp    = page_bytes_cmp,
+};
+
+static int page_order_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       if (l->order < r->order)
+               return -1;
+       else if (l->order > r->order)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension page_order_sort_dimension = {
+       .name   = "order",
+       .cmp    = page_order_cmp,
+};
+
+static int migrate_type_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       /* internal use: -1U matches any migrate type when looking up a freed page */
+       if (l->migrate_type == -1U)
+               return 0;
+
+       if (l->migrate_type < r->migrate_type)
+               return -1;
+       else if (l->migrate_type > r->migrate_type)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension migrate_type_sort_dimension = {
+       .name   = "migtype",
+       .cmp    = migrate_type_cmp,
+};
+
+static int gfp_flags_cmp(void *a, void *b)
+{
+       struct page_stat *l = a;
+       struct page_stat *r = b;
+
+       /* internal use: -1U matches any gfp flags when looking up a freed page */
+       if (l->gfp_flags == -1U)
+               return 0;
+
+       if (l->gfp_flags < r->gfp_flags)
+               return -1;
+       else if (l->gfp_flags > r->gfp_flags)
+               return 1;
+       return 0;
+}
+
+static struct sort_dimension gfp_flags_sort_dimension = {
+       .name   = "gfp",
+       .cmp    = gfp_flags_cmp,
+};
+
+static struct sort_dimension *slab_sorts[] = {
        &ptr_sort_dimension,
        &callsite_sort_dimension,
        &hit_sort_dimension,
@@ -948,16 +1609,44 @@ static struct sort_dimension *avail_sorts[] = {
        &pingpong_sort_dimension,
 };
 
-#define NUM_AVAIL_SORTS        ((int)ARRAY_SIZE(avail_sorts))
+static struct sort_dimension *page_sorts[] = {
+       &page_sort_dimension,
+       &page_callsite_sort_dimension,
+       &page_hit_sort_dimension,
+       &page_bytes_sort_dimension,
+       &page_order_sort_dimension,
+       &migrate_type_sort_dimension,
+       &gfp_flags_sort_dimension,
+};
+
+static int slab_sort_dimension__add(const char *tok, struct list_head *list)
+{
+       struct sort_dimension *sort;
+       int i;
+
+       for (i = 0; i < (int)ARRAY_SIZE(slab_sorts); i++) {
+               if (!strcmp(slab_sorts[i]->name, tok)) {
+                       sort = memdup(slab_sorts[i], sizeof(*slab_sorts[i]));
+                       if (!sort) {
+                               pr_err("%s: memdup failed\n", __func__);
+                               return -1;
+                       }
+                       list_add_tail(&sort->list, list);
+                       return 0;
+               }
+       }
+
+       return -1;
+}
 
-static int sort_dimension__add(const char *tok, struct list_head *list)
+static int page_sort_dimension__add(const char *tok, struct list_head *list)
 {
        struct sort_dimension *sort;
        int i;
 
-       for (i = 0; i < NUM_AVAIL_SORTS; i++) {
-               if (!strcmp(avail_sorts[i]->name, tok)) {
-                       sort = memdup(avail_sorts[i], sizeof(*avail_sorts[i]));
+       for (i = 0; i < (int)ARRAY_SIZE(page_sorts); i++) {
+               if (!strcmp(page_sorts[i]->name, tok)) {
+                       sort = memdup(page_sorts[i], sizeof(*page_sorts[i]));
                        if (!sort) {
                                pr_err("%s: memdup failed\n", __func__);
                                return -1;
@@ -970,7 +1659,33 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
        return -1;
 }
 
-static int setup_sorting(struct list_head *sort_list, const char *arg)
+static int setup_slab_sorting(struct list_head *sort_list, const char *arg)
+{
+       char *tok;
+       char *str = strdup(arg);
+       char *pos = str;
+
+       if (!str) {
+               pr_err("%s: strdup failed\n", __func__);
+               return -1;
+       }
+
+       while (true) {
+               tok = strsep(&pos, ",");
+               if (!tok)
+                       break;
+               if (slab_sort_dimension__add(tok, sort_list) < 0) {
+                       error("Unknown slab --sort key: '%s'", tok);
+                       free(str);
+                       return -1;
+               }
+       }
+
+       free(str);
+       return 0;
+}
+
+static int setup_page_sorting(struct list_head *sort_list, const char *arg)
 {
        char *tok;
        char *str = strdup(arg);
@@ -985,8 +1700,8 @@ static int setup_sorting(struct list_head *sort_list, const char *arg)
                tok = strsep(&pos, ",");
                if (!tok)
                        break;
-               if (sort_dimension__add(tok, sort_list) < 0) {
-                       error("Unknown --sort key: '%s'", tok);
+               if (page_sort_dimension__add(tok, sort_list) < 0) {
+                       error("Unknown page --sort key: '%s'", tok);
                        free(str);
                        return -1;
                }
@@ -1002,10 +1717,18 @@ static int parse_sort_opt(const struct option *opt __maybe_unused,
        if (!arg)
                return -1;
 
-       if (caller_flag > alloc_flag)
-               return setup_sorting(&caller_sort, arg);
-       else
-               return setup_sorting(&alloc_sort, arg);
+       if (kmem_page > kmem_slab ||
+           (kmem_page == 0 && kmem_slab == 0 && kmem_default == KMEM_PAGE)) {
+               if (caller_flag > alloc_flag)
+                       return setup_page_sorting(&page_caller_sort, arg);
+               else
+                       return setup_page_sorting(&page_alloc_sort, arg);
+       } else {
+               if (caller_flag > alloc_flag)
+                       return setup_slab_sorting(&slab_caller_sort, arg);
+               else
+                       return setup_slab_sorting(&slab_alloc_sort, arg);
+       }
 
        return 0;
 }
@@ -1084,7 +1807,7 @@ static int __cmd_record(int argc, const char **argv)
        if (kmem_slab)
                rec_argc += ARRAY_SIZE(slab_events);
        if (kmem_page)
-               rec_argc += ARRAY_SIZE(page_events);
+               rec_argc += ARRAY_SIZE(page_events) + 1; /* for -g */
 
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
 
@@ -1099,6 +1822,8 @@ static int __cmd_record(int argc, const char **argv)
                        rec_argv[i] = strdup(slab_events[j]);
        }
        if (kmem_page) {
+               rec_argv[i++] = strdup("-g");
+
                for (j = 0; j < ARRAY_SIZE(page_events); j++, i++)
                        rec_argv[i] = strdup(page_events[j]);
        }
@@ -1109,9 +1834,26 @@ static int __cmd_record(int argc, const char **argv)
        return cmd_record(i, rec_argv, NULL);
 }
 
+static int kmem_config(const char *var, const char *value, void *cb)
+{
+       if (!strcmp(var, "kmem.default")) {
+               if (!strcmp(value, "slab"))
+                       kmem_default = KMEM_SLAB;
+               else if (!strcmp(value, "page"))
+                       kmem_default = KMEM_PAGE;
+               else
+                       pr_err("invalid default value ('slab' or 'page' required): %s\n",
+                              value);
+               return 0;
+       }
+
+       return perf_default_config(var, value, cb);
+}
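
kmem_config() hooks the new kmem.default key into perf's configuration mechanism, so the slab/page choice can be made persistent instead of passed on every run. Presumably it is set in the git-config style ~/.perfconfig file, along the lines of:

[kmem]
	default = page

With that in place, a bare 'perf kmem stat' behaves like 'perf kmem stat --page', per the kmem_default fallback added in cmd_kmem() below.
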
+
 int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
 {
-       const char * const default_sort_order = "frag,hit,bytes";
+       const char * const default_slab_sort = "frag,hit,bytes";
+       const char * const default_page_sort = "bytes,hit";
        struct perf_data_file file = {
                .mode = PERF_DATA_MODE_READ,
        };
@@ -1124,8 +1866,8 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_CALLBACK_NOOPT(0, "alloc", NULL, NULL,
                           "show per-allocation statistics", parse_alloc_opt),
        OPT_CALLBACK('s', "sort", NULL, "key[,key2...]",
-                    "sort by keys: ptr, call_site, bytes, hit, pingpong, frag",
-                    parse_sort_opt),
+                    "sort by keys: ptr, callsite, bytes, hit, pingpong, frag, "
+                    "page, order, migtype, gfp", parse_sort_opt),
        OPT_CALLBACK('l', "line", NULL, "num", "show n lines", parse_line_opt),
        OPT_BOOLEAN(0, "raw-ip", &raw_ip, "show raw ip instead of symbol"),
        OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
@@ -1133,6 +1875,7 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
                           parse_slab_opt),
        OPT_CALLBACK_NOOPT(0, "page", NULL, NULL, "Analyze page allocator",
                           parse_page_opt),
+       OPT_BOOLEAN(0, "live", &live_page, "Show live page stat"),
        OPT_END()
        };
        const char *const kmem_subcommands[] = { "record", "stat", NULL };
@@ -1142,15 +1885,21 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
        };
        struct perf_session *session;
        int ret = -1;
+       const char errmsg[] = "No %s allocation events found.  Have you run 'perf kmem record --%s'?\n";
 
+       perf_config(kmem_config, NULL);
        argc = parse_options_subcommand(argc, argv, kmem_options,
                                        kmem_subcommands, kmem_usage, 0);
 
        if (!argc)
                usage_with_options(kmem_usage, kmem_options);
 
-       if (kmem_slab == 0 && kmem_page == 0)
-               kmem_slab = 1;  /* for backward compatibility */
+       if (kmem_slab == 0 && kmem_page == 0) {
+               if (kmem_default == KMEM_SLAB)
+                       kmem_slab = 1;
+               else
+                       kmem_page = 1;
+       }
 
        if (!strncmp(argv[0], "rec", 3)) {
                symbol__init(NULL);
@@ -1159,19 +1908,30 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
 
        file.path = input_name;
 
-       session = perf_session__new(&file, false, &perf_kmem);
+       kmem_session = session = perf_session__new(&file, false, &perf_kmem);
        if (session == NULL)
                return -1;
 
+       if (kmem_slab) {
+               if (!perf_evlist__find_tracepoint_by_name(session->evlist,
+                                                         "kmem:kmalloc")) {
+                       pr_err(errmsg, "slab", "slab");
+                       return -1;
+               }
+       }
+
        if (kmem_page) {
-               struct perf_evsel *evsel = perf_evlist__first(session->evlist);
+               struct perf_evsel *evsel;
 
-               if (evsel == NULL || evsel->tp_format == NULL) {
-                       pr_err("invalid event found.. aborting\n");
+               evsel = perf_evlist__find_tracepoint_by_name(session->evlist,
+                                                            "kmem:mm_page_alloc");
+               if (evsel == NULL) {
+                       pr_err(errmsg, "page", "page");
                        return -1;
                }
 
                kmem_page_size = pevent_get_page_size(evsel->tp_format->pevent);
+               symbol_conf.use_callchain = true;
        }
 
        symbol__init(&session->header.env);
@@ -1182,11 +1942,21 @@ int cmd_kmem(int argc, const char **argv, const char *prefix __maybe_unused)
                if (cpu__setup_cpunode_map())
                        goto out_delete;
 
-               if (list_empty(&caller_sort))
-                       setup_sorting(&caller_sort, default_sort_order);
-               if (list_empty(&alloc_sort))
-                       setup_sorting(&alloc_sort, default_sort_order);
-
+               if (list_empty(&slab_caller_sort))
+                       setup_slab_sorting(&slab_caller_sort, default_slab_sort);
+               if (list_empty(&slab_alloc_sort))
+                       setup_slab_sorting(&slab_alloc_sort, default_slab_sort);
+               if (list_empty(&page_caller_sort))
+                       setup_page_sorting(&page_caller_sort, default_page_sort);
+               if (list_empty(&page_alloc_sort))
+                       setup_page_sorting(&page_alloc_sort, default_page_sort);
+
+               if (kmem_page) {
+                       setup_page_sorting(&page_alloc_sort_input,
+                                          "page,order,migtype,gfp");
+                       setup_page_sorting(&page_caller_sort_input,
+                                          "callsite,order,migtype,gfp");
+               }
                ret = __cmd_kmem(session);
        } else
                usage_with_options(kmem_usage, kmem_options);
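
The builtin-kmem.c hunks above make the analysis mode configurable: when neither --slab nor --page is requested, the default now comes from a perf_config() callback (kmem_default) instead of being hard-wired to slab, and each mode first verifies that its tracepoints were actually recorded, printing the shared errmsg hint otherwise. A minimal standalone sketch of the default-selection logic, with a hand-rolled callback standing in for perf's config machinery (the "kmem.default" key is an assumption based on the variable names in the diff):

#include <stdio.h>
#include <string.h>

enum kmem_mode { KMEM_SLAB, KMEM_PAGE };

static enum kmem_mode kmem_default = KMEM_SLAB;

/* stand-in for the perf_config() callback named in the diff */
static int kmem_config(const char *var, const char *value)
{
        if (!strcmp(var, "kmem.default")) {
                if (!strcmp(value, "slab"))
                        kmem_default = KMEM_SLAB;
                else if (!strcmp(value, "page"))
                        kmem_default = KMEM_PAGE;
                else
                        return -1;      /* unknown value */
        }
        return 0;
}

int main(void)
{
        int kmem_slab = 0, kmem_page = 0;

        kmem_config("kmem.default", "page");

        /* neither --slab nor --page given: fall back to the configured mode */
        if (kmem_slab == 0 && kmem_page == 0) {
                if (kmem_default == KMEM_SLAB)
                        kmem_slab = 1;
                else
                        kmem_page = 1;
        }
        printf("slab=%d page=%d\n", kmem_slab, kmem_page);
        return 0;
}
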
index 1f9338f6109cdbe79f8f08e510c00e4a171d1f82..74878cd75078055e437396fc9a6b201603586076 100644 (file)
@@ -651,6 +651,7 @@ static int process_sample_event(struct perf_tool *tool,
                                struct perf_evsel *evsel,
                                struct machine *machine)
 {
+       int err = 0;
        struct thread *thread;
        struct perf_kvm_stat *kvm = container_of(tool, struct perf_kvm_stat,
                                                 tool);
@@ -666,9 +667,10 @@ static int process_sample_event(struct perf_tool *tool,
        }
 
        if (!handle_kvm_event(kvm, thread, evsel, sample))
-               return -1;
+               err = -1;
 
-       return 0;
+       thread__put(thread);
+       return err;
 }
 
 static int cpu_isa_config(struct perf_kvm_stat *kvm)
@@ -1309,6 +1311,8 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
                        "show events other than"
                        " HLT (x86 only) or Wait state (s390 only)"
                        " that take longer than duration usecs"),
+               OPT_UINTEGER(0, "proc-map-timeout", &kvm->opts.proc_map_timeout,
+                               "per thread proc mmap processing timeout in ms"),
                OPT_END()
        };
        const char * const live_usage[] = {
@@ -1336,6 +1340,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
        kvm->opts.target.uses_mmap = false;
        kvm->opts.target.uid_str = NULL;
        kvm->opts.target.uid = UINT_MAX;
+       kvm->opts.proc_map_timeout = 500;
 
        symbol__init(NULL);
        disable_buildid_cache();
@@ -1391,7 +1396,7 @@ static int kvm_events_live(struct perf_kvm_stat *kvm,
        perf_session__set_id_hdr_size(kvm->session);
        ordered_events__set_copy_on_queue(&kvm->session->ordered_events, true);
        machine__synthesize_threads(&kvm->session->machines.host, &kvm->opts.target,
-                                   kvm->evlist->threads, false);
+                                   kvm->evlist->threads, false, kvm->opts.proc_map_timeout);
        err = kvm_live_open_events(kvm);
        if (err)
                goto out;
index d49c2ab85fc2dd1e3c6560391b71f7e3269cf31e..de16aaed516e6016b2a8d887f87727a8179acf19 100644 (file)
@@ -769,6 +769,7 @@ static void dump_threads(void)
                t = perf_session__findnew(session, st->tid);
                pr_info("%10d: %s\n", st->tid, thread__comm_str(t));
                node = rb_next(node);
+               thread__put(t);
        };
 }
 
@@ -810,6 +811,7 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
                                struct perf_evsel *evsel,
                                struct machine *machine)
 {
+       int err = 0;
        struct thread *thread = machine__findnew_thread(machine, sample->pid,
                                                        sample->tid);
 
@@ -821,10 +823,12 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
 
        if (evsel->handler != NULL) {
                tracepoint_handler f = evsel->handler;
-               return f(evsel, sample);
+               err = f(evsel, sample);
        }
 
-       return 0;
+       thread__put(thread);
+
+       return err;
 }
 
 static void sort_result(void)
index 675216e08bfcd04baf2336ece7da328e914e21fd..da2ec06f0742dc6acf98c1c9b74d7cf45ff0fcb2 100644 (file)
@@ -74,7 +74,7 @@ dump_raw_samples(struct perf_tool *tool,
        }
 
        if (al.filtered || (mem->hide_unresolved && al.sym == NULL))
-               return 0;
+               goto out_put;
 
        if (al.map != NULL)
                al.map->dso->hit = 1;
@@ -103,7 +103,8 @@ dump_raw_samples(struct perf_tool *tool,
                symbol_conf.field_sep,
                al.map ? (al.map->dso ? al.map->dso->long_name : "???") : "???",
                al.sym ? al.sym->name : "???");
-
+out_put:
+       addr_location__put(&al);
        return 0;
 }
 
index f7b1af67e9f686d86f8bd1539b96a935b04a4559..1272559fa22d9eb60367f34594ebd18a7e76e8d3 100644 (file)
 
 #define DEFAULT_VAR_FILTER "!__k???tab_* & !__crc_*"
 #define DEFAULT_FUNC_FILTER "!_*"
+#define DEFAULT_LIST_FILTER "*:*"
 
 /* Session management structure */
 static struct {
+       int command;    /* Command short_name */
        bool list_events;
-       bool force_add;
-       bool show_lines;
-       bool show_vars;
-       bool show_ext_vars;
-       bool show_funcs;
-       bool mod_events;
        bool uprobes;
        bool quiet;
        bool target_used;
        int nevents;
        struct perf_probe_event events[MAX_PROBES];
-       struct strlist *dellist;
        struct line_range line_range;
        char *target;
-       int max_probe_points;
        struct strfilter *filter;
 } params;
 
@@ -93,6 +87,28 @@ static int parse_probe_event(const char *str)
        return ret;
 }
 
+static int params_add_filter(const char *str)
+{
+       const char *err = NULL;
+       int ret = 0;
+
+       pr_debug2("Add filter: %s\n", str);
+       if (!params.filter) {
+               params.filter = strfilter__new(str, &err);
+               if (!params.filter)
+                       ret = err ? -EINVAL : -ENOMEM;
+       } else
+               ret = strfilter__or(params.filter, str, &err);
+
+       if (ret == -EINVAL) {
+               pr_err("Filter parse error at %td.\n", err - str + 1);
+               pr_err("Source: \"%s\"\n", str);
+               pr_err("         %*c\n", (int)(err - str + 1), '^');
+       }
+
+       return ret;
+}
+
 static int set_target(const char *ptr)
 {
        int found = 0;
@@ -152,34 +168,11 @@ static int parse_probe_event_argv(int argc, const char **argv)
 
                len += sprintf(&buf[len], "%s ", argv[i]);
        }
-       params.mod_events = true;
        ret = parse_probe_event(buf);
        free(buf);
        return ret;
 }
 
-static int opt_add_probe_event(const struct option *opt __maybe_unused,
-                             const char *str, int unset __maybe_unused)
-{
-       if (str) {
-               params.mod_events = true;
-               return parse_probe_event(str);
-       } else
-               return 0;
-}
-
-static int opt_del_probe_event(const struct option *opt __maybe_unused,
-                              const char *str, int unset __maybe_unused)
-{
-       if (str) {
-               params.mod_events = true;
-               if (!params.dellist)
-                       params.dellist = strlist__new(true, NULL);
-               strlist__add(params.dellist, str);
-       }
-       return 0;
-}
-
 static int opt_set_target(const struct option *opt, const char *str,
                        int unset __maybe_unused)
 {
@@ -217,8 +210,10 @@ static int opt_set_target(const struct option *opt, const char *str,
        return ret;
 }
 
+/* Command option callbacks */
+
 #ifdef HAVE_DWARF_SUPPORT
-static int opt_show_lines(const struct option *opt __maybe_unused,
+static int opt_show_lines(const struct option *opt,
                          const char *str, int unset __maybe_unused)
 {
        int ret = 0;
@@ -226,19 +221,19 @@ static int opt_show_lines(const struct option *opt __maybe_unused,
        if (!str)
                return 0;
 
-       if (params.show_lines) {
+       if (params.command == 'L') {
                pr_warning("Warning: more than one --line options are"
                           " detected. Only the first one is valid.\n");
                return 0;
        }
 
-       params.show_lines = true;
+       params.command = opt->short_name;
        ret = parse_line_range_desc(str, &params.line_range);
 
        return ret;
 }
 
-static int opt_show_vars(const struct option *opt __maybe_unused,
+static int opt_show_vars(const struct option *opt,
                         const char *str, int unset __maybe_unused)
 {
        struct perf_probe_event *pev = &params.events[params.nevents];
@@ -252,29 +247,39 @@ static int opt_show_vars(const struct option *opt __maybe_unused,
                pr_err("  Error: '--vars' doesn't accept arguments.\n");
                return -EINVAL;
        }
-       params.show_vars = true;
+       params.command = opt->short_name;
 
        return ret;
 }
 #endif
+static int opt_add_probe_event(const struct option *opt,
+                             const char *str, int unset __maybe_unused)
+{
+       if (str) {
+               params.command = opt->short_name;
+               return parse_probe_event(str);
+       }
+
+       return 0;
+}
+
+static int opt_set_filter_with_command(const struct option *opt,
+                                      const char *str, int unset)
+{
+       if (!unset)
+               params.command = opt->short_name;
+
+       if (str)
+               return params_add_filter(str);
+
+       return 0;
+}
 
 static int opt_set_filter(const struct option *opt __maybe_unused,
                          const char *str, int unset __maybe_unused)
 {
-       const char *err;
-
-       if (str) {
-               pr_debug2("Set filter: %s\n", str);
-               if (params.filter)
-                       strfilter__delete(params.filter);
-               params.filter = strfilter__new(str, &err);
-               if (!params.filter) {
-                       pr_err("Filter parse error at %td.\n", err - str + 1);
-                       pr_err("Source: \"%s\"\n", str);
-                       pr_err("         %*c\n", (int)(err - str + 1), '^');
-                       return -EINVAL;
-               }
-       }
+       if (str)
+               return params_add_filter(str);
 
        return 0;
 }
@@ -290,8 +295,6 @@ static void cleanup_params(void)
 
        for (i = 0; i < params.nevents; i++)
                clear_perf_probe_event(params.events + i);
-       if (params.dellist)
-               strlist__delete(params.dellist);
        line_range__clear(&params.line_range);
        free(params.target);
        if (params.filter)
@@ -316,22 +319,24 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                "perf probe [<options>] 'PROBEDEF' ['PROBEDEF' ...]",
                "perf probe [<options>] --add 'PROBEDEF' [--add 'PROBEDEF' ...]",
                "perf probe [<options>] --del '[GROUP:]EVENT' ...",
-               "perf probe --list",
+               "perf probe --list [GROUP:]EVENT ...",
 #ifdef HAVE_DWARF_SUPPORT
                "perf probe [<options>] --line 'LINEDESC'",
                "perf probe [<options>] --vars 'PROBEPOINT'",
 #endif
+               "perf probe [<options>] --funcs",
                NULL
-};
+       };
        struct option options[] = {
        OPT_INCR('v', "verbose", &verbose,
                    "be more verbose (show parsed arguments, etc)"),
        OPT_BOOLEAN('q', "quiet", &params.quiet,
                    "be quiet (do not show any mesages)"),
-       OPT_BOOLEAN('l', "list", &params.list_events,
-                   "list up current probe events"),
+       OPT_CALLBACK_DEFAULT('l', "list", NULL, "[GROUP:]EVENT",
+                            "list probe events",
+                            opt_set_filter_with_command, DEFAULT_LIST_FILTER),
        OPT_CALLBACK('d', "del", NULL, "[GROUP:]EVENT", "delete a probe event.",
-               opt_del_probe_event),
+                    opt_set_filter_with_command),
        OPT_CALLBACK('a', "add", NULL,
 #ifdef HAVE_DWARF_SUPPORT
                "[EVENT=]FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT"
@@ -356,7 +361,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                "\t\tARG:\tProbe argument (kprobe-tracer argument format.)\n",
 #endif
                opt_add_probe_event),
-       OPT_BOOLEAN('f', "force", &params.force_add, "forcibly add events"
+       OPT_BOOLEAN('f', "force", &probe_conf.force_add, "forcibly add events"
                    " with existing name"),
 #ifdef HAVE_DWARF_SUPPORT
        OPT_CALLBACK('L', "line", NULL,
@@ -365,8 +370,10 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_CALLBACK('V', "vars", NULL,
                     "FUNC[@SRC][+OFF|%return|:RL|;PT]|SRC:AL|SRC;PT",
                     "Show accessible variables on PROBEDEF", opt_show_vars),
-       OPT_BOOLEAN('\0', "externs", &params.show_ext_vars,
+       OPT_BOOLEAN('\0', "externs", &probe_conf.show_ext_vars,
                    "Show external variables too (with --vars only)"),
+       OPT_BOOLEAN('\0', "range", &probe_conf.show_location_range,
+               "Show variables location range in scope (with --vars only)"),
        OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
                   "file", "vmlinux pathname"),
        OPT_STRING('s', "source", &symbol_conf.source_prefix,
@@ -374,12 +381,15 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_CALLBACK('m', "module", NULL, "modname|path",
                "target module name (for online) or path (for offline)",
                opt_set_target),
+       OPT_BOOLEAN('\0', "no-inlines", &probe_conf.no_inlines,
+               "Don't search inlined functions"),
 #endif
        OPT__DRY_RUN(&probe_event_dry_run),
-       OPT_INTEGER('\0', "max-probes", &params.max_probe_points,
+       OPT_INTEGER('\0', "max-probes", &probe_conf.max_probes,
                 "Set how many probe points can be found for a probe."),
-       OPT_BOOLEAN('F', "funcs", &params.show_funcs,
-                   "Show potential probe-able functions."),
+       OPT_CALLBACK_DEFAULT('F', "funcs", NULL, "[FILTER]",
+                            "Show potential probe-able functions.",
+                            opt_set_filter_with_command, DEFAULT_FUNC_FILTER),
        OPT_CALLBACK('\0', "filter", NULL,
                     "[!]FILTER", "Set a filter (with --vars/funcs only)\n"
                     "\t\t\t(default: \"" DEFAULT_VAR_FILTER "\" for --vars,\n"
@@ -402,6 +412,7 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
        set_option_flag(options, 'L', "line", PARSE_OPT_EXCLUSIVE);
        set_option_flag(options, 'V', "vars", PARSE_OPT_EXCLUSIVE);
 #endif
+       set_option_flag(options, 'F', "funcs", PARSE_OPT_EXCLUSIVE);
 
        argc = parse_options(argc, argv, options, probe_usage,
                             PARSE_OPT_STOP_AT_NON_OPTION);
@@ -410,11 +421,16 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                        pr_warning("  Error: '-' is not supported.\n");
                        usage_with_options(probe_usage, options);
                }
+               if (params.command && params.command != 'a') {
+                       pr_warning("  Error: a command other than --add was specified.\n");
+                       usage_with_options(probe_usage, options);
+               }
                ret = parse_probe_event_argv(argc, argv);
                if (ret < 0) {
                        pr_err_with_code("  Error: Command Parse Error.", ret);
                        return ret;
                }
+               params.command = 'a';
        }
 
        if (params.quiet) {
@@ -425,89 +441,70 @@ __cmd_probe(int argc, const char **argv, const char *prefix __maybe_unused)
                verbose = -1;
        }
 
-       if (params.max_probe_points == 0)
-               params.max_probe_points = MAX_PROBES;
-
-       if ((!params.nevents && !params.dellist && !params.list_events &&
-            !params.show_lines && !params.show_funcs))
-               usage_with_options(probe_usage, options);
+       if (probe_conf.max_probes == 0)
+               probe_conf.max_probes = MAX_PROBES;
 
        /*
         * Only consider the user's kernel image path if given.
         */
        symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
 
-       if (params.list_events) {
+       switch (params.command) {
+       case 'l':
                if (params.uprobes) {
                        pr_warning("  Error: Don't use --list with --exec.\n");
                        usage_with_options(probe_usage, options);
                }
-               ret = show_perf_probe_events();
+               ret = show_perf_probe_events(params.filter);
                if (ret < 0)
                        pr_err_with_code("  Error: Failed to show event list.", ret);
                return ret;
-       }
-       if (params.show_funcs) {
-               if (!params.filter)
-                       params.filter = strfilter__new(DEFAULT_FUNC_FILTER,
-                                                      NULL);
+       case 'F':
                ret = show_available_funcs(params.target, params.filter,
                                        params.uprobes);
-               strfilter__delete(params.filter);
-               params.filter = NULL;
                if (ret < 0)
                        pr_err_with_code("  Error: Failed to show functions.", ret);
                return ret;
-       }
-
 #ifdef HAVE_DWARF_SUPPORT
-       if (params.show_lines) {
+       case 'L':
                ret = show_line_range(&params.line_range, params.target,
                                      params.uprobes);
                if (ret < 0)
                        pr_err_with_code("  Error: Failed to show lines.", ret);
                return ret;
-       }
-       if (params.show_vars) {
+       case 'V':
                if (!params.filter)
                        params.filter = strfilter__new(DEFAULT_VAR_FILTER,
                                                       NULL);
 
                ret = show_available_vars(params.events, params.nevents,
-                                         params.max_probe_points,
-                                         params.target,
-                                         params.filter,
-                                         params.show_ext_vars);
-               strfilter__delete(params.filter);
-               params.filter = NULL;
+                                         params.filter);
                if (ret < 0)
                        pr_err_with_code("  Error: Failed to show vars.", ret);
                return ret;
-       }
 #endif
-
-       if (params.dellist) {
-               ret = del_perf_probe_events(params.dellist);
+       case 'd':
+               ret = del_perf_probe_events(params.filter);
                if (ret < 0) {
                        pr_err_with_code("  Error: Failed to delete events.", ret);
                        return ret;
                }
-       }
-
-       if (params.nevents) {
+               break;
+       case 'a':
                /* Ensure the last given target is used */
                if (params.target && !params.target_used) {
                        pr_warning("  Error: -x/-m must follow the probe definitions.\n");
                        usage_with_options(probe_usage, options);
                }
 
-               ret = add_perf_probe_events(params.events, params.nevents,
-                                           params.max_probe_points,
-                                           params.force_add);
+               ret = add_perf_probe_events(params.events, params.nevents);
                if (ret < 0) {
                        pr_err_with_code("  Error: Failed to add events.", ret);
                        return ret;
                }
+               break;
+       default:
+               usage_with_options(probe_usage, options);
        }
        return 0;
 }
@@ -522,5 +519,5 @@ int cmd_probe(int argc, const char **argv, const char *prefix)
                cleanup_params();
        }
 
-       return ret;
+       return ret < 0 ? ret : 0;
 }
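
The builtin-probe.c rework above replaces six mode booleans with a single params.command field holding the short option of the selected sub-command; the tail of __cmd_probe() then dispatches on it, and repeated --list/--del/--funcs arguments are OR-ed into one strfilter by params_add_filter(). A standalone sketch of the one-command-byte dispatch (set_command() is a hypothetical helper, not a perf API):

#include <stdio.h>

/* the short option letter of the chosen sub-command, 0 if none yet */
static int command;

/* illustrative helper: reject a second, conflicting command option */
static int set_command(int short_name)
{
        if (command && command != short_name) {
                fprintf(stderr, "Error: conflicting commands '%c' and '%c'\n",
                        command, short_name);
                return -1;
        }
        command = short_name;
        return 0;
}

int main(void)
{
        set_command('l');               /* e.g. --list was parsed */

        switch (command) {
        case 'l':
                puts("list probe events");
                break;
        case 'd':
                puts("delete probe events");
                break;
        case 'a':
                puts("add probe events");
                break;
        default:
                puts("usage");
        }
        return 0;
}
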
index c3efdfb630b5b664349ed9e40c41374d3863752d..de165a1b92402ac7a6267bd0a0c5aa30a0053c92 100644 (file)
@@ -27,6 +27,8 @@
 #include "util/cpumap.h"
 #include "util/thread_map.h"
 #include "util/data.h"
+#include "util/auxtrace.h"
+#include "util/parse-branch-options.h"
 
 #include <unistd.h>
 #include <sched.h>
@@ -38,6 +40,7 @@ struct record {
        struct record_opts      opts;
        u64                     bytes_written;
        struct perf_data_file   file;
+       struct auxtrace_record  *itr;
        struct perf_evlist      *evlist;
        struct perf_session     *session;
        const char              *progname;
@@ -110,9 +113,12 @@ out:
        return rc;
 }
 
-static volatile int done = 0;
+static volatile int done;
 static volatile int signr = -1;
-static volatile int child_finished = 0;
+static volatile int child_finished;
+static volatile int auxtrace_snapshot_enabled;
+static volatile int auxtrace_snapshot_err;
+static volatile int auxtrace_record__snapshot_started;
 
 static void sig_handler(int sig)
 {
@@ -133,6 +139,133 @@ static void record__sig_exit(void)
        raise(signr);
 }
 
+#ifdef HAVE_AUXTRACE_SUPPORT
+
+static int record__process_auxtrace(struct perf_tool *tool,
+                                   union perf_event *event, void *data1,
+                                   size_t len1, void *data2, size_t len2)
+{
+       struct record *rec = container_of(tool, struct record, tool);
+       struct perf_data_file *file = &rec->file;
+       size_t padding;
+       u8 pad[8] = {0};
+
+       if (!perf_data_file__is_pipe(file)) {
+               off_t file_offset;
+               int fd = perf_data_file__fd(file);
+               int err;
+
+               file_offset = lseek(fd, 0, SEEK_CUR);
+               if (file_offset == -1)
+                       return -1;
+               err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
+                                                    event, file_offset);
+               if (err)
+                       return err;
+       }
+
+       /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
+       padding = (len1 + len2) & 7;
+       if (padding)
+               padding = 8 - padding;
+
+       record__write(rec, event, event->header.size);
+       record__write(rec, data1, len1);
+       if (len2)
+               record__write(rec, data2, len2);
+       record__write(rec, &pad, padding);
+
+       return 0;
+}
+
+static int record__auxtrace_mmap_read(struct record *rec,
+                                     struct auxtrace_mmap *mm)
+{
+       int ret;
+
+       ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
+                                 record__process_auxtrace);
+       if (ret < 0)
+               return ret;
+
+       if (ret)
+               rec->samples++;
+
+       return 0;
+}
+
+static int record__auxtrace_mmap_read_snapshot(struct record *rec,
+                                              struct auxtrace_mmap *mm)
+{
+       int ret;
+
+       ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
+                                          record__process_auxtrace,
+                                          rec->opts.auxtrace_snapshot_size);
+       if (ret < 0)
+               return ret;
+
+       if (ret)
+               rec->samples++;
+
+       return 0;
+}
+
+static int record__auxtrace_read_snapshot_all(struct record *rec)
+{
+       int i;
+       int rc = 0;
+
+       for (i = 0; i < rec->evlist->nr_mmaps; i++) {
+               struct auxtrace_mmap *mm =
+                               &rec->evlist->mmap[i].auxtrace_mmap;
+
+               if (!mm->base)
+                       continue;
+
+               if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
+                       rc = -1;
+                       goto out;
+               }
+       }
+out:
+       return rc;
+}
+
+static void record__read_auxtrace_snapshot(struct record *rec)
+{
+       pr_debug("Recording AUX area tracing snapshot\n");
+       if (record__auxtrace_read_snapshot_all(rec) < 0) {
+               auxtrace_snapshot_err = -1;
+       } else {
+               auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
+               if (!auxtrace_snapshot_err)
+                       auxtrace_snapshot_enabled = 1;
+       }
+}
+
+#else
+
+static inline
+int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
+                              struct auxtrace_mmap *mm __maybe_unused)
+{
+       return 0;
+}
+
+static inline
+void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
+{
+}
+
+static inline
+int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
+{
+       return 0;
+}
+
+#endif
+
 static int record__open(struct record *rec)
 {
        char msg[512];
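
record__process_auxtrace() above pads the AUX payload so that the next event written to perf.data stays 8-byte aligned: padding = (len1 + len2) & 7, then 8 - padding when non-zero, i.e. the number of bytes needed to round the payload up to the next multiple of 8. For example, 13 bytes of data get 3 pad bytes. A standalone check of that arithmetic:

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* pad (len1 + len2) up to the next 8-byte boundary, as in the diff */
static size_t auxtrace_padding(size_t len1, size_t len2)
{
        size_t padding = (len1 + len2) & 7;

        return padding ? 8 - padding : 0;
}

int main(void)
{
        assert(auxtrace_padding(13, 0) == 3);
        assert(auxtrace_padding(8, 8) == 0);
        assert(auxtrace_padding(0, 1) == 7);
        printf("8-byte alignment padding checks passed\n");
        return 0;
}
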
@@ -169,13 +302,16 @@ try_again:
                goto out;
        }
 
-       if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
+       if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
+                                opts->auxtrace_mmap_pages,
+                                opts->auxtrace_snapshot_mode) < 0) {
                if (errno == EPERM) {
                        pr_err("Permission error mapping pages.\n"
                               "Consider increasing "
                               "/proc/sys/kernel/perf_event_mlock_kb,\n"
                               "or try again with a smaller value of -m/--mmap_pages.\n"
-                              "(current value: %u)\n", opts->mmap_pages);
+                              "(current value: %u,%u)\n",
+                              opts->mmap_pages, opts->auxtrace_mmap_pages);
                        rc = -errno;
                } else {
                        pr_err("failed to mmap with %d (%s)\n", errno,
@@ -209,12 +345,9 @@ static int process_buildids(struct record *rec)
        struct perf_data_file *file  = &rec->file;
        struct perf_session *session = rec->session;
 
-       u64 size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
-       if (size == 0)
+       if (file->size == 0)
                return 0;
 
-       file->size = size;
-
        /*
         * During this process, it'll load kernel map and replace the
         * dso->long_name to a real pathname it found.  In this case
@@ -270,12 +403,20 @@ static int record__mmap_read_all(struct record *rec)
        int rc = 0;
 
        for (i = 0; i < rec->evlist->nr_mmaps; i++) {
+               struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;
+
                if (rec->evlist->mmap[i].base) {
                        if (record__mmap_read(rec, i) != 0) {
                                rc = -1;
                                goto out;
                        }
                }
+
+               if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
+                   record__auxtrace_mmap_read(rec, mm) != 0) {
+                       rc = -1;
+                       goto out;
+               }
        }
 
        /*
@@ -305,6 +446,9 @@ static void record__init_features(struct record *rec)
 
        if (!rec->opts.branch_stack)
                perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
+
+       if (!rec->opts.full_auxtrace)
+               perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
 }
 
 static volatile int workload_exec_errno;
@@ -323,6 +467,8 @@ static void workload_exec_failed_signal(int signo __maybe_unused,
        child_finished = 1;
 }
 
+static void snapshot_sig_handler(int sig);
+
 static int __cmd_record(struct record *rec, int argc, const char **argv)
 {
        int err;
@@ -343,6 +489,10 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        signal(SIGCHLD, sig_handler);
        signal(SIGINT, sig_handler);
        signal(SIGTERM, sig_handler);
+       if (rec->opts.auxtrace_snapshot_mode)
+               signal(SIGUSR2, snapshot_sig_handler);
+       else
+               signal(SIGUSR2, SIG_IGN);
 
        session = perf_session__new(file, false, tool);
        if (session == NULL) {
@@ -421,6 +571,13 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                }
        }
 
+       if (rec->opts.full_auxtrace) {
+               err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
+                                       session, process_synthesized_event);
+               if (err)
+                       goto out_delete_session;
+       }
+
        err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
                                                 machine);
        if (err < 0)
@@ -441,7 +598,8 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
        }
 
        err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
-                                           process_synthesized_event, opts->sample_address);
+                                           process_synthesized_event, opts->sample_address,
+                                           opts->proc_map_timeout);
        if (err != 0)
                goto out_child;
 
@@ -475,14 +633,27 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                perf_evlist__enable(rec->evlist);
        }
 
+       auxtrace_snapshot_enabled = 1;
        for (;;) {
                int hits = rec->samples;
 
                if (record__mmap_read_all(rec) < 0) {
+                       auxtrace_snapshot_enabled = 0;
                        err = -1;
                        goto out_child;
                }
 
+               if (auxtrace_record__snapshot_started) {
+                       auxtrace_record__snapshot_started = 0;
+                       if (!auxtrace_snapshot_err)
+                               record__read_auxtrace_snapshot(rec);
+                       if (auxtrace_snapshot_err) {
+                               pr_err("AUX area tracing snapshot failed\n");
+                               err = -1;
+                               goto out_child;
+                       }
+               }
+
                if (hits == rec->samples) {
                        if (done || draining)
                                break;
@@ -505,10 +676,12 @@ static int __cmd_record(struct record *rec, int argc, const char **argv)
                 * disable events in this case.
                 */
                if (done && !disabled && !target__none(&opts->target)) {
+                       auxtrace_snapshot_enabled = 0;
                        perf_evlist__disable(rec->evlist);
                        disabled = true;
                }
        }
+       auxtrace_snapshot_enabled = 0;
 
        if (forks && workload_exec_errno) {
                char msg[STRERR_BUFSIZE];
@@ -544,16 +717,25 @@ out_child:
 
        if (!err && !file->is_pipe) {
                rec->session->header.data_size += rec->bytes_written;
+               file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
 
-               if (!rec->no_buildid)
+               if (!rec->no_buildid) {
                        process_buildids(rec);
+                       /*
+                        * Take all build IDs when the file contains AUX
+                        * area tracing data, because we do not decode the
+                        * trace (decoding would take too long).
+                        */
+                       if (rec->opts.full_auxtrace)
+                               dsos__hit_all(rec->session);
+               }
                perf_session__write_header(rec->session, rec->evlist, fd, true);
        }
 
        if (!err && !quiet) {
                char samples[128];
 
-               if (rec->samples)
+               if (rec->samples && !rec->opts.full_auxtrace)
                        scnprintf(samples, sizeof(samples),
                                  " (%" PRIu64 " samples)", rec->samples);
                else
@@ -569,94 +751,6 @@ out_delete_session:
        return status;
 }
 
-#define BRANCH_OPT(n, m) \
-       { .name = n, .mode = (m) }
-
-#define BRANCH_END { .name = NULL }
-
-struct branch_mode {
-       const char *name;
-       int mode;
-};
-
-static const struct branch_mode branch_modes[] = {
-       BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
-       BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
-       BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
-       BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
-       BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
-       BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
-       BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
-       BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
-       BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
-       BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
-       BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
-       BRANCH_END
-};
-
-static int
-parse_branch_stack(const struct option *opt, const char *str, int unset)
-{
-#define ONLY_PLM \
-       (PERF_SAMPLE_BRANCH_USER        |\
-        PERF_SAMPLE_BRANCH_KERNEL      |\
-        PERF_SAMPLE_BRANCH_HV)
-
-       uint64_t *mode = (uint64_t *)opt->value;
-       const struct branch_mode *br;
-       char *s, *os = NULL, *p;
-       int ret = -1;
-
-       if (unset)
-               return 0;
-
-       /*
-        * cannot set it twice, -b + --branch-filter for instance
-        */
-       if (*mode)
-               return -1;
-
-       /* str may be NULL in case no arg is passed to -b */
-       if (str) {
-               /* because str is read-only */
-               s = os = strdup(str);
-               if (!s)
-                       return -1;
-
-               for (;;) {
-                       p = strchr(s, ',');
-                       if (p)
-                               *p = '\0';
-
-                       for (br = branch_modes; br->name; br++) {
-                               if (!strcasecmp(s, br->name))
-                                       break;
-                       }
-                       if (!br->name) {
-                               ui__warning("unknown branch filter %s,"
-                                           " check man page\n", s);
-                               goto error;
-                       }
-
-                       *mode |= br->mode;
-
-                       if (!p)
-                               break;
-
-                       s = p + 1;
-               }
-       }
-       ret = 0;
-
-       /* default to any branch */
-       if ((*mode & ~ONLY_PLM) == 0) {
-               *mode = PERF_SAMPLE_BRANCH_ANY;
-       }
-error:
-       free(os);
-       return ret;
-}
-
 static void callchain_debug(void)
 {
        static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
@@ -795,6 +889,49 @@ static int parse_clockid(const struct option *opt, const char *str, int unset)
        return -1;
 }
 
+static int record__parse_mmap_pages(const struct option *opt,
+                                   const char *str,
+                                   int unset __maybe_unused)
+{
+       struct record_opts *opts = opt->value;
+       char *s, *p;
+       unsigned int mmap_pages;
+       int ret;
+
+       if (!str)
+               return -EINVAL;
+
+       s = strdup(str);
+       if (!s)
+               return -ENOMEM;
+
+       p = strchr(s, ',');
+       if (p)
+               *p = '\0';
+
+       if (*s) {
+               ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
+               if (ret)
+                       goto out_free;
+               opts->mmap_pages = mmap_pages;
+       }
+
+       if (!p) {
+               ret = 0;
+               goto out_free;
+       }
+
+       ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
+       if (ret)
+               goto out_free;
+
+       opts->auxtrace_mmap_pages = mmap_pages;
+
+out_free:
+       free(s);
+       return ret;
+}
+
 static const char * const __record_usage[] = {
        "perf record [<options>] [<command>]",
        "perf record [<options>] -- <command> [<options>]",
@@ -823,6 +960,7 @@ static struct record record = {
                        .uses_mmap   = true,
                        .default_per_cpu = true,
                },
+               .proc_map_timeout     = 500,
        },
        .tool = {
                .sample         = process_sample_event,
@@ -875,9 +1013,9 @@ struct option __record_options[] = {
                        &record.opts.no_inherit_set,
                        "child tasks do not inherit counters"),
        OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
-       OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
-                    "number of mmap data pages",
-                    perf_evlist__parse_mmap_pages),
+       OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
+                    "number of mmap data pages and AUX area tracing mmap pages",
+                    record__parse_mmap_pages),
        OPT_BOOLEAN(0, "group", &record.opts.group,
                    "put the counters into a counter group"),
        OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
@@ -891,10 +1029,9 @@ struct option __record_options[] = {
        OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
        OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
                    "per thread counts"),
-       OPT_BOOLEAN('d', "data", &record.opts.sample_address,
-                   "Sample addresses"),
-       OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
-       OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
+       OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
+       OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Record the sample timestamps"),
+       OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
        OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
                    "don't sample"),
        OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
@@ -929,6 +1066,10 @@ struct option __record_options[] = {
        OPT_CALLBACK('k', "clockid", &record.opts,
        "clockid", "clockid to use for events, see clock_gettime()",
        parse_clockid),
+       OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
+                         "opts", "AUX area tracing Snapshot Mode", ""),
+       OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
+                       "per thread proc mmap processing timeout in ms"),
        OPT_END()
 };
 
@@ -936,7 +1077,7 @@ struct option *record_options = __record_options;
 
 int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 {
-       int err = -ENOMEM;
+       int err;
        struct record *rec = &record;
        char errbuf[BUFSIZ];
 
@@ -957,6 +1098,19 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
                usage_with_options(record_usage, record_options);
        }
 
+       if (!rec->itr) {
+               rec->itr = auxtrace_record__init(rec->evlist, &err);
+               if (err)
+                       return err;
+       }
+
+       err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
+                                             rec->opts.auxtrace_snapshot_opts);
+       if (err)
+               return err;
+
+       err = -ENOMEM;
+
        symbol__init(NULL);
 
        if (symbol_conf.kptr_restrict)
@@ -1002,6 +1156,10 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
        if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
                usage_with_options(record_usage, record_options);
 
+       err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
+       if (err)
+               goto out_symbol_exit;
+
        if (record_opts__config(&rec->opts)) {
                err = -EINVAL;
                goto out_symbol_exit;
@@ -1011,5 +1169,15 @@ int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
 out_symbol_exit:
        perf_evlist__delete(rec->evlist);
        symbol__exit();
+       auxtrace_record__free(rec->itr);
        return err;
 }
+
+static void snapshot_sig_handler(int sig __maybe_unused)
+{
+       if (!auxtrace_snapshot_enabled)
+               return;
+       auxtrace_snapshot_enabled = 0;
+       auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
+       auxtrace_record__snapshot_started = 1;
+}
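
The snapshot mode added above is coordinated entirely through volatile flags: snapshot_sig_handler() only flips auxtrace_snapshot_enabled and auxtrace_record__snapshot_started, and the main record loop notices the change on its next pass and reads the AUX buffers out. A standalone sketch of that async-signal handshake, using sig_atomic_t where the perf code uses volatile int:

#include <signal.h>
#include <stdio.h>

static volatile sig_atomic_t snapshot_enabled;
static volatile sig_atomic_t snapshot_started;

/* async-signal-safe: only flip flags, the main loop does the real work */
static void snapshot_sig_handler(int sig)
{
        (void)sig;
        if (!snapshot_enabled)
                return;                 /* ignore signals between windows */
        snapshot_enabled = 0;
        snapshot_started = 1;
}

int main(void)
{
        signal(SIGUSR2, snapshot_sig_handler);
        snapshot_enabled = 1;           /* arm the snapshot window */

        raise(SIGUSR2);                 /* stand-in for an external kill -USR2 */

        if (snapshot_started) {         /* main loop notices the flag */
                snapshot_started = 0;
                puts("reading AUX snapshot...");
                snapshot_enabled = 1;   /* re-arm once the read is done */
        }
        return 0;
}
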
index b63aeda719be0c7604da5229e1a3a0ec33253400..32626ea3e2276b11279db88207e42c29eeed391a 100644 (file)
@@ -36,6 +36,8 @@
 #include "util/data.h"
 #include "arch/common.h"
 
+#include "util/auxtrace.h"
+
 #include <dlfcn.h>
 #include <linux/bitmap.h>
 
@@ -137,10 +139,12 @@ static int process_sample_event(struct perf_tool *tool,
        struct report *rep = container_of(tool, struct report, tool);
        struct addr_location al;
        struct hist_entry_iter iter = {
-               .hide_unresolved = rep->hide_unresolved,
-               .add_entry_cb = hist_iter__report_callback,
+               .evsel                  = evsel,
+               .sample                 = sample,
+               .hide_unresolved        = rep->hide_unresolved,
+               .add_entry_cb           = hist_iter__report_callback,
        };
-       int ret;
+       int ret = 0;
 
        if (perf_event__preprocess_sample(event, machine, &al, sample) < 0) {
                pr_debug("problem processing %d event, skipping it.\n",
@@ -149,10 +153,10 @@ static int process_sample_event(struct perf_tool *tool,
        }
 
        if (rep->hide_unresolved && al.sym == NULL)
-               return 0;
+               goto out_put;
 
        if (rep->cpu_list && !test_bit(sample->cpu, rep->cpu_bitmap))
-               return 0;
+               goto out_put;
 
        if (sort__mode == SORT_MODE__BRANCH)
                iter.ops = &hist_iter_branch;
@@ -166,11 +170,11 @@ static int process_sample_event(struct perf_tool *tool,
        if (al.map != NULL)
                al.map->dso->hit = 1;
 
-       ret = hist_entry_iter__add(&iter, &al, evsel, sample, rep->max_stack,
-                                  rep);
+       ret = hist_entry_iter__add(&iter, &al, rep->max_stack, rep);
        if (ret < 0)
                pr_debug("problem adding hist entry, skipping event\n");
-
+out_put:
+       addr_location__put(&al);
        return ret;
 }
 
@@ -316,6 +320,7 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
 {
        struct perf_evsel *pos;
 
+       fprintf(stdout, "#\n# Total Lost Samples: %" PRIu64 "\n#\n", evlist->stats.total_lost_samples);
        evlist__for_each(evlist, pos) {
                struct hists *hists = evsel__hists(pos);
                const char *evname = perf_evsel__name(pos);
@@ -330,15 +335,14 @@ static int perf_evlist__tty_browse_hists(struct perf_evlist *evlist,
        }
 
        if (sort_order == NULL &&
-           parent_pattern == default_parent_pattern) {
+           parent_pattern == default_parent_pattern)
                fprintf(stdout, "#\n# (%s)\n#\n", help);
 
-               if (rep->show_threads) {
-                       bool style = !strcmp(rep->pretty_printing_style, "raw");
-                       perf_read_values_display(stdout, &rep->show_threads_values,
-                                                style);
-                       perf_read_values_destroy(&rep->show_threads_values);
-               }
+       if (rep->show_threads) {
+               bool style = !strcmp(rep->pretty_printing_style, "raw");
+               perf_read_values_display(stdout, &rep->show_threads_values,
+                                        style);
+               perf_read_values_destroy(&rep->show_threads_values);
        }
 
        return 0;
@@ -585,6 +589,7 @@ parse_percent_limit(const struct option *opt, const char *str,
 int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
 {
        struct perf_session *session;
+       struct itrace_synth_opts itrace_synth_opts = { .set = 0, };
        struct stat st;
        bool has_br_stack = false;
        int branch_mode = -1;
@@ -607,6 +612,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                        .attr            = perf_event__process_attr,
                        .tracing_data    = perf_event__process_tracing_data,
                        .build_id        = perf_event__process_build_id,
+                       .id_index        = perf_event__process_id_index,
+                       .auxtrace_info   = perf_event__process_auxtrace_info,
+                       .auxtrace        = perf_event__process_auxtrace,
                        .ordered_events  = true,
                        .ordering_requires_timestamps = true,
                },
@@ -717,6 +725,9 @@ int cmd_report(int argc, const char **argv, const char *prefix __maybe_unused)
                     "Don't show entries under that percent", parse_percent_limit),
        OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
                     "how to display percentage of filtered entries", parse_filter_percentage),
+       OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
+                           "Instruction Tracing options",
+                           itrace_parse_synth_opts),
        OPT_END()
        };
        struct perf_data_file file = {
@@ -761,6 +772,8 @@ repeat:
                                               report.queue_size);
        }
 
+       session->itrace_synth_opts = &itrace_synth_opts;
+
        report.session = session;
 
        has_br_stack = perf_header__has_feat(&session->header,
@@ -803,8 +816,8 @@ repeat:
                goto error;
        }
 
-       /* Force tty output for header output. */
-       if (report.header || report.header_only)
+       /* Force tty output for header output and per-thread stat. */
+       if (report.header || report.header_only || report.show_threads)
                use_browser = 0;
 
        if (strcmp(input_name, "-") != 0)
index 5275bab703138cbeb9c40f1ff22174ac52ca2d13..33962612a5e9035ae42c83e15497a5713922556b 100644 (file)
@@ -95,6 +95,7 @@ struct work_atoms {
        u64                     total_lat;
        u64                     nb_atoms;
        u64                     total_runtime;
+       int                     num_merged;
 };
 
 typedef int (*sort_fn_t)(struct work_atoms *, struct work_atoms *);
@@ -168,9 +169,10 @@ struct perf_sched {
        u64              all_runtime;
        u64              all_count;
        u64              cpu_last_switched[MAX_CPUS];
-       struct rb_root   atom_root, sorted_atom_root;
+       struct rb_root   atom_root, sorted_atom_root, merged_atom_root;
        struct list_head sort_list, cmp_pid;
        bool force;
+       bool skip_merge;
 };
 
 static u64 get_nsecs(void)
@@ -770,7 +772,7 @@ static int replay_fork_event(struct perf_sched *sched,
        if (child == NULL || parent == NULL) {
                pr_debug("thread does not exist on fork event: child %p, parent %p\n",
                                 child, parent);
-               return 0;
+               goto out_put;
        }
 
        if (verbose) {
@@ -781,6 +783,9 @@ static int replay_fork_event(struct perf_sched *sched,
 
        register_pid(sched, parent->tid, thread__comm_str(parent));
        register_pid(sched, child->tid, thread__comm_str(child));
+out_put:
+       thread__put(child);
+       thread__put(parent);
        return 0;
 }
 
@@ -957,7 +962,7 @@ static int latency_switch_event(struct perf_sched *sched,
        struct work_atoms *out_events, *in_events;
        struct thread *sched_out, *sched_in;
        u64 timestamp0, timestamp = sample->time;
-       int cpu = sample->cpu;
+       int cpu = sample->cpu, err = -1;
        s64 delta;
 
        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
@@ -976,15 +981,17 @@ static int latency_switch_event(struct perf_sched *sched,
 
        sched_out = machine__findnew_thread(machine, -1, prev_pid);
        sched_in = machine__findnew_thread(machine, -1, next_pid);
+       if (sched_out == NULL || sched_in == NULL)
+               goto out_put;
 
        out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
        if (!out_events) {
                if (thread_atoms_insert(sched, sched_out))
-                       return -1;
+                       goto out_put;
                out_events = thread_atoms_search(&sched->atom_root, sched_out, &sched->cmp_pid);
                if (!out_events) {
                        pr_err("out-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
        }
        if (add_sched_out_event(out_events, sched_out_state(prev_state), timestamp))
@@ -993,22 +1000,25 @@ static int latency_switch_event(struct perf_sched *sched,
        in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
        if (!in_events) {
                if (thread_atoms_insert(sched, sched_in))
-                       return -1;
+                       goto out_put;
                in_events = thread_atoms_search(&sched->atom_root, sched_in, &sched->cmp_pid);
                if (!in_events) {
                        pr_err("in-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                /*
                 * A task came in that we have not heard about yet;
                 * add an initial atom in the runnable state:
                 */
                if (add_sched_out_event(in_events, 'R', timestamp))
-                       return -1;
+                       goto out_put;
        }
        add_sched_in_event(in_events, timestamp);
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(sched_out);
+       thread__put(sched_in);
+       return err;
 }
 
 static int latency_runtime_event(struct perf_sched *sched,
@@ -1021,23 +1031,29 @@ static int latency_runtime_event(struct perf_sched *sched,
        struct thread *thread = machine__findnew_thread(machine, -1, pid);
        struct work_atoms *atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
        u64 timestamp = sample->time;
-       int cpu = sample->cpu;
+       int cpu = sample->cpu, err = -1;
+
+       if (thread == NULL)
+               return -1;
 
        BUG_ON(cpu >= MAX_CPUS || cpu < 0);
        if (!atoms) {
                if (thread_atoms_insert(sched, thread))
-                       return -1;
+                       goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, thread, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("in-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
-                       return -1;
+                       goto out_put;
        }
 
        add_runtime_event(atoms, runtime, timestamp);
-       return 0;
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static int latency_wakeup_event(struct perf_sched *sched,
@@ -1050,19 +1066,22 @@ static int latency_wakeup_event(struct perf_sched *sched,
        struct work_atom *atom;
        struct thread *wakee;
        u64 timestamp = sample->time;
+       int err = -1;
 
        wakee = machine__findnew_thread(machine, -1, pid);
+       if (wakee == NULL)
+               return -1;
        atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, wakee))
-                       return -1;
+                       goto out_put;
                atoms = thread_atoms_search(&sched->atom_root, wakee, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("wakeup-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                if (add_sched_out_event(atoms, 'S', timestamp))
-                       return -1;
+                       goto out_put;
        }
 
        BUG_ON(list_empty(&atoms->work_list));
@@ -1081,17 +1100,21 @@ static int latency_wakeup_event(struct perf_sched *sched,
         * skip in this case.
         */
        if (sched->profile_cpu == -1 && atom->state != THREAD_SLEEPING)
-               return 0;
+               goto out_ok;
 
        sched->nr_timestamps++;
        if (atom->sched_out_time > timestamp) {
                sched->nr_unordered_timestamps++;
-               return 0;
+               goto out_ok;
        }
 
        atom->state = THREAD_WAIT_CPU;
        atom->wake_up_time = timestamp;
-       return 0;
+out_ok:
+       err = 0;
+out_put:
+       thread__put(wakee);
+       return err;
 }
 
 static int latency_migrate_task_event(struct perf_sched *sched,
@@ -1104,6 +1127,7 @@ static int latency_migrate_task_event(struct perf_sched *sched,
        struct work_atoms *atoms;
        struct work_atom *atom;
        struct thread *migrant;
+       int err = -1;
 
        /*
         * Only need to worry about migration when profiling one CPU.
@@ -1112,18 +1136,20 @@ static int latency_migrate_task_event(struct perf_sched *sched,
                return 0;
 
        migrant = machine__findnew_thread(machine, -1, pid);
+       if (migrant == NULL)
+               return -1;
        atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
        if (!atoms) {
                if (thread_atoms_insert(sched, migrant))
-                       return -1;
+                       goto out_put;
                register_pid(sched, migrant->tid, thread__comm_str(migrant));
                atoms = thread_atoms_search(&sched->atom_root, migrant, &sched->cmp_pid);
                if (!atoms) {
                        pr_err("migration-event: Internal tree error");
-                       return -1;
+                       goto out_put;
                }
                if (add_sched_out_event(atoms, 'R', timestamp))
-                       return -1;
+                       goto out_put;
        }
 
        BUG_ON(list_empty(&atoms->work_list));
@@ -1135,8 +1161,10 @@ static int latency_migrate_task_event(struct perf_sched *sched,
 
        if (atom->sched_out_time > timestamp)
                sched->nr_unordered_timestamps++;
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(migrant);
+       return err;
 }
 
 static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_list)
@@ -1156,7 +1184,10 @@ static void output_lat_thread(struct perf_sched *sched, struct work_atoms *work_
        sched->all_runtime += work_list->total_runtime;
        sched->all_count   += work_list->nb_atoms;
 
-       ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
+       if (work_list->num_merged > 1)
+               ret = printf("  %s:(%d) ", thread__comm_str(work_list->thread), work_list->num_merged);
+       else
+               ret = printf("  %s:%d ", thread__comm_str(work_list->thread), work_list->thread->tid);
 
        for (i = 0; i < 24 - ret; i++)
                printf(" ");
@@ -1276,17 +1307,22 @@ static int sort_dimension__add(const char *tok, struct list_head *list)
 static void perf_sched__sort_lat(struct perf_sched *sched)
 {
        struct rb_node *node;
-
+       struct rb_root *root = &sched->atom_root;
+again:
        for (;;) {
                struct work_atoms *data;
-               node = rb_first(&sched->atom_root);
+               node = rb_first(root);
                if (!node)
                        break;
 
-               rb_erase(node, &sched->atom_root);
+               rb_erase(node, root);
                data = rb_entry(node, struct work_atoms, node);
                __thread_latency_insert(&sched->sorted_atom_root, data, &sched->sort_list);
        }
+       if (root == &sched->atom_root) {
+               root = &sched->merged_atom_root;
+               goto again;
+       }
 }
 
 static int process_sched_wakeup_event(struct perf_tool *tool,
@@ -1330,8 +1366,10 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
        }
 
        sched_in = machine__findnew_thread(machine, -1, next_pid);
+       if (sched_in == NULL)
+               return -1;
 
-       sched->curr_thread[this_cpu] = sched_in;
+       sched->curr_thread[this_cpu] = thread__get(sched_in);
 
        printf("  ");
 
@@ -1381,6 +1419,8 @@ static int map_switch_event(struct perf_sched *sched, struct perf_evsel *evsel,
                printf("\n");
        }
 
+       thread__put(sched_in);
+
        return 0;
 }
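
This hunk is one instance of a theme running through these perf changes: machine__findnew_thread() now returns a reference-counted thread, so every lookup has to be paired with a thread__put(), and a cache that outlives the call (curr_thread[] here) must take its own thread__get(). A minimal compilable model of that ownership rule, using an invented obj type and get/put helpers rather than perf's actual API:

#include <stdlib.h>

struct obj { int refcnt; };		/* stands in for struct thread */

static struct obj *obj_get(struct obj *o)
{
	if (o)
		o->refcnt++;
	return o;
}

static void obj_put(struct obj *o)
{
	if (o && --o->refcnt == 0)
		free(o);
}

static struct obj *cached;		/* stands in for curr_thread[cpu] */

static void handle_switch(struct obj *looked_up)
{
	obj_put(cached);		/* release the previously cached holder */
	cached = obj_get(looked_up);	/* the cache takes its own reference   */
	obj_put(looked_up);		/* drop the lookup's reference         */
}

int main(void)
{
	struct obj *t = calloc(1, sizeof(*t));

	t->refcnt = 1;			/* the reference a "lookup" returns */
	handle_switch(t);
	obj_put(cached);		/* final put frees the object */
	return 0;
}
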
 
@@ -1542,6 +1582,59 @@ static void print_bad_events(struct perf_sched *sched)
        }
 }
 
+static void __merge_work_atoms(struct rb_root *root, struct work_atoms *data)
+{
+       struct rb_node **new = &(root->rb_node), *parent = NULL;
+       struct work_atoms *this;
+       const char *comm = thread__comm_str(data->thread), *this_comm;
+
+       while (*new) {
+               int cmp;
+
+               this = container_of(*new, struct work_atoms, node);
+               parent = *new;
+
+               this_comm = thread__comm_str(this->thread);
+               cmp = strcmp(comm, this_comm);
+               if (cmp > 0) {
+                       new = &((*new)->rb_left);
+               } else if (cmp < 0) {
+                       new = &((*new)->rb_right);
+               } else {
+                       this->num_merged++;
+                       this->total_runtime += data->total_runtime;
+                       this->nb_atoms += data->nb_atoms;
+                       this->total_lat += data->total_lat;
+                       list_splice(&data->work_list, &this->work_list);
+                       if (this->max_lat < data->max_lat) {
+                               this->max_lat = data->max_lat;
+                               this->max_lat_at = data->max_lat_at;
+                       }
+                       zfree(&data);
+                       return;
+               }
+       }
+
+       data->num_merged++;
+       rb_link_node(&data->node, parent, new);
+       rb_insert_color(&data->node, root);
+}
+
+static void perf_sched__merge_lat(struct perf_sched *sched)
+{
+       struct work_atoms *data;
+       struct rb_node *node;
+
+       if (sched->skip_merge)
+               return;
+
+       while ((node = rb_first(&sched->atom_root))) {
+               rb_erase(node, &sched->atom_root);
+               data = rb_entry(node, struct work_atoms, node);
+               __merge_work_atoms(&sched->merged_atom_root, data);
+       }
+}
+
 static int perf_sched__lat(struct perf_sched *sched)
 {
        struct rb_node *next;
@@ -1551,6 +1644,7 @@ static int perf_sched__lat(struct perf_sched *sched)
        if (perf_sched__read_events(sched))
                return -1;
 
+       perf_sched__merge_lat(sched);
        perf_sched__sort_lat(sched);
 
        printf("\n -----------------------------------------------------------------------------------------------------------------\n");
@@ -1702,6 +1796,7 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
                .profile_cpu          = -1,
                .next_shortname1      = 'A',
                .next_shortname2      = '0',
+               .skip_merge           = 0,
        };
        const struct option latency_options[] = {
        OPT_STRING('s', "sort", &sched.sort_order, "key[,key2...]",
@@ -1712,6 +1807,8 @@ int cmd_sched(int argc, const char **argv, const char *prefix __maybe_unused)
                    "CPU to profile on"),
        OPT_BOOLEAN('D', "dump-raw-trace", &dump_trace,
                    "dump raw trace in ASCII"),
+       OPT_BOOLEAN('p', "pids", &sched.skip_merge,
+                   "latency stats per pid instead of per comm"),
        OPT_END()
        };
        const struct option replay_options[] = {
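
That closes out builtin-sched.c: 'perf sched latency' now folds work atoms that share a comm into merged_atom_root, printing the merge count in parentheses instead of a tid, and the new -p/--pids switch restores the old per-pid rows. The core of __merge_work_atoms() is an insert-or-merge keyed by comm; the same accumulate-on-match logic, self-contained over a linked list instead of an rb-tree (bucket and merge_bucket are invented names):

#include <stdlib.h>
#include <string.h>

struct bucket {
	const char *comm;
	unsigned long long total_runtime, total_lat, nb_atoms;
	int num_merged;
	struct bucket *next;
};

/* Insert-or-merge keyed by comm: on a match, fold the new entry's
 * totals into the existing bucket and free it; otherwise link it in. */
static void merge_bucket(struct bucket **head, struct bucket *data)
{
	struct bucket *b;

	for (b = *head; b; b = b->next) {
		if (strcmp(b->comm, data->comm) == 0) {
			b->num_merged++;
			b->total_runtime += data->total_runtime;
			b->total_lat     += data->total_lat;
			b->nb_atoms      += data->nb_atoms;
			free(data);
			return;
		}
	}
	data->num_merged = 1;
	data->next = *head;
	*head = data;
}

int main(void)
{
	struct bucket *head = NULL;
	struct bucket *a = calloc(1, sizeof(*a)), *b = calloc(1, sizeof(*b));

	a->comm = "gcc"; a->nb_atoms = 3;
	b->comm = "gcc"; b->nb_atoms = 2;
	merge_bucket(&head, a);
	merge_bucket(&head, b);		/* folded into a: nb_atoms == 5 */
	return head->nb_atoms == 5 ? 0 : 1;
}
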
index 58f10b8e6ff20d51429634b8f79628188fbc2dd4..24809787369f5a1303451de5264798d0e31792b6 100644 (file)
@@ -16,6 +16,7 @@
 #include "util/evsel.h"
 #include "util/sort.h"
 #include "util/data.h"
+#include "util/auxtrace.h"
 #include <linux/bitmap.h>
 
 static char const              *script_name;
@@ -26,6 +27,7 @@ static u64                    nr_unordered;
 static bool                    no_callchain;
 static bool                    latency_format;
 static bool                    system_wide;
+static bool                    print_flags;
 static const char              *cpu_list;
 static DECLARE_BITMAP(cpu_bitmap, MAX_NR_CPUS);
 
@@ -146,9 +148,10 @@ static const char *output_field2str(enum perf_output_field field)
 
 #define PRINT_FIELD(x)  (output[attr->type].fields & PERF_OUTPUT_##x)
 
-static int perf_evsel__check_stype(struct perf_evsel *evsel,
-                                  u64 sample_type, const char *sample_msg,
-                                  enum perf_output_field field)
+static int perf_evsel__do_check_stype(struct perf_evsel *evsel,
+                                     u64 sample_type, const char *sample_msg,
+                                     enum perf_output_field field,
+                                     bool allow_user_set)
 {
        struct perf_event_attr *attr = &evsel->attr;
        int type = attr->type;
@@ -158,6 +161,8 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
                return 0;
 
        if (output[type].user_set) {
+               if (allow_user_set)
+                       return 0;
                evname = perf_evsel__name(evsel);
                pr_err("Samples for '%s' event do not have %s attribute set. "
                       "Cannot print '%s' field.\n",
@@ -175,10 +180,22 @@ static int perf_evsel__check_stype(struct perf_evsel *evsel,
        return 0;
 }
 
+static int perf_evsel__check_stype(struct perf_evsel *evsel,
+                                  u64 sample_type, const char *sample_msg,
+                                  enum perf_output_field field)
+{
+       return perf_evsel__do_check_stype(evsel, sample_type, sample_msg, field,
+                                         false);
+}
+
 static int perf_evsel__check_attr(struct perf_evsel *evsel,
                                  struct perf_session *session)
 {
        struct perf_event_attr *attr = &evsel->attr;
+       bool allow_user_set;
+
+       allow_user_set = perf_header__has_feat(&session->header,
+                                              HEADER_AUXTRACE);
 
        if (PRINT_FIELD(TRACE) &&
                !perf_session__has_traces(session, "record -R"))
@@ -191,8 +208,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
        }
 
        if (PRINT_FIELD(ADDR) &&
-               perf_evsel__check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
-                                       PERF_OUTPUT_ADDR))
+               perf_evsel__do_check_stype(evsel, PERF_SAMPLE_ADDR, "ADDR",
+                                          PERF_OUTPUT_ADDR, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(SYM) && !PRINT_FIELD(IP) && !PRINT_FIELD(ADDR)) {
@@ -229,8 +246,8 @@ static int perf_evsel__check_attr(struct perf_evsel *evsel,
                return -EINVAL;
 
        if (PRINT_FIELD(CPU) &&
-               perf_evsel__check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
-                                       PERF_OUTPUT_CPU))
+               perf_evsel__do_check_stype(evsel, PERF_SAMPLE_CPU, "CPU",
+                                          PERF_OUTPUT_CPU, allow_user_set))
                return -EINVAL;
 
        if (PRINT_FIELD(PERIOD) &&
@@ -445,6 +462,25 @@ static void print_sample_bts(union perf_event *event,
        printf("\n");
 }
 
+static void print_sample_flags(u32 flags)
+{
+       const char *chars = PERF_IP_FLAG_CHARS;
+       const int n = strlen(PERF_IP_FLAG_CHARS);
+       char str[33];
+       int i, pos = 0;
+
+       for (i = 0; i < n; i++, flags >>= 1) {
+               if (flags & 1)
+                       str[pos++] = chars[i];
+       }
+       for (; i < 32; i++, flags >>= 1) {
+               if (flags & 1)
+                       str[pos++] = '?';
+       }
+       str[pos] = 0;
+       printf("  %-4s ", str);
+}
+
 static void process_event(union perf_event *event, struct perf_sample *sample,
                          struct perf_evsel *evsel, struct addr_location *al)
 {
@@ -464,6 +500,9 @@ static void process_event(union perf_event *event, struct perf_sample *sample,
                printf("%s: ", evname ? evname : "[unknown]");
        }
 
+       if (print_flags)
+               print_sample_flags(sample->flags);
+
        if (is_bts_event(attr)) {
                print_sample_bts(event, sample, evsel, thread, al);
                return;
@@ -568,13 +607,14 @@ static int process_sample_event(struct perf_tool *tool __maybe_unused,
        }
 
        if (al.filtered)
-               return 0;
+               goto out_put;
 
        if (cpu_list && !test_bit(sample->cpu, cpu_bitmap))
-               return 0;
+               goto out_put;
 
        scripting_ops->process_event(event, sample, evsel, &al);
-
+out_put:
+       addr_location__put(&al);
        return 0;
 }
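
The sample-resolution path now pins a thread reference inside the addr_location, so the early 'return 0' exits above become 'goto out_put' and every path funnels through addr_location__put(). A tiny compilable model of that single-release-point rewrite (res, do_work() and res_put() are stand-ins, not perf's types):

struct res { int held; };

static int  do_work(struct res *r) { (void)r; return 0; }
static void res_put(struct res *r) { r->held = 0; }	/* addr_location__put() */

/* Before: each early exit leaked the reference pinned in 'al'.
 * After: all paths fall through one release point. */
static int process(struct res *al, int filtered)
{
	int err = 0;

	if (filtered)
		goto out_put;		/* was: return 0; (leaked) */

	err = do_work(al);
out_put:
	res_put(al);			/* single release point */
	return err;
}

int main(void)
{
	struct res al = { .held = 1 };

	return process(&al, 1);
}
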
 
@@ -642,8 +682,8 @@ static int process_comm_event(struct perf_tool *tool,
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
        ret = 0;
-
 out:
+       thread__put(thread);
        return ret;
 }
 
@@ -674,6 +714,7 @@ static int process_fork_event(struct perf_tool *tool,
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
+       thread__put(thread);
 
        return 0;
 }
@@ -682,6 +723,7 @@ static int process_exit_event(struct perf_tool *tool,
                              struct perf_sample *sample,
                              struct machine *machine)
 {
+       int err = 0;
        struct thread *thread;
        struct perf_script *script = container_of(tool, struct perf_script, tool);
        struct perf_session *session = script->session;
@@ -703,9 +745,10 @@ static int process_exit_event(struct perf_tool *tool,
        perf_event__fprintf(event, stdout);
 
        if (perf_event__process_exit(tool, event, sample, machine) < 0)
-               return -1;
+               err = -1;
 
-       return 0;
+       thread__put(thread);
+       return err;
 }
 
 static int process_mmap_event(struct perf_tool *tool,
@@ -735,7 +778,7 @@ static int process_mmap_event(struct perf_tool *tool,
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
-
+       thread__put(thread);
        return 0;
 }
 
@@ -766,7 +809,7 @@ static int process_mmap2_event(struct perf_tool *tool,
        }
        print_sample_start(sample, thread, evsel);
        perf_event__fprintf(event, stdout);
-
+       thread__put(thread);
        return 0;
 }
 
@@ -999,12 +1042,15 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
                }
        }
 
-       tok = strtok(tok, ",");
-       while (tok) {
+       for (tok = strtok(tok, ","); tok; tok = strtok(NULL, ",")) {
                for (i = 0; i < imax; ++i) {
                        if (strcmp(tok, all_output_options[i].str) == 0)
                                break;
                }
+               if (i == imax && strcmp(tok, "flags") == 0) {
+                       print_flags = true;
+                       continue;
+               }
                if (i == imax) {
                        fprintf(stderr, "Invalid field requested.\n");
                        rc = -EINVAL;
@@ -1032,8 +1078,6 @@ static int parse_output_fields(const struct option *opt __maybe_unused,
                        }
                        output[type].fields |= all_output_options[i].field;
                }
-
-               tok = strtok(NULL, ",");
        }
 
        if (type >= 0) {
@@ -1497,6 +1541,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
        char *rec_script_path = NULL;
        char *rep_script_path = NULL;
        struct perf_session *session;
+       struct itrace_synth_opts itrace_synth_opts = { .set = false, };
        char *script_path = NULL;
        const char **__argv;
        int i, j, err = 0;
@@ -1511,6 +1556,10 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                        .attr            = process_attr,
                        .tracing_data    = perf_event__process_tracing_data,
                        .build_id        = perf_event__process_build_id,
+                       .id_index        = perf_event__process_id_index,
+                       .auxtrace_info   = perf_event__process_auxtrace_info,
+                       .auxtrace        = perf_event__process_auxtrace,
+                       .auxtrace_error  = perf_event__process_auxtrace_error,
                        .ordered_events  = true,
                        .ordering_requires_timestamps = true,
                },
@@ -1549,7 +1598,7 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
                     "comma separated output fields prepend with 'type:'. "
                     "Valid types: hw,sw,trace,raw. "
                     "Fields: comm,tid,pid,time,cpu,event,trace,ip,sym,dso,"
-                    "addr,symoff,period", parse_output_fields),
+                    "addr,symoff,period,flags", parse_output_fields),
        OPT_BOOLEAN('a', "all-cpus", &system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('S', "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
@@ -1570,6 +1619,9 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN('\0', "show-mmap-events", &script.show_mmap_events,
                    "Show the mmap events"),
        OPT_BOOLEAN('f', "force", &file.force, "don't complain, do it"),
+       OPT_CALLBACK_OPTARG(0, "itrace", &itrace_synth_opts, NULL, "opts",
+                           "Instruction Tracing options",
+                           itrace_parse_synth_opts),
        OPT_END()
        };
        const char * const script_subcommands[] = { "record", "report", NULL };
@@ -1765,6 +1817,8 @@ int cmd_script(int argc, const char **argv, const char *prefix __maybe_unused)
 
        script.session = session;
 
+       session->itrace_synth_opts = &itrace_synth_opts;
+
        if (cpu_list) {
                err = perf_session__cpu_bitmap(session, cpu_list, cpu_bitmap);
                if (err < 0)
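
That wraps builtin-script.c: the output-fields parser gains a 'flags' keyword (the strtok() walk became a for(;;) loop so the new case can simply 'continue'), and print_sample_flags() renders sample->flags one character per set bit from PERF_IP_FLAG_CHARS, emitting '?' for any set bit past the known alphabet. The same bit-to-glyph rendering, self-contained, with an illustrative alphabet standing in for perf's:

#include <stdio.h>
#include <string.h>

/* one glyph per known flag bit, lowest bit first (illustrative only) */
#define FLAG_CHARS "bcrosyiABEx"

static void render_flags(unsigned int flags, char str[33])
{
	const int n = strlen(FLAG_CHARS);
	int i, pos = 0;

	for (i = 0; i < n; i++, flags >>= 1)
		if (flags & 1)
			str[pos++] = FLAG_CHARS[i];
	for (; i < 32; i++, flags >>= 1)
		if (flags & 1)
			str[pos++] = '?';	/* set bit, no known glyph */
	str[pos] = 0;
}

int main(void)
{
	char buf[33];			/* 32 glyphs + NUL worst case */

	render_flags(0x5, buf);
	printf("%s\n", buf);		/* glyphs for bits 0 and 2: "br" */
	return 0;
}
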
index f7b8218785f6fa8911bc9c8544d7eb14ff9e2f2f..fcf99bdeb19e1cf73c54e0b6edc6d4426dfe1f48 100644 (file)
@@ -73,8 +73,8 @@ static void print_counter(struct perf_evsel *counter, char *prefix);
 static void print_aggr(char *prefix);
 
 /* Default events used for perf stat -T */
-static const char * const transaction_attrs[] = {
-       "task-clock",
+static const char *transaction_attrs = {
+       "task-clock,"
        "{"
        "instructions,"
        "cycles,"
@@ -86,8 +86,8 @@ static const char * const transaction_attrs[] = {
 };
 
 /* More limited version when the CPU does not have all events. */
-static const char * const transaction_limited_attrs[] = {
-       "task-clock",
+static const char * transaction_limited_attrs = {
+       "task-clock,"
        "{"
        "instructions,"
        "cycles,"
@@ -96,30 +96,12 @@ static const char * const transaction_limited_attrs[] = {
        "}"
 };
 
-/* must match transaction_attrs and the beginning limited_attrs */
-enum {
-       T_TASK_CLOCK,
-       T_INSTRUCTIONS,
-       T_CYCLES,
-       T_CYCLES_IN_TX,
-       T_TRANSACTION_START,
-       T_ELISION_START,
-       T_CYCLES_IN_TX_CP,
-};
-
 static struct perf_evlist      *evsel_list;
 
 static struct target target = {
        .uid    = UINT_MAX,
 };
 
-enum aggr_mode {
-       AGGR_NONE,
-       AGGR_GLOBAL,
-       AGGR_SOCKET,
-       AGGR_CORE,
-};
-
 static int                     run_count                       =  1;
 static bool                    no_inherit                      = false;
 static bool                    scale                           =  true;
@@ -147,10 +129,6 @@ static int                 (*aggr_get_id)(struct cpu_map *m, int cpu);
 
 static volatile int done = 0;
 
-struct perf_stat {
-       struct stats      res_stats[3];
-};
-
 static inline void diff_timespec(struct timespec *r, struct timespec *a,
                                 struct timespec *b)
 {
@@ -180,6 +158,8 @@ static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
 
        for (i = 0; i < 3; i++)
                init_stats(&ps->res_stats[i]);
+
+       perf_stat_evsel_id_init(evsel);
 }
 
 static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
@@ -198,24 +178,19 @@ static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
 
 static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
 {
-       void *addr;
-       size_t sz;
+       struct perf_counts *counts;
 
-       sz = sizeof(*evsel->counts) +
-            (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));
+       counts = perf_counts__new(perf_evsel__nr_cpus(evsel));
+       if (counts)
+               evsel->prev_raw_counts = counts;
 
-       addr = zalloc(sz);
-       if (!addr)
-               return -ENOMEM;
-
-       evsel->prev_raw_counts =  addr;
-
-       return 0;
+       return counts ? 0 : -ENOMEM;
 }
 
 static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
 {
-       zfree(&evsel->prev_raw_counts);
+       perf_counts__delete(evsel->prev_raw_counts);
+       evsel->prev_raw_counts = NULL;
 }
 
 static void perf_evlist__free_stats(struct perf_evlist *evlist)
@@ -247,22 +222,6 @@ out_free:
        return -1;
 }
 
-static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
-static struct stats runtime_cycles_stats[MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
-static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
-static struct stats runtime_branches_stats[MAX_NR_CPUS];
-static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
-static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
-static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
-static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
-static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
-static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
-static struct stats runtime_cycles_in_tx_stats[MAX_NR_CPUS];
-static struct stats walltime_nsecs_stats;
-static struct stats runtime_transaction_stats[MAX_NR_CPUS];
-static struct stats runtime_elision_stats[MAX_NR_CPUS];
-
 static void perf_stat__reset_stats(struct perf_evlist *evlist)
 {
        struct perf_evsel *evsel;
@@ -272,23 +231,7 @@ static void perf_stat__reset_stats(struct perf_evlist *evlist)
                perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
        }
 
-       memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
-       memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
-       memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
-       memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
-       memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
-       memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
-       memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
-       memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
-       memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
-       memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
-       memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
-       memset(runtime_cycles_in_tx_stats, 0,
-                       sizeof(runtime_cycles_in_tx_stats));
-       memset(runtime_transaction_stats, 0,
-               sizeof(runtime_transaction_stats));
-       memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
-       memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+       perf_stat__reset_shadow_stats();
 }
 
 static int create_perf_stat_counter(struct perf_evsel *evsel)
@@ -325,70 +268,6 @@ static inline int nsec_counter(struct perf_evsel *evsel)
        return 0;
 }
 
-static struct perf_evsel *nth_evsel(int n)
-{
-       static struct perf_evsel **array;
-       static int array_len;
-       struct perf_evsel *ev;
-       int j;
-
-       /* Assumes this only called when evsel_list does not change anymore. */
-       if (!array) {
-               evlist__for_each(evsel_list, ev)
-                       array_len++;
-               array = malloc(array_len * sizeof(void *));
-               if (!array)
-                       exit(ENOMEM);
-               j = 0;
-               evlist__for_each(evsel_list, ev)
-                       array[j++] = ev;
-       }
-       if (n < array_len)
-               return array[n];
-       return NULL;
-}
-
-/*
- * Update various tracking values we maintain to print
- * more semantic information such as miss/hit ratios,
- * instruction rates, etc:
- */
-static void update_shadow_stats(struct perf_evsel *counter, u64 *count,
-                               int cpu)
-{
-       if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
-               update_stats(&runtime_nsecs_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
-               update_stats(&runtime_cycles_stats[cpu], count[0]);
-       else if (transaction_run &&
-                perf_evsel__cmp(counter, nth_evsel(T_CYCLES_IN_TX)))
-               update_stats(&runtime_cycles_in_tx_stats[cpu], count[0]);
-       else if (transaction_run &&
-                perf_evsel__cmp(counter, nth_evsel(T_TRANSACTION_START)))
-               update_stats(&runtime_transaction_stats[cpu], count[0]);
-       else if (transaction_run &&
-                perf_evsel__cmp(counter, nth_evsel(T_ELISION_START)))
-               update_stats(&runtime_elision_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
-               update_stats(&runtime_stalled_cycles_front_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
-               update_stats(&runtime_stalled_cycles_back_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
-               update_stats(&runtime_branches_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
-               update_stats(&runtime_cacherefs_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
-               update_stats(&runtime_l1_dcache_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
-               update_stats(&runtime_l1_icache_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
-               update_stats(&runtime_ll_cache_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
-               update_stats(&runtime_dtlb_cache_stats[cpu], count[0]);
-       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
-               update_stats(&runtime_itlb_cache_stats[cpu], count[0]);
-}
-
 static void zero_per_pkg(struct perf_evsel *counter)
 {
        if (counter->per_pkg_mask)
@@ -449,7 +328,7 @@ static int read_cb(struct perf_evsel *evsel, int cpu, int thread __maybe_unused,
                perf_counts_values__scale(count, scale, NULL);
                evsel->counts->cpu[cpu] = *count;
                if (aggr_mode == AGGR_NONE)
-                       update_shadow_stats(evsel, count->values, cpu);
+                       perf_stat__update_shadow_stats(evsel, count->values, cpu);
                break;
        case AGGR_GLOBAL:
                aggr->val += count->val;
@@ -497,7 +376,7 @@ static int read_counter_aggr(struct perf_evsel *counter)
        /*
         * Save the full runtime - to allow normalization during printout:
         */
-       update_shadow_stats(counter, count, 0);
+       perf_stat__update_shadow_stats(counter, count, 0);
 
        return 0;
 }
@@ -665,7 +544,10 @@ static int __run_perf_stat(int argc, const char **argv)
                                        ui__warning("%s event is not supported by the kernel.\n",
                                                    perf_evsel__name(counter));
                                counter->supported = false;
-                               continue;
+
+                               if ((counter->leader != counter) ||
+                                   !(counter->leader->nr_members > 1))
+                                       continue;
                        }
 
                        perf_evsel__open_strerror(counter, &target,
@@ -875,188 +757,8 @@ static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
                fprintf(output, "                                   ");
 }
 
-/* used for get_ratio_color() */
-enum grc_type {
-       GRC_STALLED_CYCLES_FE,
-       GRC_STALLED_CYCLES_BE,
-       GRC_CACHE_MISSES,
-       GRC_MAX_NR
-};
-
-static const char *get_ratio_color(enum grc_type type, double ratio)
-{
-       static const double grc_table[GRC_MAX_NR][3] = {
-               [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
-               [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
-               [GRC_CACHE_MISSES]      = { 20.0, 10.0, 5.0 },
-       };
-       const char *color = PERF_COLOR_NORMAL;
-
-       if (ratio > grc_table[type][0])
-               color = PERF_COLOR_RED;
-       else if (ratio > grc_table[type][1])
-               color = PERF_COLOR_MAGENTA;
-       else if (ratio > grc_table[type][2])
-               color = PERF_COLOR_YELLOW;
-
-       return color;
-}
-
-static void print_stalled_cycles_frontend(int cpu,
-                                         struct perf_evsel *evsel
-                                         __maybe_unused, double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_cycles_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " frontend cycles idle   ");
-}
-
-static void print_stalled_cycles_backend(int cpu,
-                                        struct perf_evsel *evsel
-                                        __maybe_unused, double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_cycles_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " backend  cycles idle   ");
-}
-
-static void print_branch_misses(int cpu,
-                               struct perf_evsel *evsel __maybe_unused,
-                               double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_branches_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " of all branches        ");
-}
-
-static void print_l1_dcache_misses(int cpu,
-                                  struct perf_evsel *evsel __maybe_unused,
-                                  double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_l1_dcache_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " of all L1-dcache hits  ");
-}
-
-static void print_l1_icache_misses(int cpu,
-                                  struct perf_evsel *evsel __maybe_unused,
-                                  double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_l1_icache_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " of all L1-icache hits  ");
-}
-
-static void print_dtlb_cache_misses(int cpu,
-                                   struct perf_evsel *evsel __maybe_unused,
-                                   double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_dtlb_cache_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " of all dTLB cache hits ");
-}
-
-static void print_itlb_cache_misses(int cpu,
-                                   struct perf_evsel *evsel __maybe_unused,
-                                   double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_itlb_cache_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " of all iTLB cache hits ");
-}
-
-static void print_ll_cache_misses(int cpu,
-                                 struct perf_evsel *evsel __maybe_unused,
-                                 double avg)
-{
-       double total, ratio = 0.0;
-       const char *color;
-
-       total = avg_stats(&runtime_ll_cache_stats[cpu]);
-
-       if (total)
-               ratio = avg / total * 100.0;
-
-       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
-
-       fprintf(output, " #  ");
-       color_fprintf(output, color, "%6.2f%%", ratio);
-       fprintf(output, " of all LL-cache hits   ");
-}
-
 static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
 {
-       double total, ratio = 0.0, total2;
        double sc =  evsel->scale;
        const char *fmt;
        int cpu = cpu_map__id_to_cpu(id);
@@ -1090,138 +792,7 @@ static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
        if (csv_output || interval)
                return;
 
-       if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
-               total = avg_stats(&runtime_cycles_stats[cpu]);
-               if (total) {
-                       ratio = avg / total;
-                       fprintf(output, " #   %5.2f  insns per cycle        ", ratio);
-               } else {
-                       fprintf(output, "                                   ");
-               }
-               total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
-               total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));
-
-               if (total && avg) {
-                       ratio = total / avg;
-                       fprintf(output, "\n");
-                       if (aggr_mode == AGGR_NONE)
-                               fprintf(output, "        ");
-                       fprintf(output, "                                                  #   %5.2f  stalled cycles per insn", ratio);
-               }
-
-       } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
-                       runtime_branches_stats[cpu].n != 0) {
-               print_branch_misses(cpu, evsel, avg);
-       } else if (
-               evsel->attr.type == PERF_TYPE_HW_CACHE &&
-               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
-                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
-                       runtime_l1_dcache_stats[cpu].n != 0) {
-               print_l1_dcache_misses(cpu, evsel, avg);
-       } else if (
-               evsel->attr.type == PERF_TYPE_HW_CACHE &&
-               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
-                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
-                       runtime_l1_icache_stats[cpu].n != 0) {
-               print_l1_icache_misses(cpu, evsel, avg);
-       } else if (
-               evsel->attr.type == PERF_TYPE_HW_CACHE &&
-               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
-                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
-                       runtime_dtlb_cache_stats[cpu].n != 0) {
-               print_dtlb_cache_misses(cpu, evsel, avg);
-       } else if (
-               evsel->attr.type == PERF_TYPE_HW_CACHE &&
-               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
-                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
-                       runtime_itlb_cache_stats[cpu].n != 0) {
-               print_itlb_cache_misses(cpu, evsel, avg);
-       } else if (
-               evsel->attr.type == PERF_TYPE_HW_CACHE &&
-               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
-                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
-                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
-                       runtime_ll_cache_stats[cpu].n != 0) {
-               print_ll_cache_misses(cpu, evsel, avg);
-       } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
-                       runtime_cacherefs_stats[cpu].n != 0) {
-               total = avg_stats(&runtime_cacherefs_stats[cpu]);
-
-               if (total)
-                       ratio = avg * 100 / total;
-
-               fprintf(output, " # %8.3f %% of all cache refs    ", ratio);
-
-       } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
-               print_stalled_cycles_frontend(cpu, evsel, avg);
-       } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
-               print_stalled_cycles_backend(cpu, evsel, avg);
-       } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
-               total = avg_stats(&runtime_nsecs_stats[cpu]);
-
-               if (total) {
-                       ratio = avg / total;
-                       fprintf(output, " # %8.3f GHz                    ", ratio);
-               } else {
-                       fprintf(output, "                                   ");
-               }
-       } else if (transaction_run &&
-                  perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX))) {
-               total = avg_stats(&runtime_cycles_stats[cpu]);
-               if (total)
-                       fprintf(output,
-                               " #   %5.2f%% transactional cycles   ",
-                               100.0 * (avg / total));
-       } else if (transaction_run &&
-                  perf_evsel__cmp(evsel, nth_evsel(T_CYCLES_IN_TX_CP))) {
-               total = avg_stats(&runtime_cycles_stats[cpu]);
-               total2 = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
-               if (total2 < avg)
-                       total2 = avg;
-               if (total)
-                       fprintf(output,
-                               " #   %5.2f%% aborted cycles         ",
-                               100.0 * ((total2-avg) / total));
-       } else if (transaction_run &&
-                  perf_evsel__cmp(evsel, nth_evsel(T_TRANSACTION_START)) &&
-                  avg > 0 &&
-                  runtime_cycles_in_tx_stats[cpu].n != 0) {
-               total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
-
-               if (total)
-                       ratio = total / avg;
-
-               fprintf(output, " # %8.0f cycles / transaction   ", ratio);
-       } else if (transaction_run &&
-                  perf_evsel__cmp(evsel, nth_evsel(T_ELISION_START)) &&
-                  avg > 0 &&
-                  runtime_cycles_in_tx_stats[cpu].n != 0) {
-               total = avg_stats(&runtime_cycles_in_tx_stats[cpu]);
-
-               if (total)
-                       ratio = total / avg;
-
-               fprintf(output, " # %8.0f cycles / elision       ", ratio);
-       } else if (runtime_nsecs_stats[cpu].n != 0) {
-               char unit = 'M';
-
-               total = avg_stats(&runtime_nsecs_stats[cpu]);
-
-               if (total)
-                       ratio = 1000.0 * avg / total;
-               if (ratio < 0.001) {
-                       ratio *= 1000;
-                       unit = 'K';
-               }
-
-               fprintf(output, " # %8.3f %c/sec                  ", ratio, unit);
-       } else {
-               fprintf(output, "                                   ");
-       }
+       perf_stat__print_shadow_stats(output, evsel, avg, cpu, aggr_mode);
 }
 
 static void print_aggr(char *prefix)
@@ -1536,17 +1107,6 @@ static int perf_stat_init_aggr_mode(void)
        return 0;
 }
 
-static int setup_events(const char * const *attrs, unsigned len)
-{
-       unsigned i;
-
-       for (i = 0; i < len; i++) {
-               if (parse_events(evsel_list, attrs[i]))
-                       return -1;
-       }
-       return 0;
-}
-
 /*
  * Add default attributes, if there were no attributes specified or
  * if -d/--detailed, -d -d or -d -d -d is used:
@@ -1668,12 +1228,10 @@ static int add_default_attributes(void)
                int err;
                if (pmu_have_event("cpu", "cycles-ct") &&
                    pmu_have_event("cpu", "el-start"))
-                       err = setup_events(transaction_attrs,
-                                       ARRAY_SIZE(transaction_attrs));
+                       err = parse_events(evsel_list, transaction_attrs, NULL);
                else
-                       err = setup_events(transaction_limited_attrs,
-                                ARRAY_SIZE(transaction_limited_attrs));
-               if (err < 0) {
+                       err = parse_events(evsel_list, transaction_limited_attrs, NULL);
+               if (err) {
                        fprintf(stderr, "Cannot set up transaction events\n");
                        return -1;
                }
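
The large removals above are a code motion, not a behavior change: the runtime_*_stats arrays and the ratio printers migrate into a shared stat-shadow module, leaving builtin-stat.c with three entry points, perf_stat__reset_shadow_stats(), perf_stat__update_shadow_stats() on every counter read, and perf_stat__print_shadow_stats() at output time. A compilable miniature of that split, with a single hypothetical cycles shadow feeding an insns-per-cycle ratio:

#include <stdio.h>

/* --- stat-shadow-like module: owns the reference counters --- */
static double shadow_cycles;

static void shadow_reset(void)             { shadow_cycles = 0.0; }
static void shadow_update_cycles(double v) { shadow_cycles += v; }

static void shadow_print_insns(double insns)
{
	if (shadow_cycles)
		printf(" #  %5.2f  insns per cycle\n", insns / shadow_cycles);
	else
		printf("\n");
}

/* --- caller: the read/print loop feeds and queries, owns no ratios --- */
int main(void)
{
	shadow_reset();
	shadow_update_cycles(2000.0);	/* one counter read */
	shadow_print_insns(3000.0);	/* " #   1.50  insns per cycle" */
	return 0;
}
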
index e50fe1187b0ba2ca808f6c0a0d3effd8156b4982..30e59620179daef63c272c3119f884a8caefd12f 100644 (file)
@@ -61,13 +61,13 @@ struct timechart {
                                tasks_only,
                                with_backtrace,
                                topology;
+       bool                    force;
        /* IO related settings */
-       u64                     io_events;
        bool                    io_only,
                                skip_eagain;
+       u64                     io_events;
        u64                     min_time,
                                merge_dist;
-       bool                    force;
 };
 
 struct per_pidcomm;
@@ -523,7 +523,7 @@ static const char *cat_backtrace(union perf_event *event,
                                 * Discard all.
                                 */
                                zfree(&p);
-                               goto exit;
+                               goto exit_put;
                        }
                        continue;
                }
@@ -538,7 +538,8 @@ static const char *cat_backtrace(union perf_event *event,
                else
                        fprintf(f, "..... %016" PRIx64 "\n", ip);
        }
-
+exit_put:
+       addr_location__put(&al);
 exit:
        fclose(f);
 
index 6a4d5d41c671d0ce176deb13d318de35acee0161..619a8696fda7c939cd0e6497abab5845813bda12 100644 (file)
@@ -235,10 +235,13 @@ static void perf_top__show_details(struct perf_top *top)
 
        more = symbol__annotate_printf(symbol, he->ms.map, top->sym_evsel,
                                       0, top->sym_pcnt_filter, top->print_entries, 4);
-       if (top->zero)
-               symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
-       else
-               symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
+
+       if (top->evlist->enabled) {
+               if (top->zero)
+                       symbol__annotate_zero_histogram(symbol, top->sym_evsel->idx);
+               else
+                       symbol__annotate_decay_histogram(symbol, top->sym_evsel->idx);
+       }
        if (more != 0)
                printf("%d lines not displayed, maybe increase display entries [e]\n", more);
 out_unlock:
@@ -276,11 +279,13 @@ static void perf_top__print_sym_table(struct perf_top *top)
                return;
        }
 
-       if (top->zero) {
-               hists__delete_entries(hists);
-       } else {
-               hists__decay_entries(hists, top->hide_user_symbols,
-                                    top->hide_kernel_symbols);
+       if (top->evlist->enabled) {
+               if (top->zero) {
+                       hists__delete_entries(hists);
+               } else {
+                       hists__decay_entries(hists, top->hide_user_symbols,
+                                            top->hide_kernel_symbols);
+               }
        }
 
        hists__collapse_resort(hists, NULL);
@@ -545,11 +550,13 @@ static void perf_top__sort_new_samples(void *arg)
 
        hists = evsel__hists(t->sym_evsel);
 
-       if (t->zero) {
-               hists__delete_entries(hists);
-       } else {
-               hists__decay_entries(hists, t->hide_user_symbols,
-                                    t->hide_kernel_symbols);
+       if (t->evlist->enabled) {
+               if (t->zero) {
+                       hists__delete_entries(hists);
+               } else {
+                       hists__decay_entries(hists, t->hide_user_symbols,
+                                            t->hide_kernel_symbols);
+               }
        }
 
        hists__collapse_resort(hists, NULL);
@@ -579,8 +586,27 @@ static void *display_thread_tui(void *arg)
                hists->uid_filter_str = top->record_opts.target.uid_str;
        }
 
-       perf_evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
-                                     &top->session->header.env);
+       while (true) {
+               int key = perf_evlist__tui_browse_hists(top->evlist, help, &hbt,
+                                                       top->min_percent,
+                                                       &top->session->header.env);
+
+               if (key != 'f')
+                       break;
+
+               perf_evlist__toggle_enable(top->evlist);
+               /*
+                * No need to refresh, resort/decay histogram entries
+                * if we are not collecting samples:
+                */
+               if (top->evlist->enabled) {
+                       hbt.refresh = top->delay_secs;
+                       help = "Press 'f' to disable the events or 'h' to see other hotkeys";
+               } else {
+                       help = "Press 'f' again to re-enable the events";
+                       hbt.refresh = 0;
+               }
+       }
 
        done = 1;
        return NULL;
@@ -775,7 +801,9 @@ static void perf_event__process_sample(struct perf_tool *tool,
        if (al.sym == NULL || !al.sym->ignore) {
                struct hists *hists = evsel__hists(evsel);
                struct hist_entry_iter iter = {
-                       .add_entry_cb = hist_iter__top_callback,
+                       .evsel          = evsel,
+                       .sample         = sample,
+                       .add_entry_cb   = hist_iter__top_callback,
                };
 
                if (symbol_conf.cumulate_callchain)
@@ -785,15 +813,14 @@ static void perf_event__process_sample(struct perf_tool *tool,
 
                pthread_mutex_lock(&hists->lock);
 
-               err = hist_entry_iter__add(&iter, &al, evsel, sample,
-                                          top->max_stack, top);
+               err = hist_entry_iter__add(&iter, &al, top->max_stack, top);
                if (err < 0)
                        pr_err("Problem incrementing symbol period, skipping event\n");
 
                pthread_mutex_unlock(&hists->lock);
        }
 
-       return;
+       addr_location__put(&al);
 }
 
 static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
@@ -950,7 +977,7 @@ static int __cmd_top(struct perf_top *top)
                goto out_delete;
 
        machine__synthesize_threads(&top->session->machines.host, &opts->target,
-                                   top->evlist->threads, false);
+                                   top->evlist->threads, false, opts->proc_map_timeout);
        ret = perf_top__start_counters(top);
        if (ret)
                goto out_delete;
@@ -1060,6 +1087,7 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
                        .target         = {
                                .uses_mmap   = true,
                        },
+                       .proc_map_timeout    = 500,
                },
                .max_stack           = PERF_MAX_STACK_DEPTH,
                .sym_pcnt_filter     = 5,
@@ -1159,6 +1187,8 @@ int cmd_top(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
                   "width[,width...]",
                   "don't try to adjust column width, use these fixed values"),
+       OPT_UINTEGER(0, "proc-map-timeout", &opts->proc_map_timeout,
+                       "per thread proc mmap processing timeout in ms"),
        OPT_END()
        };
        const char * const top_usage[] = {
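
perf top's TUI thread now loops on the hist browser: the 'f' hotkey toggles the whole evlist on and off, and while disabled the refresh interval drops to zero and the decay/zero passes earlier in this file are skipped, so the frozen histogram stays stable on screen. A compact runnable model of that key loop (ui_poll_key() is a scripted stand-in for the browser):

#include <stdbool.h>
#include <stdio.h>

/* scripted key presses standing in for the TUI browser */
static int ui_poll_key(void)
{
	static const int keys[] = { 'f', 'f', 'q' };
	static unsigned int i;

	return keys[i++];
}

int main(void)
{
	const int delay_secs = 2;
	int refresh = delay_secs;
	bool enabled = true;

	for (;;) {
		int key = ui_poll_key();

		if (key != 'f')
			break;				/* any other key exits */

		enabled = !enabled;			/* evlist-wide toggle  */
		refresh = enabled ? delay_secs : 0;	/* freeze the display  */
		printf("events %s, refresh=%ds\n",
		       enabled ? "enabled" : "disabled", refresh);
	}
	return 0;
}
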
index e122970361f21af6d07c321480aefa2cb90bf31d..de5d277d1ad7cb97cac2c5da67032fc8a12ffdf6 100644 (file)
@@ -16,7 +16,6 @@
 
 #include <libaudit.h>
 #include <stdlib.h>
-#include <sys/eventfd.h>
 #include <sys/mman.h>
 #include <linux/futex.h>
 
 #ifndef EFD_SEMAPHORE
 # define EFD_SEMAPHORE         1
 #endif
 
+#ifndef EFD_NONBLOCK
+# define EFD_NONBLOCK          00004000
+#endif
+
+#ifndef EFD_CLOEXEC
+# define EFD_CLOEXEC           02000000
+#endif
+
+#ifndef O_CLOEXEC
+# define O_CLOEXEC             02000000
+#endif
+
+#ifndef SOCK_DCCP
+# define SOCK_DCCP             6
+#endif
+
+#ifndef SOCK_CLOEXEC
+# define SOCK_CLOEXEC          02000000
+#endif
+
+#ifndef SOCK_NONBLOCK
+# define SOCK_NONBLOCK         00004000
+#endif
+
+#ifndef MSG_CMSG_CLOEXEC
+# define MSG_CMSG_CLOEXEC      0x40000000
+#endif
+
+#ifndef PERF_FLAG_FD_NO_GROUP
+# define PERF_FLAG_FD_NO_GROUP         (1UL << 0)
+#endif
+
+#ifndef PERF_FLAG_FD_OUTPUT
+# define PERF_FLAG_FD_OUTPUT           (1UL << 1)
+#endif
+
+#ifndef PERF_FLAG_PID_CGROUP
+# define PERF_FLAG_PID_CGROUP          (1UL << 2) /* pid=cgroup id, per-cpu mode only */
+#endif
+
+#ifndef PERF_FLAG_FD_CLOEXEC
+# define PERF_FLAG_FD_CLOEXEC          (1UL << 3) /* O_CLOEXEC */
+#endif
+
+
 struct tp_field {
        int offset;
        union {
@@ -331,6 +375,14 @@ static size_t syscall_arg__scnprintf_hex(char *bf, size_t size,
 
 #define SCA_HEX syscall_arg__scnprintf_hex
 
+static size_t syscall_arg__scnprintf_int(char *bf, size_t size,
+                                        struct syscall_arg *arg)
+{
+       return scnprintf(bf, size, "%d", arg->val);
+}
+
+#define SCA_INT syscall_arg__scnprintf_int
+
 static size_t syscall_arg__scnprintf_mmap_prot(char *bf, size_t size,
                                               struct syscall_arg *arg)
 {
@@ -783,6 +835,34 @@ static size_t syscall_arg__scnprintf_open_flags(char *bf, size_t size,
 
 #define SCA_OPEN_FLAGS syscall_arg__scnprintf_open_flags
 
+static size_t syscall_arg__scnprintf_perf_flags(char *bf, size_t size,
+                                               struct syscall_arg *arg)
+{
+       int printed = 0, flags = arg->val;
+
+       if (flags == 0)
+               return 0;
+
+#define        P_FLAG(n) \
+       if (flags & PERF_FLAG_##n) { \
+               printed += scnprintf(bf + printed, size - printed, "%s%s", printed ? "|" : "", #n); \
+               flags &= ~PERF_FLAG_##n; \
+       }
+
+       P_FLAG(FD_NO_GROUP);
+       P_FLAG(FD_OUTPUT);
+       P_FLAG(PID_CGROUP);
+       P_FLAG(FD_CLOEXEC);
+#undef P_FLAG
+
+       if (flags)
+               printed += scnprintf(bf + printed, size - printed, "%s%#x", printed ? "|" : "", flags);
+
+       return printed;
+}
+
+#define SCA_PERF_FLAGS syscall_arg__scnprintf_perf_flags
+
 static size_t syscall_arg__scnprintf_eventfd_flags(char *bf, size_t size,
                                                   struct syscall_arg *arg)
 {
@@ -1050,6 +1130,11 @@ static struct syscall_fmt {
        { .name     = "openat",     .errmsg = true,
          .arg_scnprintf = { [0] = SCA_FDAT, /* dfd */
                             [2] = SCA_OPEN_FLAGS, /* flags */ }, },
+       { .name     = "perf_event_open", .errmsg = true,
+         .arg_scnprintf = { [1] = SCA_INT, /* pid */
+                            [2] = SCA_INT, /* cpu */
+                            [3] = SCA_FD,  /* group_fd */
+                            [4] = SCA_PERF_FLAGS,  /* flags */ }, },
        { .name     = "pipe2",      .errmsg = true,
          .arg_scnprintf = { [1] = SCA_PIPE_FLAGS, /* flags */ }, },
        { .name     = "poll",       .errmsg = true, .timeout = true, },
@@ -1433,7 +1518,8 @@ static int trace__symbols_init(struct trace *trace, struct perf_evlist *evlist)
                return -ENOMEM;
 
        err = __machine__synthesize_threads(trace->host, &trace->tool, &trace->opts.target,
-                                           evlist->threads, trace__tool_process, false);
+                                           evlist->threads, trace__tool_process, false,
+                                           trace->opts.proc_map_timeout);
        if (err)
                symbol__exit();
 
@@ -1712,7 +1798,7 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
        void *args;
        size_t printed = 0;
        struct thread *thread;
-       int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+       int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
        struct syscall *sc = trace__syscall_info(trace, evsel, id);
        struct thread_trace *ttrace;
 
@@ -1725,14 +1811,14 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
        thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
-               return -1;
+               goto out_put;
 
        args = perf_evsel__sc_tp_ptr(evsel, args, sample);
 
        if (ttrace->entry_str == NULL) {
                ttrace->entry_str = malloc(1024);
                if (!ttrace->entry_str)
-                       return -1;
+                       goto out_put;
        }
 
        if (!trace->summary_only)
@@ -1757,8 +1843,10 @@ static int trace__sys_enter(struct trace *trace, struct perf_evsel *evsel,
                thread__put(trace->current);
                trace->current = thread__get(thread);
        }
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
@@ -1768,7 +1856,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
        long ret;
        u64 duration = 0;
        struct thread *thread;
-       int id = perf_evsel__sc_tp_uint(evsel, id, sample);
+       int id = perf_evsel__sc_tp_uint(evsel, id, sample), err = -1;
        struct syscall *sc = trace__syscall_info(trace, evsel, id);
        struct thread_trace *ttrace;
 
@@ -1781,7 +1869,7 @@ static int trace__sys_exit(struct trace *trace, struct perf_evsel *evsel,
        thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
-               return -1;
+               goto out_put;
 
        if (trace->summary)
                thread__update_stats(ttrace, id, sample);
@@ -1835,8 +1923,10 @@ signed_print:
        fputc('\n', trace->output);
 out:
        ttrace->entry_pending = false;
-
-       return 0;
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static int trace__vfs_getname(struct trace *trace, struct perf_evsel *evsel,
@@ -1863,6 +1953,7 @@ static int trace__sched_stat_runtime(struct trace *trace, struct perf_evsel *evs
 
        ttrace->runtime_ms += runtime_ms;
        trace->runtime_ms += runtime_ms;
+       thread__put(thread);
        return 0;
 
 out_dump:
@@ -1872,6 +1963,7 @@ out_dump:
               (pid_t)perf_evsel__intval(evsel, sample, "pid"),
               runtime,
               perf_evsel__intval(evsel, sample, "vruntime"));
+       thread__put(thread);
        return 0;
 }
 
@@ -1924,11 +2016,12 @@ static int trace__pgfault(struct trace *trace,
        struct addr_location al;
        char map_type = 'd';
        struct thread_trace *ttrace;
+       int err = -1;
 
        thread = machine__findnew_thread(trace->host, sample->pid, sample->tid);
        ttrace = thread__trace(thread, trace->output);
        if (ttrace == NULL)
-               return -1;
+               goto out_put;
 
        if (evsel->attr.config == PERF_COUNT_SW_PAGE_FAULTS_MAJ)
                ttrace->pfmaj++;
@@ -1936,7 +2029,7 @@ static int trace__pgfault(struct trace *trace,
                ttrace->pfmin++;
 
        if (trace->summary_only)
-               return 0;
+               goto out;
 
        thread__find_addr_location(thread, cpumode, MAP__FUNCTION,
                              sample->ip, &al);
@@ -1967,8 +2060,11 @@ static int trace__pgfault(struct trace *trace,
        print_location(trace->output, sample, &al, true, false);
 
        fprintf(trace->output, " (%c%c)\n", map_type, al.level);
-
-       return 0;
+out:
+       err = 0;
+out_put:
+       thread__put(thread);
+       return err;
 }
 
 static bool skip_sample(struct trace *trace, struct perf_sample *sample)
@@ -2652,6 +2748,7 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                        .user_interval = ULLONG_MAX,
                        .no_buffering  = true,
                        .mmap_pages    = UINT_MAX,
+                       .proc_map_timeout  = 500,
                },
                .output = stdout,
                .show_comm = true,
@@ -2666,16 +2763,15 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
        OPT_BOOLEAN(0, "comm", &trace.show_comm,
                    "show the thread COMM next to its id"),
        OPT_BOOLEAN(0, "tool_stats", &trace.show_tool_stats, "show tool stats"),
-       OPT_STRING('e', "expr", &ev_qualifier_str, "expr",
-                   "list of events to trace"),
+       OPT_STRING('e', "expr", &ev_qualifier_str, "expr", "list of syscalls to trace"),
        OPT_STRING('o', "output", &output_name, "file", "output file name"),
        OPT_STRING('i', "input", &input_name, "file", "Analyze events in file"),
        OPT_STRING('p', "pid", &trace.opts.target.pid, "pid",
                    "trace events on existing process id"),
        OPT_STRING('t', "tid", &trace.opts.target.tid, "tid",
                    "trace events on existing thread id"),
-       OPT_CALLBACK(0, "filter-pids", &trace, "float",
-                    "show only events with duration > N.M ms", trace__set_filter_pids),
+       OPT_CALLBACK(0, "filter-pids", &trace, "CSV list of pids",
+                    "pids to filter (by the kernel)", trace__set_filter_pids),
        OPT_BOOLEAN('a', "all-cpus", &trace.opts.target.system_wide,
                    "system-wide collection from all CPUs"),
        OPT_STRING('C', "cpu", &trace.opts.target.cpu_list, "cpu",
@@ -2702,6 +2798,8 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
                     "Trace pagefaults", parse_pagefaults, "maj"),
        OPT_BOOLEAN(0, "syscalls", &trace.trace_syscalls, "Trace syscalls"),
        OPT_BOOLEAN('f', "force", &trace.force, "don't complain, do it"),
+       OPT_UINTEGER(0, "proc-map-timeout", &trace.opts.proc_map_timeout,
+                       "per thread proc mmap processing timeout in ms"),
        OPT_END()
        };
        const char * const trace_subcommands[] = { "record", NULL };
@@ -2712,11 +2810,10 @@ int cmd_trace(int argc, const char **argv, const char *prefix __maybe_unused)
        signal(SIGFPE, sighandler_dump_stack);
 
        trace.evlist = perf_evlist__new();
-       if (trace.evlist == NULL)
-               return -ENOMEM;
 
        if (trace.evlist == NULL) {
                pr_err("Not enough memory to run!\n");
+               err = -ENOMEM;
                goto out;
        }
 
index 59a98c6432403874a564753dd6e790b8632b672e..317001c946608be1430d03d485fe19586f1e0446 100644 (file)
@@ -32,7 +32,7 @@ ifeq ($(ARCH),x86)
     LIBUNWIND_LIBS = -lunwind -lunwind-x86_64
     $(call detected,CONFIG_X86_64)
   else
-    LIBUNWIND_LIBS = -lunwind -lunwind-x86
+    LIBUNWIND_LIBS = -lunwind-x86 -llzma -lunwind
   endif
   NO_PERF_REGS := 0
 endif
@@ -130,6 +130,8 @@ endif
 
 ifeq ($(DEBUG),0)
   CFLAGS += -O6
+else
+  CFLAGS += $(call cc-option,-Og,-O0)
 endif
 
 ifdef PARSER_DEBUG
@@ -268,6 +270,10 @@ else
   endif # libelf support
 endif # NO_LIBELF
 
+ifdef NO_DWARF
+  NO_LIBDW_DWARF_UNWIND := 1
+endif
+
 ifndef NO_LIBELF
   CFLAGS += -DHAVE_LIBELF_SUPPORT
   EXTLIBS += -lelf
@@ -610,6 +616,11 @@ ifdef LIBBABELTRACE
   endif
 endif
 
+ifndef NO_AUXTRACE
+  $(call detected,CONFIG_AUXTRACE)
+  CFLAGS += -DHAVE_AUXTRACE_SUPPORT
+endif
+
 # Among the variables below, these:
 #   perfexecdir
 #   template_dir
index c16ce833079c0a307642f2ae0e75f9c0d577c4d8..0ebef09c0842f89e16df8404931f884d7796c1ca 100644 (file)
@@ -177,3 +177,22 @@ $(if $($(1)),$(call _ge_attempt,$($(1)),$(1)),$(call _ge_attempt,$(2)))
 endef
 _ge_attempt = $(if $(get-executable),$(get-executable),$(call _gea_err,$(2)))
 _gea_err  = $(if $(1),$(error Please set '$(1)' appropriately))
+
+# try-run
+# Usage: option = $(call try-run, $(CC)...-o "$$TMP",option-ok,otherwise)
+# Exit code chooses option. "$$TMP" can be used as a temporary file and
+# is automatically cleaned up.
+try-run = $(shell set -e;              \
+       TMP="$(TMPOUT).$$$$.tmp";       \
+       TMPO="$(TMPOUT).$$$$.o";        \
+       if ($(1)) >/dev/null 2>&1;      \
+       then echo "$(2)";               \
+       else echo "$(3)";               \
+       fi;                             \
+       rm -f "$$TMP" "$$TMPO")
+
+# cc-option
+# Usage: cflags-y += $(call cc-option,-march=winchip-c6,-march=i586)
+
+cc-option = $(call try-run,\
+       $(CC) $(KBUILD_CPPFLAGS) $(KBUILD_CFLAGS) $(1) -c -x c /dev/null -o "$$TMP",$(1),$(2))
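
try-run compiles a probe command and echoes one of two strings depending on its exit code; cc-option builds on it to pick the first compiler flag the toolchain accepts, which is how the DEBUG hunk above can prefer -Og and fall back to -O0. A hypothetical fragment showing both helpers in use (TMPOUT and CC are assumed to come from the including Makefile, as in perf's build):

    # Hypothetical usage of the helpers defined above.
    debug-flags  := $(call cc-option,-Og,-O0)   # -Og where supported, else -O0
    have-fno-lto := $(call try-run,$(CC) -fno-lto -c -x c /dev/null -o "$$TMP",y,n)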
index 6ef68165c9db628d23bbe85b48945ac2581ec979..83a25cef82fdd2747ab0bc7f8b4fdc50c3c65b62 100644 (file)
@@ -6,11 +6,9 @@
 #include <sys/syscall.h>
 #include <linux/types.h>
 #include <linux/perf_event.h>
+#include <asm/barrier.h>
 
 #if defined(__i386__)
-#define mb()           asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define wmb()          asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
-#define rmb()          asm volatile("lock; addl $0,0(%%esp)" ::: "memory")
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC   {"model name"}
 #ifndef __NR_perf_event_open
@@ -25,9 +23,6 @@
 #endif
 
 #if defined(__x86_64__)
-#define mb()           asm volatile("mfence" ::: "memory")
-#define wmb()          asm volatile("sfence" ::: "memory")
-#define rmb()          asm volatile("lfence" ::: "memory")
 #define cpu_relax()    asm volatile("rep; nop" ::: "memory");
 #define CPUINFO_PROC   {"model name"}
 #ifndef __NR_perf_event_open
 
 #ifdef __powerpc__
 #include "../../arch/powerpc/include/uapi/asm/unistd.h"
-#define mb()           asm volatile ("sync" ::: "memory")
-#define wmb()          asm volatile ("sync" ::: "memory")
-#define rmb()          asm volatile ("sync" ::: "memory")
 #define CPUINFO_PROC   {"cpu"}
 #endif
 
 #ifdef __s390__
-#define mb()           asm volatile("bcr 15,0" ::: "memory")
-#define wmb()          asm volatile("bcr 15,0" ::: "memory")
-#define rmb()          asm volatile("bcr 15,0" ::: "memory")
 #define CPUINFO_PROC   {"vendor_id"}
 #endif
 
 #ifdef __sh__
-#if defined(__SH4A__) || defined(__SH5__)
-# define mb()          asm volatile("synco" ::: "memory")
-# define wmb()         asm volatile("synco" ::: "memory")
-# define rmb()         asm volatile("synco" ::: "memory")
-#else
-# define mb()          asm volatile("" ::: "memory")
-# define wmb()         asm volatile("" ::: "memory")
-# define rmb()         asm volatile("" ::: "memory")
-#endif
 #define CPUINFO_PROC   {"cpu type"}
 #endif
 
 #ifdef __hppa__
-#define mb()           asm volatile("" ::: "memory")
-#define wmb()          asm volatile("" ::: "memory")
-#define rmb()          asm volatile("" ::: "memory")
 #define CPUINFO_PROC   {"cpu"}
 #endif
 
 #ifdef __sparc__
-#ifdef __LP64__
-#define mb()           asm volatile("ba,pt %%xcc, 1f\n"        \
-                                    "membar #StoreLoad\n"      \
-                                    "1:\n":::"memory")
-#else
-#define mb()           asm volatile("":::"memory")
-#endif
-#define wmb()          asm volatile("":::"memory")
-#define rmb()          asm volatile("":::"memory")
 #define CPUINFO_PROC   {"cpu"}
 #endif
 
 #ifdef __alpha__
-#define mb()           asm volatile("mb" ::: "memory")
-#define wmb()          asm volatile("wmb" ::: "memory")
-#define rmb()          asm volatile("mb" ::: "memory")
 #define CPUINFO_PROC   {"cpu model"}
 #endif
 
 #ifdef __ia64__
-#define mb()           asm volatile ("mf" ::: "memory")
-#define wmb()          asm volatile ("mf" ::: "memory")
-#define rmb()          asm volatile ("mf" ::: "memory")
 #define cpu_relax()    asm volatile ("hint @pause" ::: "memory")
 #define CPUINFO_PROC   {"model name"}
 #endif
 
 #ifdef __arm__
-/*
- * Use the __kuser_memory_barrier helper in the CPU helper page. See
- * arch/arm/kernel/entry-armv.S in the kernel source for details.
- */
-#define mb()           ((void(*)(void))0xffff0fa0)()
-#define wmb()          ((void(*)(void))0xffff0fa0)()
-#define rmb()          ((void(*)(void))0xffff0fa0)()
 #define CPUINFO_PROC   {"model name", "Processor"}
 #endif
 
 #ifdef __aarch64__
-#define mb()           asm volatile("dmb ish" ::: "memory")
-#define wmb()          asm volatile("dmb ishst" ::: "memory")
-#define rmb()          asm volatile("dmb ishld" ::: "memory")
 #define cpu_relax()    asm volatile("yield" ::: "memory")
 #endif
 
 #ifdef __mips__
-#define mb()           asm volatile(                                   \
-                               ".set   mips2\n\t"                      \
-                               "sync\n\t"                              \
-                               ".set   mips0"                          \
-                               : /* no output */                       \
-                               : /* no input */                        \
-                               : "memory")
-#define wmb()  mb()
-#define rmb()  mb()
 #define CPUINFO_PROC   {"cpu model"}
 #endif
 
 #ifdef __arc__
-#define mb()           asm volatile("" ::: "memory")
-#define wmb()          asm volatile("" ::: "memory")
-#define rmb()          asm volatile("" ::: "memory")
 #define CPUINFO_PROC   {"Processor"}
 #endif
 
 #ifdef __metag__
-#define mb()           asm volatile("" ::: "memory")
-#define wmb()          asm volatile("" ::: "memory")
-#define rmb()          asm volatile("" ::: "memory")
 #define CPUINFO_PROC   {"CPU"}
 #endif
 
 #ifdef __xtensa__
-#define mb()           asm volatile("memw" ::: "memory")
-#define wmb()          asm volatile("memw" ::: "memory")
-#define rmb()          asm volatile("" ::: "memory")
 #define CPUINFO_PROC   {"core ID"}
 #endif
 
 #ifdef __tile__
-#define mb()           asm volatile ("mf" ::: "memory")
-#define wmb()          asm volatile ("mf" ::: "memory")
-#define rmb()          asm volatile ("mf" ::: "memory")
 #define cpu_relax()    asm volatile ("mfspr zero, PASS" ::: "memory")
 #define CPUINFO_PROC    {"model name"}
 #endif
 
-#define barrier() asm volatile ("" ::: "memory")
-
 #ifndef cpu_relax
 #define cpu_relax() barrier()
 #endif
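
All of the hand-rolled per-architecture mb()/rmb()/wmb() definitions above collapse into the single asm/barrier.h include, which resolves to the tools/ copies of the kernel's barrier headers. For context, a sketch of the consumer pattern those barriers exist for when perf reads its mmap'ed ring buffer; struct perf_event_mmap_page and its data_head/data_tail fields are the real UAPI, while the function itself is illustrative and assumes it is compiled inside the tools tree with its include paths:

    #include <linux/perf_event.h>
    #include <asm/barrier.h>        /* tools/include copy, per the hunk above */

    /* Illustrative ring-buffer consumer: the barriers order the head
     * read and the tail write against accesses to the data area. */
    void drain(struct perf_event_mmap_page *pc)
    {
            __u64 head = pc->data_head;
            rmb();                  /* read head before the records it covers */

            /* ... consume records in [pc->data_tail, head) ... */

            mb();                   /* finish reading before publishing tail */
            pc->data_tail = head;
    }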
index e14bb637255cc351ac40850a148eba1461d55cd0..4a5827fff7993d2bbe87666af51aa939f6f9dd1d 100644 (file)
@@ -54,16 +54,22 @@ struct record_opts {
        bool         period;
        bool         sample_intr_regs;
        bool         running_time;
+       bool         full_auxtrace;
+       bool         auxtrace_snapshot_mode;
        unsigned int freq;
        unsigned int mmap_pages;
+       unsigned int auxtrace_mmap_pages;
        unsigned int user_freq;
        u64          branch_stack;
        u64          default_interval;
        u64          user_interval;
+       size_t       auxtrace_snapshot_size;
+       const char   *auxtrace_snapshot_opts;
        bool         sample_transaction;
        unsigned     initial_delay;
        bool         use_clockid;
        clockid_t    clockid;
+       unsigned int proc_map_timeout;
 };
 
 struct option;
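
The new proc_map_timeout field (callers elsewhere in this series pass 500, in milliseconds) bounds how long event synthesis may spend parsing a single thread's /proc/<pid>/maps, so one enormous process cannot stall the tool. A rough, self-contained illustration of the idea; the real parsing lives in the synthesize helpers:

    #include <stdio.h>
    #include <time.h>

    /* Give up on a huge /proc/<pid>/maps after a deadline instead of
     * stalling; 500 mirrors the millisecond values passed above. */
    int main(void)
    {
            long timeout_ms = 500;
            struct timespec start, now;
            char line[512];
            FILE *fp = fopen("/proc/self/maps", "r");

            if (!fp)
                    return 1;
            clock_gettime(CLOCK_MONOTONIC, &start);
            while (fgets(line, sizeof(line), fp)) {
                    clock_gettime(CLOCK_MONOTONIC, &now);
                    if ((now.tv_sec - start.tv_sec) * 1000 +
                        (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms) {
                            fputs("timeout, map data truncated\n", stderr);
                            break;
                    }
            }
            fclose(fp);
            return 0;
    }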
index 6a8801b32017018a09233390e7e5856afa4ae857..ee41e705b2eba7b726e417a1fdd73a421b7a985b 100644 (file)
@@ -3,9 +3,9 @@ perf-y += parse-events.o
 perf-y += dso-data.o
 perf-y += attr.o
 perf-y += vmlinux-kallsyms.o
-perf-y += open-syscall.o
-perf-y += open-syscall-all-cpus.o
-perf-y += open-syscall-tp-fields.o
+perf-y += openat-syscall.o
+perf-y += openat-syscall-all-cpus.o
+perf-y += openat-syscall-tp-fields.o
 perf-y += mmap-basic.o
 perf-y += perf-record.o
 perf-y += rdpmc.o
@@ -34,7 +34,7 @@ perf-y += kmod-path.o
 
 perf-$(CONFIG_X86) += perf-time-to-tsc.o
 
-ifeq ($(ARCH),$(filter $(ARCH),x86 arm))
+ifeq ($(ARCH),$(filter $(ARCH),x86 arm arm64))
 perf-$(CONFIG_DWARF_UNWIND) += dwarf-unwind.o
 endif
 
index 4f409816711249a540c77eb4136ec7d24e94991c..87b9961646e4a5f08728e694549195d9eeb2ad86 100644 (file)
@@ -23,12 +23,12 @@ static struct test {
                .func = test__vmlinux_matches_kallsyms,
        },
        {
-               .desc = "detect open syscall event",
-               .func = test__open_syscall_event,
+               .desc = "detect openat syscall event",
+               .func = test__openat_syscall_event,
        },
        {
-               .desc = "detect open syscall event on all cpus",
-               .func = test__open_syscall_event_on_all_cpus,
+               .desc = "detect openat syscall event on all cpus",
+               .func = test__openat_syscall_event_on_all_cpus,
        },
        {
                .desc = "read samples using the mmap interface",
@@ -73,8 +73,8 @@ static struct test {
                .func = test__perf_evsel__tp_sched_test,
        },
        {
-               .desc = "Generate and check syscalls:sys_enter_open event fields",
-               .func = test__syscall_open_tp_fields,
+               .desc = "Generate and check syscalls:sys_enter_openat event fields",
+               .func = test__syscall_openat_tp_fields,
        },
        {
                .desc = "struct perf_event_attr setup",
@@ -126,7 +126,7 @@ static struct test {
                .desc = "Test parsing with no sample_id_all bit set",
                .func = test__parse_no_sample_id_all,
        },
-#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
        {
                .desc = "Test dwarf unwind",
@@ -219,7 +219,7 @@ static int run_test(struct test *test)
        wait(&status);
 
        if (WIFEXITED(status)) {
-               err = WEXITSTATUS(status);
+               err = (signed char)WEXITSTATUS(status);
                pr_debug("test child finished with %d\n", err);
        } else if (WIFSIGNALED(status)) {
                err = -1;
index f671ec37a7c40c1346ebe92e77a6d201bc89305e..22f8a00446e1f1b3cb6b447dbc1bc21ccfda3108 100644 (file)
@@ -248,6 +248,7 @@ static int process_sample_event(struct machine *machine,
        struct perf_sample sample;
        struct thread *thread;
        u8 cpumode;
+       int ret;
 
        if (perf_evlist__parse_sample(evlist, event, &sample)) {
                pr_debug("perf_evlist__parse_sample failed\n");
@@ -262,7 +263,9 @@ static int process_sample_event(struct machine *machine,
 
        cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-       return read_object_code(sample.ip, READLEN, cpumode, thread, state);
+       ret = read_object_code(sample.ip, READLEN, cpumode, thread, state);
+       thread__put(thread);
+       return ret;
 }
 
 static int process_event(struct machine *machine, struct perf_evlist *evlist,
@@ -448,7 +451,7 @@ static int do_test_code_reading(bool try_kcore)
        }
 
        ret = perf_event__synthesize_thread_map(NULL, threads,
-                                               perf_event__process, machine, false);
+                                               perf_event__process, machine, false, 500);
        if (ret < 0) {
                pr_debug("perf_event__synthesize_thread_map failed\n");
                goto out_err;
@@ -457,13 +460,13 @@ static int do_test_code_reading(bool try_kcore)
        thread = machine__findnew_thread(machine, pid, pid);
        if (!thread) {
                pr_debug("machine__findnew_thread failed\n");
-               goto out_err;
+               goto out_put;
        }
 
        cpus = cpu_map__new(NULL);
        if (!cpus) {
                pr_debug("cpu_map__new failed\n");
-               goto out_err;
+               goto out_put;
        }
 
        while (1) {
@@ -472,7 +475,7 @@ static int do_test_code_reading(bool try_kcore)
                evlist = perf_evlist__new();
                if (!evlist) {
                        pr_debug("perf_evlist__new failed\n");
-                       goto out_err;
+                       goto out_put;
                }
 
                perf_evlist__set_maps(evlist, cpus, threads);
@@ -482,10 +485,10 @@ static int do_test_code_reading(bool try_kcore)
                else
                        str = "cycles";
                pr_debug("Parsing event '%s'\n", str);
-               ret = parse_events(evlist, str);
+               ret = parse_events(evlist, str, NULL);
                if (ret < 0) {
                        pr_debug("parse_events failed\n");
-                       goto out_err;
+                       goto out_put;
                }
 
                perf_evlist__config(evlist, &opts);
@@ -506,7 +509,7 @@ static int do_test_code_reading(bool try_kcore)
                                continue;
                        }
                        pr_debug("perf_evlist__open failed\n");
-                       goto out_err;
+                       goto out_put;
                }
                break;
        }
@@ -514,7 +517,7 @@ static int do_test_code_reading(bool try_kcore)
        ret = perf_evlist__mmap(evlist, UINT_MAX, false);
        if (ret < 0) {
                pr_debug("perf_evlist__mmap failed\n");
-               goto out_err;
+               goto out_put;
        }
 
        perf_evlist__enable(evlist);
@@ -525,7 +528,7 @@ static int do_test_code_reading(bool try_kcore)
 
        ret = process_events(machine, evlist, &state);
        if (ret < 0)
-               goto out_err;
+               goto out_put;
 
        if (!have_vmlinux && !have_kcore && !try_kcore)
                err = TEST_CODE_READING_NO_KERNEL_OBJ;
@@ -535,7 +538,10 @@ static int do_test_code_reading(bool try_kcore)
                err = TEST_CODE_READING_NO_ACCESS;
        else
                err = TEST_CODE_READING_OK;
+out_put:
+       thread__put(thread);
 out_err:
+
        if (evlist) {
                perf_evlist__delete(evlist);
        } else {
index 513e5febbe5a5016ed5d9a2564bea1e5bc92e4e8..a218aeaf56a002396bf0d0db9ef0e457a7445c9f 100644 (file)
@@ -99,6 +99,17 @@ struct test_data_offset offsets[] = {
        },
 };
 
+/* moved here from util/dso.c for compatibility */
+static int dso__data_fd(struct dso *dso, struct machine *machine)
+{
+       int fd = dso__data_get_fd(dso, machine);
+
+       if (fd >= 0)
+               dso__data_put_fd(dso);
+
+       return fd;
+}
+
 int test__dso_data(void)
 {
        struct machine machine;
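
The local dso__data_fd() wrapper exists because the global helper was replaced by a dso__data_get_fd()/dso__data_put_fd() pair, which pins the descriptor so the dso fd cache cannot close it mid-use; this test only probes availability, so it puts the fd straight away. A toy, self-contained model of that get/put discipline (only the get/put names above are the real perf API):

    #include <fcntl.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Toy fd cache: get pins the fd, put unpins it so the
     * cache is free to close it later. */
    static int cached_fd = -1;
    static int pinned;

    static int cache_get_fd(const char *path)
    {
            if (cached_fd < 0)
                    cached_fd = open(path, O_RDONLY);
            if (cached_fd >= 0)
                    pinned++;
            return cached_fd;
    }

    static void cache_put_fd(void)
    {
            pinned--;       /* once unpinned, the cache may close it */
    }

    int main(void)
    {
            char buf[16];
            int fd = cache_get_fd("/etc/passwd");

            if (fd >= 0) {
                    ssize_t n = read(fd, buf, sizeof(buf));
                    cache_put_fd();         /* release only after the I/O */
                    printf("read %zd bytes, pinned=%d\n", n, pinned);
            }
            return 0;
    }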
@@ -155,7 +166,7 @@ int test__dso_data(void)
                free(buf);
        }
 
-       dso__delete(dso);
+       dso__put(dso);
        unlink(file);
        return 0;
 }
@@ -215,7 +226,7 @@ static void dsos__delete(int cnt)
                struct dso *dso = dsos[i];
 
                unlink(dso->name);
-               dso__delete(dso);
+               dso__put(dso);
        }
 
        free(dsos);
index 0bf06bec68c7e9786668990ad399b578326726c5..40b36c4624275a360d4a0f3226eaf850d6d0e825 100644 (file)
@@ -28,7 +28,7 @@ static int init_live_machine(struct machine *machine)
        pid_t pid = getpid();
 
        return perf_event__synthesize_mmap_events(NULL, &event, pid, pid,
-                                                 mmap_handler, machine, true);
+                                                 mmap_handler, machine, true, 500);
 }
 
 #define MAX_STACK 8
@@ -170,6 +170,7 @@ int test__dwarf_unwind(void)
        }
 
        err = krava_1(thread);
+       thread__put(thread);
 
  out:
        machine__delete_threads(machine);
index b8d8341b383e7bc123c29301eff6fbe6e98be332..3fa715987a5ec2693e2bcdb31a33e3f20616c136 100644 (file)
@@ -23,7 +23,7 @@ static int perf_evsel__roundtrip_cache_name_test(void)
                        for (i = 0; i < PERF_COUNT_HW_CACHE_RESULT_MAX; i++) {
                                __perf_evsel__hw_cache_type_op_res_name(type, op, i,
                                                                        name, sizeof(name));
-                               err = parse_events(evlist, name);
+                               err = parse_events(evlist, name, NULL);
                                if (err)
                                        ret = err;
                        }
@@ -71,7 +71,7 @@ static int __perf_evsel__name_array_test(const char *names[], int nr_names)
                 return -ENOMEM;
 
        for (i = 0; i < nr_names; ++i) {
-               err = parse_events(evlist, names[i]);
+               err = parse_events(evlist, names[i], NULL);
                if (err) {
                        pr_debug("failed to parse event '%s', err %d\n",
                                 names[i], err);
index a62c091345163f70ea72ad2cb9dbc2297ae2da1a..ce80b274b097332d02b5502fb0c5b88fc6d6016a 100644 (file)
@@ -96,6 +96,7 @@ struct machine *setup_fake_machine(struct machines *machines)
                        goto out;
 
                thread__set_comm(thread, fake_threads[i].comm, 0);
+               thread__put(thread);
        }
 
        for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
@@ -120,8 +121,7 @@ struct machine *setup_fake_machine(struct machines *machines)
                size_t k;
                struct dso *dso;
 
-               dso = __dsos__findnew(&machine->user_dsos,
-                                     fake_symbols[i].dso_name);
+               dso = machine__findnew_dso(machine, fake_symbols[i].dso_name);
                if (dso == NULL)
                        goto out;
 
@@ -134,11 +134,15 @@ struct machine *setup_fake_machine(struct machines *machines)
 
                        sym = symbol__new(fsym->start, fsym->length,
                                          STB_GLOBAL, fsym->name);
-                       if (sym == NULL)
+                       if (sym == NULL) {
+                               dso__put(dso);
                                goto out;
+                       }
 
                        symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
                }
+
+               dso__put(dso);
        }
 
        return machine;
index 18619966454c572a0f3c0a1b2330818fc6e1ffe4..7d82c8be5e360da5f89b1ff7b569a798c5a34fe0 100644 (file)
@@ -87,6 +87,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
                        },
                };
                struct hist_entry_iter iter = {
+                       .evsel = evsel,
+                       .sample = &sample,
                        .hide_unresolved = false,
                };
 
@@ -104,9 +106,11 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
                                                  &sample) < 0)
                        goto out;
 
-               if (hist_entry_iter__add(&iter, &al, evsel, &sample,
-                                        PERF_MAX_STACK_DEPTH, NULL) < 0)
+               if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+                                        NULL) < 0) {
+                       addr_location__put(&al);
                        goto out;
+               }
 
                fake_samples[i].thread = al.thread;
                fake_samples[i].map = al.map;
@@ -695,7 +699,7 @@ int test__hists_cumulate(void)
 
        TEST_ASSERT_VAL("No memory", evlist);
 
-       err = parse_events(evlist, "cpu-clock");
+       err = parse_events(evlist, "cpu-clock", NULL);
        if (err)
                goto out;
 
index 59e53db7914c0ad6100ab2e616cdf21e39efea46..ce48775e6ada13886000013183908f3f6b63f26d 100644 (file)
@@ -63,6 +63,8 @@ static int add_hist_entries(struct perf_evlist *evlist,
                                },
                        };
                        struct hist_entry_iter iter = {
+                               .evsel = evsel,
+                               .sample = &sample,
                                .ops = &hist_iter_normal,
                                .hide_unresolved = false,
                        };
@@ -81,9 +83,11 @@ static int add_hist_entries(struct perf_evlist *evlist,
                                                          &sample) < 0)
                                goto out;
 
-                       if (hist_entry_iter__add(&iter, &al, evsel, &sample,
-                                                PERF_MAX_STACK_DEPTH, NULL) < 0)
+                       if (hist_entry_iter__add(&iter, &al,
+                                                PERF_MAX_STACK_DEPTH, NULL) < 0) {
+                               addr_location__put(&al);
                                goto out;
+                       }
 
                        fake_samples[i].thread = al.thread;
                        fake_samples[i].map = al.map;
@@ -108,10 +112,10 @@ int test__hists_filter(void)
 
        TEST_ASSERT_VAL("No memory", evlist);
 
-       err = parse_events(evlist, "cpu-clock");
+       err = parse_events(evlist, "cpu-clock", NULL);
        if (err)
                goto out;
-       err = parse_events(evlist, "task-clock");
+       err = parse_events(evlist, "task-clock", NULL);
        if (err)
                goto out;
 
index 278ba8344c236d000a0388c3ef1c18b60350f554..8c102b0114249708e4ae2059c81732ab23793c4b 100644 (file)
@@ -91,8 +91,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
 
                        he = __hists__add_entry(hists, &al, NULL,
                                                NULL, NULL, 1, 1, 0, true);
-                       if (he == NULL)
+                       if (he == NULL) {
+                               addr_location__put(&al);
                                goto out;
+                       }
 
                        fake_common_samples[k].thread = al.thread;
                        fake_common_samples[k].map = al.map;
@@ -115,8 +117,10 @@ static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
 
                        he = __hists__add_entry(hists, &al, NULL,
                                                NULL, NULL, 1, 1, 0, true);
-                       if (he == NULL)
+                       if (he == NULL) {
+                               addr_location__put(&al);
                                goto out;
+                       }
 
                        fake_samples[i][k].thread = al.thread;
                        fake_samples[i][k].map = al.map;
@@ -282,10 +286,10 @@ int test__hists_link(void)
        if (evlist == NULL)
                 return -ENOMEM;
 
-       err = parse_events(evlist, "cpu-clock");
+       err = parse_events(evlist, "cpu-clock", NULL);
        if (err)
                goto out;
-       err = parse_events(evlist, "task-clock");
+       err = parse_events(evlist, "task-clock", NULL);
        if (err)
                goto out;
 
index b52c9faea22450ed4092d67acdb1eb15ce15c6a8..adbebc852cc8b58886a2618f973815f9284b7b3b 100644 (file)
@@ -57,6 +57,8 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
                        },
                };
                struct hist_entry_iter iter = {
+                       .evsel = evsel,
+                       .sample = &sample,
                        .ops = &hist_iter_normal,
                        .hide_unresolved = false,
                };
@@ -70,9 +72,11 @@ static int add_hist_entries(struct hists *hists, struct machine *machine)
                                                  &sample) < 0)
                        goto out;
 
-               if (hist_entry_iter__add(&iter, &al, evsel, &sample,
-                                        PERF_MAX_STACK_DEPTH, NULL) < 0)
+               if (hist_entry_iter__add(&iter, &al, PERF_MAX_STACK_DEPTH,
+                                        NULL) < 0) {
+                       addr_location__put(&al);
                        goto out;
+               }
 
                fake_samples[i].thread = al.thread;
                fake_samples[i].map = al.map;
@@ -590,7 +594,7 @@ int test__hists_output(void)
 
        TEST_ASSERT_VAL("No memory", evlist);
 
-       err = parse_events(evlist, "cpu-clock");
+       err = parse_events(evlist, "cpu-clock", NULL);
        if (err)
                goto out;
 
index 7a5ab7b0b8f698146794be7584af453a446f79ee..5b171d1e338bdd26bcf1343f58e8b0bdb314b71c 100644 (file)
@@ -78,8 +78,8 @@ int test__keep_tracking(void)
 
        perf_evlist__set_maps(evlist, cpus, threads);
 
-       CHECK__(parse_events(evlist, "dummy:u"));
-       CHECK__(parse_events(evlist, "cycles:u"));
+       CHECK__(parse_events(evlist, "dummy:u", NULL));
+       CHECK__(parse_events(evlist, "cycles:u", NULL));
 
        perf_evlist__config(evlist, &opts);
 
index e8d7cbb9320c58c987de7743bf4c08bcd8314361..08c433b4bf4f30c8f69307ba3fd5b0ec21802e2f 100644 (file)
@@ -34,9 +34,21 @@ static int test(const char *path, bool alloc_name, bool alloc_ext,
        return 0;
 }
 
+static int test_is_kernel_module(const char *path, int cpumode, bool expect)
+{
+       TEST_ASSERT_VAL("is_kernel_module",
+                       (!!is_kernel_module(path, cpumode)) == (!!expect));
+       pr_debug("%s (cpumode: %d) - is_kernel_module: %s\n",
+                       path, cpumode, expect ? "true" : "false");
+       return 0;
+}
+
 #define T(path, an, ae, k, c, n, e) \
        TEST_ASSERT_VAL("failed", !test(path, an, ae, k, c, n, e))
 
+#define M(path, c, e) \
+       TEST_ASSERT_VAL("failed", !test_is_kernel_module(path, c, e))
+
 int test__kmod_path__parse(void)
 {
        /* path                alloc_name  alloc_ext   kmod  comp   name     ext */
@@ -44,30 +56,90 @@ int test__kmod_path__parse(void)
        T("/xxxx/xxxx/x-x.ko", false     , true      , true, false, NULL   , NULL);
        T("/xxxx/xxxx/x-x.ko", true      , false     , true, false, "[x_x]", NULL);
        T("/xxxx/xxxx/x-x.ko", false     , false     , true, false, NULL   , NULL);
+       M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+       M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_KERNEL, true);
+       M("/xxxx/xxxx/x-x.ko", PERF_RECORD_MISC_USER, false);
 
        /* path                alloc_name  alloc_ext   kmod  comp  name   ext */
        T("/xxxx/xxxx/x.ko.gz", true     , true      , true, true, "[x]", "gz");
        T("/xxxx/xxxx/x.ko.gz", false    , true      , true, true, NULL , "gz");
        T("/xxxx/xxxx/x.ko.gz", true     , false     , true, true, "[x]", NULL);
        T("/xxxx/xxxx/x.ko.gz", false    , false     , true, true, NULL , NULL);
+       M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+       M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
+       M("/xxxx/xxxx/x.ko.gz", PERF_RECORD_MISC_USER, false);
 
        /* path              alloc_name  alloc_ext  kmod   comp  name    ext */
        T("/xxxx/xxxx/x.gz", true      , true     , false, true, "x.gz" ,"gz");
        T("/xxxx/xxxx/x.gz", false     , true     , false, true, NULL   ,"gz");
        T("/xxxx/xxxx/x.gz", true      , false    , false, true, "x.gz" , NULL);
        T("/xxxx/xxxx/x.gz", false     , false    , false, true, NULL   , NULL);
+       M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+       M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_KERNEL, false);
+       M("/xxxx/xxxx/x.gz", PERF_RECORD_MISC_USER, false);
 
        /* path   alloc_name  alloc_ext  kmod   comp  name     ext */
        T("x.gz", true      , true     , false, true, "x.gz", "gz");
        T("x.gz", false     , true     , false, true, NULL  , "gz");
        T("x.gz", true      , false    , false, true, "x.gz", NULL);
        T("x.gz", false     , false    , false, true, NULL  , NULL);
+       M("x.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+       M("x.gz", PERF_RECORD_MISC_KERNEL, false);
+       M("x.gz", PERF_RECORD_MISC_USER, false);
 
        /* path      alloc_name  alloc_ext  kmod  comp  name  ext */
        T("x.ko.gz", true      , true     , true, true, "[x]", "gz");
        T("x.ko.gz", false     , true     , true, true, NULL , "gz");
        T("x.ko.gz", true      , false    , true, true, "[x]", NULL);
        T("x.ko.gz", false     , false    , true, true, NULL , NULL);
+       M("x.ko.gz", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+       M("x.ko.gz", PERF_RECORD_MISC_KERNEL, true);
+       M("x.ko.gz", PERF_RECORD_MISC_USER, false);
+
+       /* path            alloc_name  alloc_ext  kmod  comp   name             ext */
+       T("[test_module]", true      , true     , true, false, "[test_module]", NULL);
+       T("[test_module]", false     , true     , true, false, NULL           , NULL);
+       T("[test_module]", true      , false    , true, false, "[test_module]", NULL);
+       T("[test_module]", false     , false    , true, false, NULL           , NULL);
+       M("[test_module]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+       M("[test_module]", PERF_RECORD_MISC_KERNEL, true);
+       M("[test_module]", PERF_RECORD_MISC_USER, false);
+
+       /* path            alloc_name  alloc_ext  kmod  comp   name             ext */
+       T("[test.module]", true      , true     , true, false, "[test.module]", NULL);
+       T("[test.module]", false     , true     , true, false, NULL           , NULL);
+       T("[test.module]", true      , false    , true, false, "[test.module]", NULL);
+       T("[test.module]", false     , false    , true, false, NULL           , NULL);
+       M("[test.module]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, true);
+       M("[test.module]", PERF_RECORD_MISC_KERNEL, true);
+       M("[test.module]", PERF_RECORD_MISC_USER, false);
+
+       /* path     alloc_name  alloc_ext  kmod   comp   name      ext */
+       T("[vdso]", true      , true     , false, false, "[vdso]", NULL);
+       T("[vdso]", false     , true     , false, false, NULL    , NULL);
+       T("[vdso]", true      , false    , false, false, "[vdso]", NULL);
+       T("[vdso]", false     , false    , false, false, NULL    , NULL);
+       M("[vdso]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+       M("[vdso]", PERF_RECORD_MISC_KERNEL, false);
+       M("[vdso]", PERF_RECORD_MISC_USER, false);
+
+       /* path         alloc_name  alloc_ext  kmod   comp   name          ext */
+       T("[vsyscall]", true      , true     , false, false, "[vsyscall]", NULL);
+       T("[vsyscall]", false     , true     , false, false, NULL        , NULL);
+       T("[vsyscall]", true      , false    , false, false, "[vsyscall]", NULL);
+       T("[vsyscall]", false     , false    , false, false, NULL        , NULL);
+       M("[vsyscall]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+       M("[vsyscall]", PERF_RECORD_MISC_KERNEL, false);
+       M("[vsyscall]", PERF_RECORD_MISC_USER, false);
+
+       /* path                alloc_name  alloc_ext  kmod   comp   name      ext */
+       T("[kernel.kallsyms]", true      , true     , false, false, "[kernel.kallsyms]", NULL);
+       T("[kernel.kallsyms]", false     , true     , false, false, NULL               , NULL);
+       T("[kernel.kallsyms]", true      , false    , false, false, "[kernel.kallsyms]", NULL);
+       T("[kernel.kallsyms]", false     , false    , false, false, NULL               , NULL);
+       M("[kernel.kallsyms]", PERF_RECORD_MISC_CPUMODE_UNKNOWN, false);
+       M("[kernel.kallsyms]", PERF_RECORD_MISC_KERNEL, false);
+       M("[kernel.kallsyms]", PERF_RECORD_MISC_USER, false);
 
        return 0;
 }
index bff85324f799bd1eeba79413ebf1433faac0a2e1..65280d28662e4c72177a20a3cfa56f968a10c359 100644 (file)
@@ -32,6 +32,7 @@ make_no_backtrace   := NO_BACKTRACE=1
 make_no_libnuma     := NO_LIBNUMA=1
 make_no_libaudit    := NO_LIBAUDIT=1
 make_no_libbionic   := NO_LIBBIONIC=1
+make_no_auxtrace    := NO_AUXTRACE=1
 make_tags           := tags
 make_cscope         := cscope
 make_help           := help
@@ -52,7 +53,7 @@ make_static         := LDFLAGS=-static
 make_minimal        := NO_LIBPERL=1 NO_LIBPYTHON=1 NO_NEWT=1 NO_GTK2=1
 make_minimal        += NO_DEMANGLE=1 NO_LIBELF=1 NO_LIBUNWIND=1 NO_BACKTRACE=1
 make_minimal        += NO_LIBNUMA=1 NO_LIBAUDIT=1 NO_LIBBIONIC=1
-make_minimal        += NO_LIBDW_DWARF_UNWIND=1
+make_minimal        += NO_LIBDW_DWARF_UNWIND=1 NO_AUXTRACE=1
 
 # $(run) contains all available tests
 run := make_pure
@@ -74,6 +75,7 @@ run += make_no_backtrace
 run += make_no_libnuma
 run += make_no_libaudit
 run += make_no_libbionic
+run += make_no_auxtrace
 run += make_help
 run += make_doc
 run += make_perf_o
@@ -223,7 +225,19 @@ tarpkg:
        echo "- $@: $$cmd" && echo $$cmd > $@ && \
        ( eval $$cmd ) >> $@ 2>&1
 
-all: $(run) $(run_O) tarpkg
+make_kernelsrc:
+       @echo " - make -C <kernelsrc> tools/perf"
+       $(call clean); \
+       (make -C ../.. tools/perf) > $@ 2>&1 && \
+       test -x perf && rm -f $@ || (cat $@ ; false)
+
+make_kernelsrc_tools:
+       @echo " - make -C <kernelsrc>/tools perf"
+       $(call clean); \
+       (make -C ../../tools perf) > $@ 2>&1 && \
+       test -x perf && rm -f $@ || (cat $@ ; false)
+
+all: $(run) $(run_O) tarpkg make_kernelsrc make_kernelsrc_tools
        @echo OK
 
 out: $(run_O)
index 9b9622a33932dadf2e98bd249850c62f16ac66e5..5855cf47121003479ae63e859059a5ad8809c5ec 100644 (file)
@@ -23,10 +23,8 @@ int test__basic_mmap(void)
        struct cpu_map *cpus;
        struct perf_evlist *evlist;
        cpu_set_t cpu_set;
-       const char *syscall_names[] = { "getsid", "getppid", "getpgrp",
-                                       "getpgid", };
-       pid_t (*syscalls[])(void) = { (void *)getsid, getppid, getpgrp,
-                                     (void*)getpgid };
+       const char *syscall_names[] = { "getsid", "getppid", "getpgid", };
+       pid_t (*syscalls[])(void) = { (void *)getsid, getppid, (void*)getpgid };
 #define nsyscalls ARRAY_SIZE(syscall_names)
        unsigned int nr_events[nsyscalls],
                     expected_nr_events[nsyscalls], i, j;
index 2113f1c8611fb569b0fb6bc7f676477e7de0a666..7f48efa7e295f63a72f0aa083857658ce68c45cb 100644 (file)
@@ -129,7 +129,7 @@ static int synth_all(struct machine *machine)
 {
        return perf_event__synthesize_threads(NULL,
                                              perf_event__process,
-                                             machine, 0);
+                                             machine, 0, 500);
 }
 
 static int synth_process(struct machine *machine)
@@ -141,7 +141,7 @@ static int synth_process(struct machine *machine)
 
        err = perf_event__synthesize_thread_map(NULL, map,
                                                perf_event__process,
-                                               machine, 0);
+                                               machine, 0, 500);
 
        thread_map__delete(map);
        return err;
@@ -191,6 +191,8 @@ static int mmap_events(synth_cb synth)
                                      PERF_RECORD_MISC_USER, MAP__FUNCTION,
                                      (unsigned long) (td->map + 1), &al);
 
+               thread__put(thread);
+
                if (!al.map) {
                        pr_debug("failed, couldn't find map\n");
                        err = -1;
diff --git a/tools/perf/tests/open-syscall-all-cpus.c b/tools/perf/tests/open-syscall-all-cpus.c
deleted file mode 100644 (file)
index 3ec885c..0000000
+++ /dev/null
@@ -1,115 +0,0 @@
-#include "evsel.h"
-#include "tests.h"
-#include "thread_map.h"
-#include "cpumap.h"
-#include "debug.h"
-
-int test__open_syscall_event_on_all_cpus(void)
-{
-       int err = -1, fd, cpu;
-       struct cpu_map *cpus;
-       struct perf_evsel *evsel;
-       unsigned int nr_open_calls = 111, i;
-       cpu_set_t cpu_set;
-       struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
-       char sbuf[STRERR_BUFSIZE];
-
-       if (threads == NULL) {
-               pr_debug("thread_map__new\n");
-               return -1;
-       }
-
-       cpus = cpu_map__new(NULL);
-       if (cpus == NULL) {
-               pr_debug("cpu_map__new\n");
-               goto out_thread_map_delete;
-       }
-
-       CPU_ZERO(&cpu_set);
-
-       evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
-       if (evsel == NULL) {
-               if (tracefs_configured())
-                       pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
-               else if (debugfs_configured())
-                       pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
-               else
-                       pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
-               goto out_thread_map_delete;
-       }
-
-       if (perf_evsel__open(evsel, cpus, threads) < 0) {
-               pr_debug("failed to open counter: %s, "
-                        "tweak /proc/sys/kernel/perf_event_paranoid?\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
-               goto out_evsel_delete;
-       }
-
-       for (cpu = 0; cpu < cpus->nr; ++cpu) {
-               unsigned int ncalls = nr_open_calls + cpu;
-               /*
-                * XXX eventually lift this restriction in a way that
-                * keeps perf building on older glibc installations
-                * without CPU_ALLOC. 1024 cpus in 2010 still seems
-                * a reasonable upper limit tho :-)
-                */
-               if (cpus->map[cpu] >= CPU_SETSIZE) {
-                       pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
-                       continue;
-               }
-
-               CPU_SET(cpus->map[cpu], &cpu_set);
-               if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
-                       pr_debug("sched_setaffinity() failed on CPU %d: %s ",
-                                cpus->map[cpu],
-                                strerror_r(errno, sbuf, sizeof(sbuf)));
-                       goto out_close_fd;
-               }
-               for (i = 0; i < ncalls; ++i) {
-                       fd = open("/etc/passwd", O_RDONLY);
-                       close(fd);
-               }
-               CPU_CLR(cpus->map[cpu], &cpu_set);
-       }
-
-       /*
-        * Here we need to explicitely preallocate the counts, as if
-        * we use the auto allocation it will allocate just for 1 cpu,
-        * as we start by cpu 0.
-        */
-       if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
-               pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
-               goto out_close_fd;
-       }
-
-       err = 0;
-
-       for (cpu = 0; cpu < cpus->nr; ++cpu) {
-               unsigned int expected;
-
-               if (cpus->map[cpu] >= CPU_SETSIZE)
-                       continue;
-
-               if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
-                       pr_debug("perf_evsel__read_on_cpu\n");
-                       err = -1;
-                       break;
-               }
-
-               expected = nr_open_calls + cpu;
-               if (evsel->counts->cpu[cpu].val != expected) {
-                       pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
-                                expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
-                       err = -1;
-               }
-       }
-
-       perf_evsel__free_counts(evsel);
-out_close_fd:
-       perf_evsel__close_fd(evsel, 1, threads->nr);
-out_evsel_delete:
-       perf_evsel__delete(evsel);
-out_thread_map_delete:
-       thread_map__delete(threads);
-       return err;
-}
diff --git a/tools/perf/tests/open-syscall-tp-fields.c b/tools/perf/tests/open-syscall-tp-fields.c
deleted file mode 100644 (file)
index 127dcae..0000000
+++ /dev/null
@@ -1,121 +0,0 @@
-#include "perf.h"
-#include "evlist.h"
-#include "evsel.h"
-#include "thread_map.h"
-#include "tests.h"
-#include "debug.h"
-
-int test__syscall_open_tp_fields(void)
-{
-       struct record_opts opts = {
-               .target = {
-                       .uid = UINT_MAX,
-                       .uses_mmap = true,
-               },
-               .no_buffering = true,
-               .freq         = 1,
-               .mmap_pages   = 256,
-               .raw_samples  = true,
-       };
-       const char *filename = "/etc/passwd";
-       int flags = O_RDONLY | O_DIRECTORY;
-       struct perf_evlist *evlist = perf_evlist__new();
-       struct perf_evsel *evsel;
-       int err = -1, i, nr_events = 0, nr_polls = 0;
-       char sbuf[STRERR_BUFSIZE];
-
-       if (evlist == NULL) {
-               pr_debug("%s: perf_evlist__new\n", __func__);
-               goto out;
-       }
-
-       evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
-       if (evsel == NULL) {
-               pr_debug("%s: perf_evsel__newtp\n", __func__);
-               goto out_delete_evlist;
-       }
-
-       perf_evlist__add(evlist, evsel);
-
-       err = perf_evlist__create_maps(evlist, &opts.target);
-       if (err < 0) {
-               pr_debug("%s: perf_evlist__create_maps\n", __func__);
-               goto out_delete_evlist;
-       }
-
-       perf_evsel__config(evsel, &opts);
-
-       evlist->threads->map[0] = getpid();
-
-       err = perf_evlist__open(evlist);
-       if (err < 0) {
-               pr_debug("perf_evlist__open: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
-               goto out_delete_evlist;
-       }
-
-       err = perf_evlist__mmap(evlist, UINT_MAX, false);
-       if (err < 0) {
-               pr_debug("perf_evlist__mmap: %s\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
-               goto out_delete_evlist;
-       }
-
-       perf_evlist__enable(evlist);
-
-       /*
-        * Generate the event:
-        */
-       open(filename, flags);
-
-       while (1) {
-               int before = nr_events;
-
-               for (i = 0; i < evlist->nr_mmaps; i++) {
-                       union perf_event *event;
-
-                       while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
-                               const u32 type = event->header.type;
-                               int tp_flags;
-                               struct perf_sample sample;
-
-                               ++nr_events;
-
-                               if (type != PERF_RECORD_SAMPLE) {
-                                       perf_evlist__mmap_consume(evlist, i);
-                                       continue;
-                               }
-
-                               err = perf_evsel__parse_sample(evsel, event, &sample);
-                               if (err) {
-                                       pr_err("Can't parse sample, err = %d\n", err);
-                                       goto out_delete_evlist;
-                               }
-
-                               tp_flags = perf_evsel__intval(evsel, &sample, "flags");
-
-                               if (flags != tp_flags) {
-                                       pr_debug("%s: Expected flags=%#x, got %#x\n",
-                                                __func__, flags, tp_flags);
-                                       goto out_delete_evlist;
-                               }
-
-                               goto out_ok;
-                       }
-               }
-
-               if (nr_events == before)
-                       perf_evlist__poll(evlist, 10);
-
-               if (++nr_polls > 5) {
-                       pr_debug("%s: no events!\n", __func__);
-                       goto out_delete_evlist;
-               }
-       }
-out_ok:
-       err = 0;
-out_delete_evlist:
-       perf_evlist__delete(evlist);
-out:
-       return err;
-}
diff --git a/tools/perf/tests/open-syscall.c b/tools/perf/tests/open-syscall.c
deleted file mode 100644 (file)
index 07aa319..0000000
+++ /dev/null
@@ -1,61 +0,0 @@
-#include "thread_map.h"
-#include "evsel.h"
-#include "debug.h"
-#include "tests.h"
-
-int test__open_syscall_event(void)
-{
-       int err = -1, fd;
-       struct perf_evsel *evsel;
-       unsigned int nr_open_calls = 111, i;
-       struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
-       char sbuf[STRERR_BUFSIZE];
-
-       if (threads == NULL) {
-               pr_debug("thread_map__new\n");
-               return -1;
-       }
-
-       evsel = perf_evsel__newtp("syscalls", "sys_enter_open");
-       if (evsel == NULL) {
-               if (tracefs_configured())
-                       pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
-               else if (debugfs_configured())
-                       pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
-               else
-                       pr_debug("Neither tracefs or debugfs is enabled in this kernel\n");
-               goto out_thread_map_delete;
-       }
-
-       if (perf_evsel__open_per_thread(evsel, threads) < 0) {
-               pr_debug("failed to open counter: %s, "
-                        "tweak /proc/sys/kernel/perf_event_paranoid?\n",
-                        strerror_r(errno, sbuf, sizeof(sbuf)));
-               goto out_evsel_delete;
-       }
-
-       for (i = 0; i < nr_open_calls; ++i) {
-               fd = open("/etc/passwd", O_RDONLY);
-               close(fd);
-       }
-
-       if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
-               pr_debug("perf_evsel__read_on_cpu\n");
-               goto out_close_fd;
-       }
-
-       if (evsel->counts->cpu[0].val != nr_open_calls) {
-               pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
-                        nr_open_calls, evsel->counts->cpu[0].val);
-               goto out_close_fd;
-       }
-
-       err = 0;
-out_close_fd:
-       perf_evsel__close_fd(evsel, 1, threads->nr);
-out_evsel_delete:
-       perf_evsel__delete(evsel);
-out_thread_map_delete:
-       thread_map__delete(threads);
-       return err;
-}
diff --git a/tools/perf/tests/openat-syscall-all-cpus.c b/tools/perf/tests/openat-syscall-all-cpus.c
new file mode 100644 (file)
index 0000000..9a7a116
--- /dev/null
@@ -0,0 +1,116 @@
+#include "evsel.h"
+#include "tests.h"
+#include "thread_map.h"
+#include "cpumap.h"
+#include "debug.h"
+#include "stat.h"
+
+int test__openat_syscall_event_on_all_cpus(void)
+{
+       int err = -1, fd, cpu;
+       struct cpu_map *cpus;
+       struct perf_evsel *evsel;
+       unsigned int nr_openat_calls = 111, i;
+       cpu_set_t cpu_set;
+       struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+       char sbuf[STRERR_BUFSIZE];
+
+       if (threads == NULL) {
+               pr_debug("thread_map__new\n");
+               return -1;
+       }
+
+       cpus = cpu_map__new(NULL);
+       if (cpus == NULL) {
+               pr_debug("cpu_map__new\n");
+               goto out_thread_map_delete;
+       }
+
+       CPU_ZERO(&cpu_set);
+
+       evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
+       if (evsel == NULL) {
+               if (tracefs_configured())
+                       pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
+               else if (debugfs_configured())
+                       pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+               else
+                       pr_debug("Neither tracefs nor debugfs is enabled in this kernel\n");
+               goto out_thread_map_delete;
+       }
+
+       if (perf_evsel__open(evsel, cpus, threads) < 0) {
+               pr_debug("failed to open counter: %s, "
+                        "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+                        strerror_r(errno, sbuf, sizeof(sbuf)));
+               goto out_evsel_delete;
+       }
+
+       for (cpu = 0; cpu < cpus->nr; ++cpu) {
+               unsigned int ncalls = nr_openat_calls + cpu;
+               /*
+                * XXX eventually lift this restriction in a way that
+                * keeps perf building on older glibc installations
+                * without CPU_ALLOC. 1024 cpus in 2010 still seems
+                * a reasonable upper limit tho :-)
+                */
+               if (cpus->map[cpu] >= CPU_SETSIZE) {
+                       pr_debug("Ignoring CPU %d\n", cpus->map[cpu]);
+                       continue;
+               }
+
+               CPU_SET(cpus->map[cpu], &cpu_set);
+               if (sched_setaffinity(0, sizeof(cpu_set), &cpu_set) < 0) {
+                       pr_debug("sched_setaffinity() failed on CPU %d: %s ",
+                                cpus->map[cpu],
+                                strerror_r(errno, sbuf, sizeof(sbuf)));
+                       goto out_close_fd;
+               }
+               for (i = 0; i < ncalls; ++i) {
+                       fd = openat(0, "/etc/passwd", O_RDONLY);
+                       close(fd);
+               }
+               CPU_CLR(cpus->map[cpu], &cpu_set);
+       }
+
+       /*
+        * Here we need to explicitly preallocate the counts, as if
+        * we use the auto allocation it will allocate just for 1 cpu,
+        * as we start by cpu 0.
+        */
+       if (perf_evsel__alloc_counts(evsel, cpus->nr) < 0) {
+               pr_debug("perf_evsel__alloc_counts(ncpus=%d)\n", cpus->nr);
+               goto out_close_fd;
+       }
+
+       err = 0;
+
+       for (cpu = 0; cpu < cpus->nr; ++cpu) {
+               unsigned int expected;
+
+               if (cpus->map[cpu] >= CPU_SETSIZE)
+                       continue;
+
+               if (perf_evsel__read_on_cpu(evsel, cpu, 0) < 0) {
+                       pr_debug("perf_evsel__read_on_cpu\n");
+                       err = -1;
+                       break;
+               }
+
+               expected = nr_openat_calls + cpu;
+               if (evsel->counts->cpu[cpu].val != expected) {
+                       pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls on cpu %d, got %" PRIu64 "\n",
+                                expected, cpus->map[cpu], evsel->counts->cpu[cpu].val);
+                       err = -1;
+               }
+       }
+
+       perf_evsel__free_counts(evsel);
+out_close_fd:
+       perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+       perf_evsel__delete(evsel);
+out_thread_map_delete:
+       thread_map__delete(threads);
+       return err;
+}
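
The per-CPU loop above pins the test thread to one CPU at a time (CPU_SET plus sched_setaffinity) and issues a CPU-dependent number of openat() calls, so each per-CPU counter has a distinct expected value. A minimal standalone sketch of that pinning technique, with error handling trimmed:

    #define _GNU_SOURCE
    #include <sched.h>
    #include <stdio.h>
    #include <unistd.h>

    /* Pin ourselves to each online CPU in turn, as the test does;
     * CPU ids below CPU_SETSIZE are assumed, as in the XXX above. */
    int main(void)
    {
            long ncpus = sysconf(_SC_NPROCESSORS_ONLN);
            cpu_set_t set;

            for (long cpu = 0; cpu < ncpus; cpu++) {
                    CPU_ZERO(&set);
                    CPU_SET(cpu, &set);
                    if (sched_setaffinity(0, sizeof(set), &set) == 0)
                            printf("now running on cpu %ld\n", cpu);
            }
            return 0;
    }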
diff --git a/tools/perf/tests/openat-syscall-tp-fields.c b/tools/perf/tests/openat-syscall-tp-fields.c
new file mode 100644 (file)
index 0000000..6245221
--- /dev/null
@@ -0,0 +1,121 @@
+#include "perf.h"
+#include "evlist.h"
+#include "evsel.h"
+#include "thread_map.h"
+#include "tests.h"
+#include "debug.h"
+
+int test__syscall_openat_tp_fields(void)
+{
+       struct record_opts opts = {
+               .target = {
+                       .uid = UINT_MAX,
+                       .uses_mmap = true,
+               },
+               .no_buffering = true,
+               .freq         = 1,
+               .mmap_pages   = 256,
+               .raw_samples  = true,
+       };
+       const char *filename = "/etc/passwd";
+       int flags = O_RDONLY | O_DIRECTORY;
+       struct perf_evlist *evlist = perf_evlist__new();
+       struct perf_evsel *evsel;
+       int err = -1, i, nr_events = 0, nr_polls = 0;
+       char sbuf[STRERR_BUFSIZE];
+
+       if (evlist == NULL) {
+               pr_debug("%s: perf_evlist__new\n", __func__);
+               goto out;
+       }
+
+       evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
+       if (evsel == NULL) {
+               pr_debug("%s: perf_evsel__newtp\n", __func__);
+               goto out_delete_evlist;
+       }
+
+       perf_evlist__add(evlist, evsel);
+
+       err = perf_evlist__create_maps(evlist, &opts.target);
+       if (err < 0) {
+               pr_debug("%s: perf_evlist__create_maps\n", __func__);
+               goto out_delete_evlist;
+       }
+
+       perf_evsel__config(evsel, &opts);
+
+       evlist->threads->map[0] = getpid();
+
+       err = perf_evlist__open(evlist);
+       if (err < 0) {
+               pr_debug("perf_evlist__open: %s\n",
+                        strerror_r(errno, sbuf, sizeof(sbuf)));
+               goto out_delete_evlist;
+       }
+
+       err = perf_evlist__mmap(evlist, UINT_MAX, false);
+       if (err < 0) {
+               pr_debug("perf_evlist__mmap: %s\n",
+                        strerror_r(errno, sbuf, sizeof(sbuf)));
+               goto out_delete_evlist;
+       }
+
+       perf_evlist__enable(evlist);
+
+       /*
+        * Generate the event:
+        */
+       openat(AT_FDCWD, filename, flags);
+
+       while (1) {
+               int before = nr_events;
+
+               for (i = 0; i < evlist->nr_mmaps; i++) {
+                       union perf_event *event;
+
+                       while ((event = perf_evlist__mmap_read(evlist, i)) != NULL) {
+                               const u32 type = event->header.type;
+                               int tp_flags;
+                               struct perf_sample sample;
+
+                               ++nr_events;
+
+                               if (type != PERF_RECORD_SAMPLE) {
+                                       perf_evlist__mmap_consume(evlist, i);
+                                       continue;
+                               }
+
+                               err = perf_evsel__parse_sample(evsel, event, &sample);
+                               if (err) {
+                                       pr_err("Can't parse sample, err = %d\n", err);
+                                       goto out_delete_evlist;
+                               }
+
+                               tp_flags = perf_evsel__intval(evsel, &sample, "flags");
+
+                               if (flags != tp_flags) {
+                                       pr_debug("%s: Expected flags=%#x, got %#x\n",
+                                                __func__, flags, tp_flags);
+                                       goto out_delete_evlist;
+                               }
+
+                               goto out_ok;
+                       }
+               }
+
+               if (nr_events == before)
+                       perf_evlist__poll(evlist, 10);
+
+               if (++nr_polls > 5) {
+                       pr_debug("%s: no events!\n", __func__);
+                       goto out_delete_evlist;
+               }
+       }
+out_ok:
+       err = 0;
+out_delete_evlist:
+       perf_evlist__delete(evlist);
+out:
+       return err;
+}
diff --git a/tools/perf/tests/openat-syscall.c b/tools/perf/tests/openat-syscall.c
new file mode 100644 (file)
index 0000000..9f9491b
--- /dev/null
@@ -0,0 +1,61 @@
+#include "thread_map.h"
+#include "evsel.h"
+#include "debug.h"
+#include "tests.h"
+
+int test__openat_syscall_event(void)
+{
+       int err = -1, fd;
+       struct perf_evsel *evsel;
+       unsigned int nr_openat_calls = 111, i;
+       struct thread_map *threads = thread_map__new(-1, getpid(), UINT_MAX);
+       char sbuf[STRERR_BUFSIZE];
+
+       if (threads == NULL) {
+               pr_debug("thread_map__new\n");
+               return -1;
+       }
+
+       evsel = perf_evsel__newtp("syscalls", "sys_enter_openat");
+       if (evsel == NULL) {
+               if (tracefs_configured())
+                       pr_debug("is tracefs mounted on /sys/kernel/tracing?\n");
+               else if (debugfs_configured())
+                       pr_debug("is debugfs mounted on /sys/kernel/debug?\n");
+               else
+                       pr_debug("Neither tracefs nor debugfs is enabled in this kernel\n");
+               goto out_thread_map_delete;
+       }
+
+       if (perf_evsel__open_per_thread(evsel, threads) < 0) {
+               pr_debug("failed to open counter: %s, "
+                        "tweak /proc/sys/kernel/perf_event_paranoid?\n",
+                        strerror_r(errno, sbuf, sizeof(sbuf)));
+               goto out_evsel_delete;
+       }
+
+       for (i = 0; i < nr_openat_calls; ++i) {
+               fd = openat(0, "/etc/passwd", O_RDONLY);
+               close(fd);
+       }
+
+       if (perf_evsel__read_on_cpu(evsel, 0, 0) < 0) {
+               pr_debug("perf_evsel__read_on_cpu\n");
+               goto out_close_fd;
+       }
+
+       if (evsel->counts->cpu[0].val != nr_openat_calls) {
+               pr_debug("perf_evsel__read_on_cpu: expected to intercept %d calls, got %" PRIu64 "\n",
+                        nr_openat_calls, evsel->counts->cpu[0].val);
+               goto out_close_fd;
+       }
+
+       err = 0;
+out_close_fd:
+       perf_evsel__close_fd(evsel, 1, threads->nr);
+out_evsel_delete:
+       perf_evsel__delete(evsel);
+out_thread_map_delete:
+       thread_map__delete(threads);
+       return err;
+}
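
As a rough mental model for the perf_evsel__read_on_cpu() call above: with no
read_format bits set in the event attribute, reading a counting event's file
descriptor returns a single u64. A sketch under that assumption, where
event_fd is a descriptor obtained from perf_event_open(2):

        #include <stdint.h>
        #include <unistd.h>
        #include <errno.h>

        /* Read the current count from one perf event fd; with an empty
         * attr.read_format the kernel hands back exactly one u64. */
        static int read_counter(int event_fd, uint64_t *count)
        {
                ssize_t n = read(event_fd, count, sizeof(*count));

                if (n < 0)
                        return -errno;
                return n == sizeof(*count) ? 0 : -EIO;
        }
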
index 3de744961739c2c1502e0c0367c357b2f39c90a6..d76963f7ad3d4a0a117af25f69bcc364d475fa11 100644 (file)
@@ -427,7 +427,7 @@ static int test__checkevent_list(struct perf_evlist *evlist)
        TEST_ASSERT_VAL("wrong exclude_hv", !evsel->attr.exclude_hv);
        TEST_ASSERT_VAL("wrong precise_ip", !evsel->attr.precise_ip);
 
-       /* syscalls:sys_enter_open:k */
+       /* syscalls:sys_enter_openat:k */
        evsel = perf_evsel__next(evsel);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong sample_type",
@@ -665,7 +665,7 @@ static int test__group3(struct perf_evlist *evlist __maybe_unused)
        TEST_ASSERT_VAL("wrong number of entries", 5 == evlist->nr_entries);
        TEST_ASSERT_VAL("wrong number of groups", 2 == evlist->nr_groups);
 
-       /* group1 syscalls:sys_enter_open:H */
+       /* group1 syscalls:sys_enter_openat:H */
        evsel = leader = perf_evlist__first(evlist);
        TEST_ASSERT_VAL("wrong type", PERF_TYPE_TRACEPOINT == evsel->attr.type);
        TEST_ASSERT_VAL("wrong sample_type",
@@ -1293,7 +1293,7 @@ struct evlist_test {
 
 static struct evlist_test test__events[] = {
        {
-               .name  = "syscalls:sys_enter_open",
+               .name  = "syscalls:sys_enter_openat",
                .check = test__checkevent_tracepoint,
                .id    = 0,
        },
@@ -1353,7 +1353,7 @@ static struct evlist_test test__events[] = {
                .id    = 11,
        },
        {
-               .name  = "syscalls:sys_enter_open:k",
+               .name  = "syscalls:sys_enter_openat:k",
                .check = test__checkevent_tracepoint_modifier,
                .id    = 12,
        },
@@ -1408,7 +1408,7 @@ static struct evlist_test test__events[] = {
                .id    = 22,
        },
        {
-               .name  = "r1,syscalls:sys_enter_open:k,1:1:hp",
+               .name  = "r1,syscalls:sys_enter_openat:k,1:1:hp",
                .check = test__checkevent_list,
                .id    = 23,
        },
@@ -1443,7 +1443,7 @@ static struct evlist_test test__events[] = {
                .id    = 29,
        },
        {
-               .name  = "group1{syscalls:sys_enter_open:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
+               .name  = "group1{syscalls:sys_enter_openat:H,cycles:kppp},group2{cycles,1:3}:G,instructions:u",
                .check = test__group3,
                .id    = 30,
        },
@@ -1571,7 +1571,7 @@ static int test_event(struct evlist_test *e)
        if (evlist == NULL)
                return -ENOMEM;
 
-       ret = parse_events(evlist, e->name);
+       ret = parse_events(evlist, e->name, NULL);
        if (ret) {
                pr_debug("failed to parse event '%s', err %d\n",
                         e->name, ret);
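
The NULL added to every parse_events() call in this section appears to fill a
new, optional error-reporting parameter; the tests pass NULL because they only
need the return code. A hypothetical caller that wants diagnostics might look
like this (the idx/str/help field layout is an assumption, not taken from this
diff):

        struct parse_events_error err = { .idx = 0, .str = NULL, .help = NULL };

        if (parse_events(evlist, "cycles:u", &err))
                pr_debug("event parse error at column %d: %s\n",
                         err.idx, err.str ? err.str : "unknown");
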
index f238442b238a297d11e0622f275bf847e5b7db94..5f49484f1abc03bed80d035c508501914a577caf 100644 (file)
@@ -68,7 +68,7 @@ int test__perf_time_to_tsc(void)
 
        perf_evlist__set_maps(evlist, cpus, threads);
 
-       CHECK__(parse_events(evlist, "cycles:u"));
+       CHECK__(parse_events(evlist, "cycles:u", NULL));
 
        perf_evlist__config(evlist, &opts);
 
index eeb68bb1972d44e41bafa5fc10809700e4afc630..faa04e9d5d5fc751a1ac8082522fb045f108a060 100644 (file)
@@ -152,7 +152,8 @@ int test__pmu(void)
                if (ret)
                        break;
 
-               ret = perf_pmu__config_terms(&formats, &attr, terms, false);
+               ret = perf_pmu__config_terms(&formats, &attr, terms,
+                                            false, NULL);
                if (ret)
                        break;
 
index cc68648c7c555210c17c7c3d8d6b61eb14a39e73..0d31403ea593c7d2e689056af1670a18423a39ed 100644 (file)
@@ -347,7 +347,7 @@ int test__switch_tracking(void)
        perf_evlist__set_maps(evlist, cpus, threads);
 
        /* First event */
-       err = parse_events(evlist, "cpu-clock:u");
+       err = parse_events(evlist, "cpu-clock:u", NULL);
        if (err) {
                pr_debug("Failed to parse event dummy:u\n");
                goto out_err;
@@ -356,7 +356,7 @@ int test__switch_tracking(void)
        cpu_clocks_evsel = perf_evlist__last(evlist);
 
        /* Second event */
-       err = parse_events(evlist, "cycles:u");
+       err = parse_events(evlist, "cycles:u", NULL);
        if (err) {
                pr_debug("Failed to parse event cycles:u\n");
                goto out_err;
@@ -371,7 +371,7 @@ int test__switch_tracking(void)
                goto out;
        }
 
-       err = parse_events(evlist, sched_switch);
+       err = parse_events(evlist, sched_switch, NULL);
        if (err) {
                pr_debug("Failed to parse event %s\n", sched_switch);
                goto out_err;
@@ -401,7 +401,7 @@ int test__switch_tracking(void)
        perf_evsel__set_sample_bit(cycles_evsel, TIME);
 
        /* Fourth event */
-       err = parse_events(evlist, "dummy:u");
+       err = parse_events(evlist, "dummy:u", NULL);
        if (err) {
                pr_debug("Failed to parse event dummy:u\n");
                goto out_err;
index 52758a33f64c5679bb39a2a545320d9a3a44d303..8e5038b48ba8dfe3313d9c508156ae9b4ecb8c5a 100644 (file)
@@ -9,6 +9,15 @@ do {                                                                    \
        }                                                                \
 } while (0)
 
+#define TEST_ASSERT_EQUAL(text, val, expected)                          \
+do {                                                                    \
+       if ((val) != (expected)) {                                       \
+               pr_debug("FAILED %s:%d %s (%d != %d)\n",                 \
+                        __FILE__, __LINE__, text, val, expected);       \
+               return -1;                                               \
+       }                                                                \
+} while (0)
+
 enum {
        TEST_OK   =  0,
        TEST_FAIL = -1,
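
Note that TEST_ASSERT_EQUAL prints both sides with %d, so it is only meant for
int-compatible values; that fits its use below, where the compared values come
from atomic_read(). A trivial usage sketch:

        /* fails the enclosing test function, with file:line context,
         * when the two int values differ */
        int nr_threads = 3;     /* assumed local */
        TEST_ASSERT_EQUAL("wrong thread count", nr_threads, 3);
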
@@ -17,14 +26,14 @@ enum {
 
 /* Tests */
 int test__vmlinux_matches_kallsyms(void);
-int test__open_syscall_event(void);
-int test__open_syscall_event_on_all_cpus(void);
+int test__openat_syscall_event(void);
+int test__openat_syscall_event_on_all_cpus(void);
 int test__basic_mmap(void);
 int test__PERF_RECORD(void);
 int test__rdpmc(void);
 int test__perf_evsel__roundtrip_name_test(void);
 int test__perf_evsel__tp_sched_test(void);
-int test__syscall_open_tp_fields(void);
+int test__syscall_openat_tp_fields(void);
 int test__pmu(void);
 int test__attr(void);
 int test__dso_data(void);
@@ -53,7 +62,7 @@ int test__fdarray__filter(void);
 int test__fdarray__add(void);
 int test__kmod_path__parse(void);
 
-#if defined(__x86_64__) || defined(__i386__) || defined(__arm__)
+#if defined(__x86_64__) || defined(__i386__) || defined(__arm__) || defined(__aarch64__)
 #ifdef HAVE_DWARF_UNWIND_SUPPORT
 struct thread;
 struct perf_sample;
index b028499dd3cf0f5ee7530a6c869aaa40ef802521..01fabb19d74607bb9157f0dbddfd160d21c8cebf 100644 (file)
@@ -43,7 +43,7 @@ int test__thread_mg_share(void)
                        leader && t1 && t2 && t3 && other);
 
        mg = leader->mg;
-       TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 4);
+       TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 4);
 
        /* test the map groups pointer is shared */
        TEST_ASSERT_VAL("map groups don't match", mg == t1->mg);
@@ -58,33 +58,40 @@ int test__thread_mg_share(void)
        other_leader = machine__find_thread(machine, 4, 4);
        TEST_ASSERT_VAL("failed to find other leader", other_leader);
 
+       /*
+        * Ok, now that all the rbtree related operations are done,
+        * let's remove all of them from there so that we can do the
+        * refcounting tests.
+        */
+       machine__remove_thread(machine, leader);
+       machine__remove_thread(machine, t1);
+       machine__remove_thread(machine, t2);
+       machine__remove_thread(machine, t3);
+       machine__remove_thread(machine, other);
+       machine__remove_thread(machine, other_leader);
+
        other_mg = other->mg;
-       TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 2);
+       TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&other_mg->refcnt), 2);
 
        TEST_ASSERT_VAL("map groups don't match", other_mg == other_leader->mg);
 
        /* release thread group */
-       thread__delete(leader);
-       TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 3);
+       thread__put(leader);
+       TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 3);
 
-       thread__delete(t1);
-       TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 2);
+       thread__put(t1);
+       TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 2);
 
-       thread__delete(t2);
-       TEST_ASSERT_VAL("wrong refcnt", mg->refcnt == 1);
+       thread__put(t2);
+       TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&mg->refcnt), 1);
 
-       thread__delete(t3);
+       thread__put(t3);
 
        /* release other group  */
-       thread__delete(other_leader);
-       TEST_ASSERT_VAL("wrong refcnt", other_mg->refcnt == 1);
+       thread__put(other_leader);
+       TEST_ASSERT_EQUAL("wrong refcnt", atomic_read(&other_mg->refcnt), 1);
 
-       thread__delete(other);
-
-       /*
-        * Cannot call machine__delete_threads(machine) now,
-        * because we've already released all the threads.
-        */
+       thread__put(other);
 
        machines__exit(&machines);
        return 0;
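
The moves from thread__delete() to thread__put() above track threads becoming
reference counted: machine__remove_thread() drops the rbtree's reference, and
each put drops one more, with the object freed only when the count reaches
zero. A generic sketch of that pattern (plain C11 atomics, not the actual
thread.c code):

        #include <stdatomic.h>
        #include <stdlib.h>

        struct obj {
                atomic_int refcnt;
                /* ... payload ... */
        };

        static struct obj *obj__get(struct obj *o)
        {
                if (o)
                        atomic_fetch_add(&o->refcnt, 1);
                return o;
        }

        static void obj__put(struct obj *o)
        {
                /* free only when the last reference is dropped */
                if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
                        free(o);
        }
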
index 3d9088003a5b6d16da0038d0abbfa1fc427ed65b..b34c5fc829ae2b0da7389bd31649dd0389e40604 100644 (file)
@@ -23,9 +23,10 @@ int test__vmlinux_matches_kallsyms(void)
        int err = -1;
        struct rb_node *nd;
        struct symbol *sym;
-       struct map *kallsyms_map, *vmlinux_map;
+       struct map *kallsyms_map, *vmlinux_map, *map;
        struct machine kallsyms, vmlinux;
        enum map_type type = MAP__FUNCTION;
+       struct maps *maps = &vmlinux.kmaps.maps[type];
        u64 mem_start, mem_end;
 
        /*
@@ -184,8 +185,8 @@ detour:
 
        pr_info("Maps only in vmlinux:\n");
 
-       for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
-               struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
+       for (map = maps__first(maps); map; map = map__next(map)) {
+               struct map *pair;
                /*
                 * If it is the kernel, kallsyms is always "[kernel.kallsyms]", while
                 * the kernel will have the path for the vmlinux file being used,
@@ -193,22 +194,22 @@ detour:
                 * both cases.
                 */
                pair = map_groups__find_by_name(&kallsyms.kmaps, type,
-                                               (pos->dso->kernel ?
-                                                       pos->dso->short_name :
-                                                       pos->dso->name));
+                                               (map->dso->kernel ?
+                                                       map->dso->short_name :
+                                                       map->dso->name));
                if (pair)
                        pair->priv = 1;
                else
-                       map__fprintf(pos, stderr);
+                       map__fprintf(map, stderr);
        }
 
        pr_info("Maps in vmlinux with a different name in kallsyms:\n");
 
-       for (nd = rb_first(&vmlinux.kmaps.maps[type]); nd; nd = rb_next(nd)) {
-               struct map *pos = rb_entry(nd, struct map, rb_node), *pair;
+       for (map = maps__first(maps); map; map = map__next(map)) {
+               struct map *pair;
 
-               mem_start = vmlinux_map->unmap_ip(vmlinux_map, pos->start);
-               mem_end = vmlinux_map->unmap_ip(vmlinux_map, pos->end);
+               mem_start = vmlinux_map->unmap_ip(vmlinux_map, map->start);
+               mem_end = vmlinux_map->unmap_ip(vmlinux_map, map->end);
 
                pair = map_groups__find(&kallsyms.kmaps, type, mem_start);
                if (pair == NULL || pair->priv)
@@ -217,7 +218,7 @@ detour:
                if (pair->start == mem_start) {
                        pair->priv = 1;
                        pr_info(" %" PRIx64 "-%" PRIx64 " %" PRIx64 " %s in kallsyms as",
-                               pos->start, pos->end, pos->pgoff, pos->dso->name);
+                               map->start, map->end, map->pgoff, map->dso->name);
                        if (mem_end != pair->end)
                                pr_info(":\n*%" PRIx64 "-%" PRIx64 " %" PRIx64,
                                        pair->start, pair->end, pair->pgoff);
@@ -228,12 +229,11 @@ detour:
 
        pr_info("Maps only in kallsyms:\n");
 
-       for (nd = rb_first(&kallsyms.kmaps.maps[type]);
-            nd; nd = rb_next(nd)) {
-               struct map *pos = rb_entry(nd, struct map, rb_node);
+       maps = &kallsyms.kmaps.maps[type];
 
-               if (!pos->priv)
-                       map__fprintf(pos, stderr);
+       for (map = maps__first(maps); map; map = map__next(map)) {
+               if (!map->priv)
+                       map__fprintf(map, stderr);
        }
 out:
        machine__exit(&kallsyms);
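
The loops above swap open-coded rb_first()/rb_next() walks for the
maps__first()/map__next() accessors; presumably these are thin wrappers around
the same rbtree. A sketch of that assumption (the entries field name is a
guess):

        static struct map *maps__first(struct maps *maps)
        {
                struct rb_node *first = rb_first(&maps->entries);

                return first ? rb_entry(first, struct map, rb_node) : NULL;
        }

        static struct map *map__next(struct map *map)
        {
                struct rb_node *next = rb_next(&map->rb_node);

                return next ? rb_entry(next, struct map, rb_node) : NULL;
        }
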
index e5250eb2dd57866b1051736767dbe598bb4c78d2..5995a8bd7c6971dc4300f9ecc508135645420e7e 100644 (file)
 #include "../../util/evsel.h"
 #include <pthread.h>
 
+struct disasm_line_samples {
+       double          percent;
+       u64             nr;
+};
+
 struct browser_disasm_line {
-       struct rb_node  rb_node;
-       u32             idx;
-       int             idx_asm;
-       int             jump_sources;
+       struct rb_node                  rb_node;
+       u32                             idx;
+       int                             idx_asm;
+       int                             jump_sources;
        /*
         * actual length of this array is saved on the nr_events field
         * of the struct annotate_browser
         */
-       double          percent[1];
+       struct disasm_line_samples      samples[1];
 };
 
 static struct annotate_browser_opt {
@@ -28,7 +33,8 @@ static struct annotate_browser_opt {
             use_offset,
             jump_arrows,
             show_linenr,
-            show_nr_jumps;
+            show_nr_jumps,
+            show_total_period;
 } annotate_browser__opts = {
        .use_offset     = true,
        .jump_arrows    = true,
@@ -105,15 +111,20 @@ static void annotate_browser__write(struct ui_browser *browser, void *entry, int
        char bf[256];
 
        for (i = 0; i < ab->nr_events; i++) {
-               if (bdl->percent[i] > percent_max)
-                       percent_max = bdl->percent[i];
+               if (bdl->samples[i].percent > percent_max)
+                       percent_max = bdl->samples[i].percent;
        }
 
        if (dl->offset != -1 && percent_max != 0.0) {
                for (i = 0; i < ab->nr_events; i++) {
-                       ui_browser__set_percent_color(browser, bdl->percent[i],
+                       ui_browser__set_percent_color(browser,
+                                                     bdl->samples[i].percent,
                                                      current_entry);
-                       slsmg_printf("%6.2f ", bdl->percent[i]);
+                       if (annotate_browser__opts.show_total_period)
+                               slsmg_printf("%6" PRIu64 " ",
+                                            bdl->samples[i].nr);
+                       else
+                               slsmg_printf("%6.2f ", bdl->samples[i].percent);
                }
        } else {
                ui_browser__set_percent_color(browser, 0, current_entry);
@@ -273,9 +284,9 @@ static int disasm__cmp(struct browser_disasm_line *a,
        int i;
 
        for (i = 0; i < nr_pcnt; i++) {
-               if (a->percent[i] == b->percent[i])
+               if (a->samples[i].percent == b->samples[i].percent)
                        continue;
-               return a->percent[i] < b->percent[i];
+               return a->samples[i].percent < b->samples[i].percent;
        }
        return 0;
 }
@@ -366,14 +377,17 @@ static void annotate_browser__calc_percent(struct annotate_browser *browser,
                next = disasm__get_next_ip_line(&notes->src->source, pos);
 
                for (i = 0; i < browser->nr_events; i++) {
-                       bpos->percent[i] = disasm__calc_percent(notes,
+                       u64 nr_samples;
+
+                       bpos->samples[i].percent = disasm__calc_percent(notes,
                                                evsel->idx + i,
                                                pos->offset,
                                                next ? next->offset : len,
-                                               &path);
+                                               &path, &nr_samples);
+                       bpos->samples[i].nr = nr_samples;
 
-                       if (max_percent < bpos->percent[i])
-                               max_percent = bpos->percent[i];
+                       if (max_percent < bpos->samples[i].percent)
+                               max_percent = bpos->samples[i].percent;
                }
 
                if (max_percent < 0.01) {
@@ -737,6 +751,7 @@ static int annotate_browser__run(struct annotate_browser *browser,
                "n             Search next string\n"
                "o             Toggle disassembler output/simplified view\n"
                "s             Toggle source code view\n"
+               "t             Toggle total period view\n"
                "/             Search string\n"
                "k             Toggle line numbers\n"
                "r             Run available scripts\n"
@@ -812,6 +827,11 @@ show_sup_ins:
                                ui_helpline__puts("Actions are only available for 'callq', 'retq' & jump instructions.");
                        }
                        continue;
+               case 't':
+                       annotate_browser__opts.show_total_period =
+                         !annotate_browser__opts.show_total_period;
+                       annotate_browser__update_addr_width(browser);
+                       continue;
                case K_LEFT:
                case K_ESC:
                case 'q':
@@ -832,12 +852,20 @@ out:
 int map_symbol__tui_annotate(struct map_symbol *ms, struct perf_evsel *evsel,
                             struct hist_browser_timer *hbt)
 {
+       /* Set default value for show_total_period.  */
+       annotate_browser__opts.show_total_period =
+         symbol_conf.show_total_period;
+
        return symbol__tui_annotate(ms->sym, ms->map, evsel, hbt);
 }
 
 int hist_entry__tui_annotate(struct hist_entry *he, struct perf_evsel *evsel,
                             struct hist_browser_timer *hbt)
 {
+       /* reset the abort key so that Ctrl-C can be read as a key */
+       SLang_reset_tty();
+       SLang_init_tty(0, 0, 0);
+
        return map_symbol__tui_annotate(&he->ms, evsel, hbt);
 }
 
@@ -925,7 +953,8 @@ int symbol__tui_annotate(struct symbol *sym, struct map *map,
 
        if (perf_evsel__is_group_event(evsel)) {
                nr_pcnt = evsel->nr_members;
-               sizeof_bdl += sizeof(double) * (nr_pcnt - 1);
+               sizeof_bdl += sizeof(struct disasm_line_samples) *
+                 (nr_pcnt - 1);
        }
 
        if (symbol__annotate(sym, map, sizeof_bdl) < 0) {
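
The samples[1] member and the sizeof_bdl adjustment above are the classic
size-1 trailing array idiom: the struct carries one element, and group events
get space for the remaining nr_members - 1 appended at allocation time. The
arithmetic, reduced to a sketch:

        /* sketch of the trailing-array sizing used above */
        size_t sizeof_bdl = sizeof(struct browser_disasm_line) +
                            sizeof(struct disasm_line_samples) * (nr_events - 1);
        struct browser_disasm_line *bdl = calloc(1, sizeof_bdl);
        /* bdl->samples[0] .. bdl->samples[nr_events - 1] are now usable */
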
@@ -1002,6 +1031,7 @@ static struct annotate_config {
        ANNOTATE_CFG(show_linenr),
        ANNOTATE_CFG(show_nr_jumps),
        ANNOTATE_CFG(use_offset),
+       ANNOTATE_CFG(show_total_period),
 };
 
 #undef ANNOTATE_CFG
index 995b7a8596b1420e9764f08f18326d319f2a0a31..c42adb6000914554bf0d109e02d9ad5cec801313 100644 (file)
@@ -25,6 +25,9 @@ struct hist_browser {
        struct hists        *hists;
        struct hist_entry   *he_selection;
        struct map_symbol   *selection;
+       struct hist_browser_timer *hbt;
+       struct pstack       *pstack;
+       struct perf_session_env *env;
        int                  print_seq;
        bool                 show_dso;
        bool                 show_headers;
@@ -60,7 +63,7 @@ static int hist_browser__get_folding(struct hist_browser *browser)
                struct hist_entry *he =
                        rb_entry(nd, struct hist_entry, rb_node);
 
-               if (he->ms.unfolded)
+               if (he->unfolded)
                        unfolded_rows += he->nr_rows;
        }
        return unfolded_rows;
@@ -136,24 +139,19 @@ static char tree__folded_sign(bool unfolded)
        return unfolded ? '-' : '+';
 }
 
-static char map_symbol__folded(const struct map_symbol *ms)
-{
-       return ms->has_children ? tree__folded_sign(ms->unfolded) : ' ';
-}
-
 static char hist_entry__folded(const struct hist_entry *he)
 {
-       return map_symbol__folded(&he->ms);
+       return he->has_children ? tree__folded_sign(he->unfolded) : ' ';
 }
 
 static char callchain_list__folded(const struct callchain_list *cl)
 {
-       return map_symbol__folded(&cl->ms);
+       return cl->has_children ? tree__folded_sign(cl->unfolded) : ' ';
 }
 
-static void map_symbol__set_folding(struct map_symbol *ms, bool unfold)
+static void callchain_list__set_folding(struct callchain_list *cl, bool unfold)
 {
-       ms->unfolded = unfold ? ms->has_children : false;
+       cl->unfolded = unfold ? cl->has_children : false;
 }
 
 static int callchain_node__count_rows_rb_tree(struct callchain_node *node)
@@ -189,7 +187,7 @@ static int callchain_node__count_rows(struct callchain_node *node)
 
        list_for_each_entry(chain, &node->val, list) {
                ++n;
-               unfolded = chain->ms.unfolded;
+               unfolded = chain->unfolded;
        }
 
        if (unfolded)
@@ -211,15 +209,27 @@ static int callchain__count_rows(struct rb_root *chain)
        return n;
 }
 
-static bool map_symbol__toggle_fold(struct map_symbol *ms)
+static bool hist_entry__toggle_fold(struct hist_entry *he)
 {
-       if (!ms)
+       if (!he)
                return false;
 
-       if (!ms->has_children)
+       if (!he->has_children)
                return false;
 
-       ms->unfolded = !ms->unfolded;
+       he->unfolded = !he->unfolded;
+       return true;
+}
+
+static bool callchain_list__toggle_fold(struct callchain_list *cl)
+{
+       if (!cl)
+               return false;
+
+       if (!cl->has_children)
+               return false;
+
+       cl->unfolded = !cl->unfolded;
        return true;
 }
 
@@ -235,10 +245,10 @@ static void callchain_node__init_have_children_rb_tree(struct callchain_node *no
                list_for_each_entry(chain, &child->val, list) {
                        if (first) {
                                first = false;
-                               chain->ms.has_children = chain->list.next != &child->val ||
+                               chain->has_children = chain->list.next != &child->val ||
                                                         !RB_EMPTY_ROOT(&child->rb_root);
                        } else
-                               chain->ms.has_children = chain->list.next == &child->val &&
+                               chain->has_children = chain->list.next == &child->val &&
                                                         !RB_EMPTY_ROOT(&child->rb_root);
                }
 
@@ -252,11 +262,11 @@ static void callchain_node__init_have_children(struct callchain_node *node,
        struct callchain_list *chain;
 
        chain = list_entry(node->val.next, struct callchain_list, list);
-       chain->ms.has_children = has_sibling;
+       chain->has_children = has_sibling;
 
        if (!list_empty(&node->val)) {
                chain = list_entry(node->val.prev, struct callchain_list, list);
-               chain->ms.has_children = !RB_EMPTY_ROOT(&node->rb_root);
+               chain->has_children = !RB_EMPTY_ROOT(&node->rb_root);
        }
 
        callchain_node__init_have_children_rb_tree(node);
@@ -276,7 +286,7 @@ static void callchain__init_have_children(struct rb_root *root)
 static void hist_entry__init_have_children(struct hist_entry *he)
 {
        if (!he->init_have_children) {
-               he->ms.has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
+               he->has_children = !RB_EMPTY_ROOT(&he->sorted_chain);
                callchain__init_have_children(&he->sorted_chain);
                he->init_have_children = true;
        }
@@ -284,14 +294,22 @@ static void hist_entry__init_have_children(struct hist_entry *he)
 
 static bool hist_browser__toggle_fold(struct hist_browser *browser)
 {
-       if (map_symbol__toggle_fold(browser->selection)) {
-               struct hist_entry *he = browser->he_selection;
+       struct hist_entry *he = browser->he_selection;
+       struct map_symbol *ms = browser->selection;
+       struct callchain_list *cl = container_of(ms, struct callchain_list, ms);
+       bool has_children;
 
+       if (ms == &he->ms)
+               has_children = hist_entry__toggle_fold(he);
+       else
+               has_children = callchain_list__toggle_fold(cl);
+
+       if (has_children) {
                hist_entry__init_have_children(he);
                browser->b.nr_entries -= he->nr_rows;
                browser->nr_callchain_rows -= he->nr_rows;
 
-               if (he->ms.unfolded)
+               if (he->unfolded)
                        he->nr_rows = callchain__count_rows(&he->sorted_chain);
                else
                        he->nr_rows = 0;
@@ -318,8 +336,8 @@ static int callchain_node__set_folding_rb_tree(struct callchain_node *node, bool
 
                list_for_each_entry(chain, &child->val, list) {
                        ++n;
-                       map_symbol__set_folding(&chain->ms, unfold);
-                       has_children = chain->ms.has_children;
+                       callchain_list__set_folding(chain, unfold);
+                       has_children = chain->has_children;
                }
 
                if (has_children)
@@ -337,8 +355,8 @@ static int callchain_node__set_folding(struct callchain_node *node, bool unfold)
 
        list_for_each_entry(chain, &node->val, list) {
                ++n;
-               map_symbol__set_folding(&chain->ms, unfold);
-               has_children = chain->ms.has_children;
+               callchain_list__set_folding(chain, unfold);
+               has_children = chain->has_children;
        }
 
        if (has_children)
@@ -363,9 +381,9 @@ static int callchain__set_folding(struct rb_root *chain, bool unfold)
 static void hist_entry__set_folding(struct hist_entry *he, bool unfold)
 {
        hist_entry__init_have_children(he);
-       map_symbol__set_folding(&he->ms, unfold);
+       he->unfolded = unfold ? he->has_children : false;
 
-       if (he->ms.has_children) {
+       if (he->has_children) {
                int n = callchain__set_folding(&he->sorted_chain, unfold);
                he->nr_rows = unfold ? n : 0;
        } else
@@ -406,11 +424,11 @@ static void ui_browser__warn_lost_events(struct ui_browser *browser)
                "Or reduce the sampling frequency.");
 }
 
-static int hist_browser__run(struct hist_browser *browser,
-                            struct hist_browser_timer *hbt)
+static int hist_browser__run(struct hist_browser *browser, const char *help)
 {
        int key;
        char title[160];
+       struct hist_browser_timer *hbt = browser->hbt;
        int delay_secs = hbt ? hbt->refresh : 0;
 
        browser->b.entries = &browser->hists->entries;
@@ -418,8 +436,7 @@ static int hist_browser__run(struct hist_browser *browser,
 
        hists__browser_title(browser->hists, hbt, title, sizeof(title));
 
-       if (ui_browser__show(&browser->b, title,
-                            "Press '?' for help on key bindings") < 0)
+       if (ui_browser__show(&browser->b, title, help) < 0)
                return -1;
 
        while (1) {
@@ -1016,7 +1033,7 @@ do_offset:
        if (offset > 0) {
                do {
                        h = rb_entry(nd, struct hist_entry, rb_node);
-                       if (h->ms.unfolded) {
+                       if (h->unfolded) {
                                u16 remaining = h->nr_rows - h->row_offset;
                                if (offset > remaining) {
                                        offset -= remaining;
@@ -1037,7 +1054,7 @@ do_offset:
        } else if (offset < 0) {
                while (1) {
                        h = rb_entry(nd, struct hist_entry, rb_node);
-                       if (h->ms.unfolded) {
+                       if (h->unfolded) {
                                if (first) {
                                        if (-offset > h->row_offset) {
                                                offset += h->row_offset;
@@ -1074,7 +1091,7 @@ do_offset:
                                 * row_offset at its last entry.
                                 */
                                h = rb_entry(nd, struct hist_entry, rb_node);
-                               if (h->ms.unfolded)
+                               if (h->unfolded)
                                        h->row_offset = h->nr_rows;
                                break;
                        }
@@ -1195,7 +1212,9 @@ static int hist_browser__dump(struct hist_browser *browser)
        return 0;
 }
 
-static struct hist_browser *hist_browser__new(struct hists *hists)
+static struct hist_browser *hist_browser__new(struct hists *hists,
+                                             struct hist_browser_timer *hbt,
+                                             struct perf_session_env *env)
 {
        struct hist_browser *browser = zalloc(sizeof(*browser));
 
@@ -1206,6 +1225,8 @@ static struct hist_browser *hist_browser__new(struct hists *hists)
                browser->b.seek = ui_browser__hists_seek;
                browser->b.use_navkeypressed = true;
                browser->show_headers = symbol_conf.show_hist_headers;
+               browser->hbt = hbt;
+               browser->env = env;
        }
 
        return browser;
@@ -1395,6 +1416,257 @@ close_file_and_continue:
        return ret;
 }
 
+struct popup_action {
+       struct thread           *thread;
+       struct dso              *dso;
+       struct map_symbol       ms;
+
+       int (*fn)(struct hist_browser *browser, struct popup_action *act);
+};
+
+static int
+do_annotate(struct hist_browser *browser, struct popup_action *act)
+{
+       struct perf_evsel *evsel;
+       struct annotation *notes;
+       struct hist_entry *he;
+       int err;
+
+       if (!objdump_path && perf_session_env__lookup_objdump(browser->env))
+               return 0;
+
+       notes = symbol__annotation(act->ms.sym);
+       if (!notes->src)
+               return 0;
+
+       evsel = hists_to_evsel(browser->hists);
+       err = map_symbol__tui_annotate(&act->ms, evsel, browser->hbt);
+       he = hist_browser__selected_entry(browser);
+       /*
+        * offer option to annotate the other branch source or target
+        * (if they exist) when returning from annotate
+        */
+       if ((err == 'q' || err == CTRL('c')) && he->branch_info)
+               return 1;
+
+       ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
+       if (err)
+               ui_browser__handle_resize(&browser->b);
+       return 0;
+}
+
+static int
+add_annotate_opt(struct hist_browser *browser __maybe_unused,
+                struct popup_action *act, char **optstr,
+                struct map *map, struct symbol *sym)
+{
+       if (sym == NULL || map->dso->annotate_warned)
+               return 0;
+
+       if (asprintf(optstr, "Annotate %s", sym->name) < 0)
+               return 0;
+
+       act->ms.map = map;
+       act->ms.sym = sym;
+       act->fn = do_annotate;
+       return 1;
+}
+
+static int
+do_zoom_thread(struct hist_browser *browser, struct popup_action *act)
+{
+       struct thread *thread = act->thread;
+
+       if (browser->hists->thread_filter) {
+               pstack__remove(browser->pstack, &browser->hists->thread_filter);
+               perf_hpp__set_elide(HISTC_THREAD, false);
+               thread__zput(browser->hists->thread_filter);
+               ui_helpline__pop();
+       } else {
+               ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
+                                  thread->comm_set ? thread__comm_str(thread) : "",
+                                  thread->tid);
+               browser->hists->thread_filter = thread__get(thread);
+               perf_hpp__set_elide(HISTC_THREAD, false);
+               pstack__push(browser->pstack, &browser->hists->thread_filter);
+       }
+
+       hists__filter_by_thread(browser->hists);
+       hist_browser__reset(browser);
+       return 0;
+}
+
+static int
+add_thread_opt(struct hist_browser *browser, struct popup_action *act,
+              char **optstr, struct thread *thread)
+{
+       if (thread == NULL)
+               return 0;
+
+       if (asprintf(optstr, "Zoom %s %s(%d) thread",
+                    browser->hists->thread_filter ? "out of" : "into",
+                    thread->comm_set ? thread__comm_str(thread) : "",
+                    thread->tid) < 0)
+               return 0;
+
+       act->thread = thread;
+       act->fn = do_zoom_thread;
+       return 1;
+}
+
+static int
+do_zoom_dso(struct hist_browser *browser, struct popup_action *act)
+{
+       struct dso *dso = act->dso;
+
+       if (browser->hists->dso_filter) {
+               pstack__remove(browser->pstack, &browser->hists->dso_filter);
+               perf_hpp__set_elide(HISTC_DSO, false);
+               browser->hists->dso_filter = NULL;
+               ui_helpline__pop();
+       } else {
+               if (dso == NULL)
+                       return 0;
+               ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
+                                  dso->kernel ? "the Kernel" : dso->short_name);
+               browser->hists->dso_filter = dso;
+               perf_hpp__set_elide(HISTC_DSO, true);
+               pstack__push(browser->pstack, &browser->hists->dso_filter);
+       }
+
+       hists__filter_by_dso(browser->hists);
+       hist_browser__reset(browser);
+       return 0;
+}
+
+static int
+add_dso_opt(struct hist_browser *browser, struct popup_action *act,
+           char **optstr, struct dso *dso)
+{
+       if (dso == NULL)
+               return 0;
+
+       if (asprintf(optstr, "Zoom %s %s DSO",
+                    browser->hists->dso_filter ? "out of" : "into",
+                    dso->kernel ? "the Kernel" : dso->short_name) < 0)
+               return 0;
+
+       act->dso = dso;
+       act->fn = do_zoom_dso;
+       return 1;
+}
+
+static int
+do_browse_map(struct hist_browser *browser __maybe_unused,
+             struct popup_action *act)
+{
+       map__browse(act->ms.map);
+       return 0;
+}
+
+static int
+add_map_opt(struct hist_browser *browser __maybe_unused,
+           struct popup_action *act, char **optstr, struct map *map)
+{
+       if (map == NULL)
+               return 0;
+
+       if (asprintf(optstr, "Browse map details") < 0)
+               return 0;
+
+       act->ms.map = map;
+       act->fn = do_browse_map;
+       return 1;
+}
+
+static int
+do_run_script(struct hist_browser *browser __maybe_unused,
+             struct popup_action *act)
+{
+       char script_opt[64];
+       memset(script_opt, 0, sizeof(script_opt));
+
+       if (act->thread) {
+               scnprintf(script_opt, sizeof(script_opt), " -c %s ",
+                         thread__comm_str(act->thread));
+       } else if (act->ms.sym) {
+               scnprintf(script_opt, sizeof(script_opt), " -S %s ",
+                         act->ms.sym->name);
+       }
+
+       script_browse(script_opt);
+       return 0;
+}
+
+static int
+add_script_opt(struct hist_browser *browser __maybe_unused,
+              struct popup_action *act, char **optstr,
+              struct thread *thread, struct symbol *sym)
+{
+       if (thread) {
+               if (asprintf(optstr, "Run scripts for samples of thread [%s]",
+                            thread__comm_str(thread)) < 0)
+                       return 0;
+       } else if (sym) {
+               if (asprintf(optstr, "Run scripts for samples of symbol [%s]",
+                            sym->name) < 0)
+                       return 0;
+       } else {
+               if (asprintf(optstr, "Run scripts for all samples") < 0)
+                       return 0;
+       }
+
+       act->thread = thread;
+       act->ms.sym = sym;
+       act->fn = do_run_script;
+       return 1;
+}
+
+static int
+do_switch_data(struct hist_browser *browser __maybe_unused,
+              struct popup_action *act __maybe_unused)
+{
+       if (switch_data_file()) {
+               ui__warning("Won't switch the data files due to\n"
+                           "no valid data file being selected!\n");
+               return 0;
+       }
+
+       return K_SWITCH_INPUT_DATA;
+}
+
+static int
+add_switch_opt(struct hist_browser *browser,
+              struct popup_action *act, char **optstr)
+{
+       if (!is_report_browser(browser->hbt))
+               return 0;
+
+       if (asprintf(optstr, "Switch to another data file in PWD") < 0)
+               return 0;
+
+       act->fn = do_switch_data;
+       return 1;
+}
+
+static int
+do_exit_browser(struct hist_browser *browser __maybe_unused,
+               struct popup_action *act __maybe_unused)
+{
+       return 0;
+}
+
+static int
+add_exit_opt(struct hist_browser *browser __maybe_unused,
+            struct popup_action *act, char **optstr)
+{
+       if (asprintf(optstr, "Exit") < 0)
+               return 0;
+
+       act->fn = do_exit_browser;
+       return 1;
+}
+
 static void hist_browser__update_nr_entries(struct hist_browser *hb)
 {
        u64 nr_entries = 0;
@@ -1421,14 +1693,14 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                    struct perf_session_env *env)
 {
        struct hists *hists = evsel__hists(evsel);
-       struct hist_browser *browser = hist_browser__new(hists);
+       struct hist_browser *browser = hist_browser__new(hists, hbt, env);
        struct branch_info *bi;
-       struct pstack *fstack;
-       char *options[16];
+#define MAX_OPTIONS  16
+       char *options[MAX_OPTIONS];
+       struct popup_action actions[MAX_OPTIONS];
        int nr_options = 0;
        int key = -1;
        char buf[64];
-       char script_opt[64];
        int delay_secs = hbt ? hbt->refresh : 0;
        struct perf_hpp_fmt *fmt;
 
@@ -1463,23 +1735,29 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
        "t             Zoom into current Thread\n"
        "V             Verbose (DSO names in callchains, etc)\n"
        "z             Toggle zeroing of samples\n"
+       "f             Enable/Disable events\n"
        "/             Filter symbol by name";
 
        if (browser == NULL)
                return -1;
 
+       /* reset the abort key so that Ctrl-C can be read as a key */
+       SLang_reset_tty();
+       SLang_init_tty(0, 0, 0);
+
        if (min_pcnt) {
                browser->min_pcnt = min_pcnt;
                hist_browser__update_nr_entries(browser);
        }
 
-       fstack = pstack__new(2);
-       if (fstack == NULL)
+       browser->pstack = pstack__new(2);
+       if (browser->pstack == NULL)
                goto out;
 
        ui_helpline__push(helpline);
 
        memset(options, 0, sizeof(options));
+       memset(actions, 0, sizeof(actions));
 
        perf_hpp__for_each_format(fmt)
                perf_hpp__reset_width(fmt, hists);
@@ -1489,16 +1767,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
 
        while (1) {
                struct thread *thread = NULL;
-               const struct dso *dso = NULL;
-               int choice = 0,
-                   annotate = -2, zoom_dso = -2, zoom_thread = -2,
-                   annotate_f = -2, annotate_t = -2, browse_map = -2;
-               int scripts_comm = -2, scripts_symbol = -2,
-                   scripts_all = -2, switch_data = -2;
+               struct dso *dso = NULL;
+               int choice = 0;
 
                nr_options = 0;
 
-               key = hist_browser__run(browser, hbt);
+               key = hist_browser__run(browser, helpline);
 
                if (browser->he_selection != NULL) {
                        thread = hist_browser__selected_thread(browser);
@@ -1526,17 +1800,25 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                            browser->selection->sym == NULL ||
                            browser->selection->map->dso->annotate_warned)
                                continue;
-                       goto do_annotate;
+
+                       actions->ms.map = browser->selection->map;
+                       actions->ms.sym = browser->selection->sym;
+                       do_annotate(browser, actions);
+                       continue;
                case 'P':
                        hist_browser__dump(browser);
                        continue;
                case 'd':
-                       goto zoom_dso;
+                       actions->dso = dso;
+                       do_zoom_dso(browser, actions);
+                       continue;
                case 'V':
                        browser->show_dso = !browser->show_dso;
                        continue;
                case 't':
-                       goto zoom_thread;
+                       actions->thread = thread;
+                       do_zoom_thread(browser, actions);
+                       continue;
                case '/':
                        if (ui_browser__input_window("Symbol to show",
                                        "Please enter the name of symbol you want to see",
@@ -1548,12 +1830,18 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        }
                        continue;
                case 'r':
-                       if (is_report_browser(hbt))
-                               goto do_scripts;
+                       if (is_report_browser(hbt)) {
+                               actions->thread = NULL;
+                               actions->ms.sym = NULL;
+                               do_run_script(browser, actions);
+                       }
                        continue;
                case 's':
-                       if (is_report_browser(hbt))
-                               goto do_data_switch;
+                       if (is_report_browser(hbt)) {
+                               key = do_switch_data(browser, actions);
+                               if (key == K_SWITCH_INPUT_DATA)
+                                       goto out_free_stack;
+                       }
                        continue;
                case 'i':
                        /* env->arch is NULL for live-mode (i.e. perf top) */
@@ -1583,7 +1871,7 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                case K_LEFT: {
                        const void *top;
 
-                       if (pstack__empty(fstack)) {
+                       if (pstack__empty(browser->pstack)) {
                                /*
                                 * Go back to the perf_evsel_menu__run or other user
                                 */
@@ -1591,11 +1879,17 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                                        goto out_free_stack;
                                continue;
                        }
-                       top = pstack__pop(fstack);
-                       if (top == &browser->hists->dso_filter)
-                               goto zoom_out_dso;
+                       top = pstack__peek(browser->pstack);
+                       if (top == &browser->hists->dso_filter) {
+                               /*
+                                * No need to set actions->dso here since
+                                * it's just to remove the current filter.
+                                * Ditto for thread below.
+                                */
+                               do_zoom_dso(browser, actions);
+                       }
                        if (top == &browser->hists->thread_filter)
-                               goto zoom_out_thread;
+                               do_zoom_thread(browser, actions);
                        continue;
                }
                case K_ESC:
@@ -1607,7 +1901,12 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                case 'q':
                case CTRL('c'):
                        goto out_free_stack;
+               case 'f':
+                       if (!is_report_browser(hbt))
+                               goto out_free_stack;
+                       /* Fall thru */
                default:
+                       helpline = "Press '?' for help on key bindings";
                        continue;
                }
 
@@ -1623,196 +1922,71 @@ static int perf_evsel__hists_browse(struct perf_evsel *evsel, int nr_events,
                        if (bi == NULL)
                                goto skip_annotation;
 
-                       if (bi->from.sym != NULL &&
-                           !bi->from.map->dso->annotate_warned &&
-                           asprintf(&options[nr_options], "Annotate %s", bi->from.sym->name) > 0) {
-                               annotate_f = nr_options++;
-                       }
-
-                       if (bi->to.sym != NULL &&
-                           !bi->to.map->dso->annotate_warned &&
-                           (bi->to.sym != bi->from.sym ||
-                            bi->to.map->dso != bi->from.map->dso) &&
-                           asprintf(&options[nr_options], "Annotate %s", bi->to.sym->name) > 0) {
-                               annotate_t = nr_options++;
-                       }
+                       nr_options += add_annotate_opt(browser,
+                                                      &actions[nr_options],
+                                                      &options[nr_options],
+                                                      bi->from.map,
+                                                      bi->from.sym);
+                       if (bi->to.sym != bi->from.sym)
+                               nr_options += add_annotate_opt(browser,
+                                                       &actions[nr_options],
+                                                       &options[nr_options],
+                                                       bi->to.map,
+                                                       bi->to.sym);
                } else {
-                       if (browser->selection->sym != NULL &&
-                           !browser->selection->map->dso->annotate_warned) {
-                               struct annotation *notes;
-
-                               notes = symbol__annotation(browser->selection->sym);
-
-                               if (notes->src &&
-                                   asprintf(&options[nr_options], "Annotate %s",
-                                                browser->selection->sym->name) > 0) {
-                                       annotate = nr_options++;
-                               }
-                       }
+                       nr_options += add_annotate_opt(browser,
+                                                      &actions[nr_options],
+                                                      &options[nr_options],
+                                                      browser->selection->map,
+                                                      browser->selection->sym);
                }
 skip_annotation:
-               if (thread != NULL &&
-                   asprintf(&options[nr_options], "Zoom %s %s(%d) thread",
-                            (browser->hists->thread_filter ? "out of" : "into"),
-                            (thread->comm_set ? thread__comm_str(thread) : ""),
-                            thread->tid) > 0)
-                       zoom_thread = nr_options++;
-
-               if (dso != NULL &&
-                   asprintf(&options[nr_options], "Zoom %s %s DSO",
-                            (browser->hists->dso_filter ? "out of" : "into"),
-                            (dso->kernel ? "the Kernel" : dso->short_name)) > 0)
-                       zoom_dso = nr_options++;
-
-               if (browser->selection != NULL &&
-                   browser->selection->map != NULL &&
-                   asprintf(&options[nr_options], "Browse map details") > 0)
-                       browse_map = nr_options++;
+               nr_options += add_thread_opt(browser, &actions[nr_options],
+                                            &options[nr_options], thread);
+               nr_options += add_dso_opt(browser, &actions[nr_options],
+                                         &options[nr_options], dso);
+               nr_options += add_map_opt(browser, &actions[nr_options],
+                                         &options[nr_options],
+                                         browser->selection->map);
 
                /* perf script support */
                if (browser->he_selection) {
-                       struct symbol *sym;
-
-                       if (asprintf(&options[nr_options], "Run scripts for samples of thread [%s]",
-                                    thread__comm_str(browser->he_selection->thread)) > 0)
-                               scripts_comm = nr_options++;
-
-                       sym = browser->he_selection->ms.sym;
-                       if (sym && sym->namelen &&
-                               asprintf(&options[nr_options], "Run scripts for samples of symbol [%s]",
-                                               sym->name) > 0)
-                               scripts_symbol = nr_options++;
+                       nr_options += add_script_opt(browser,
+                                                    &actions[nr_options],
+                                                    &options[nr_options],
+                                                    thread, NULL);
+                       nr_options += add_script_opt(browser,
+                                                    &actions[nr_options],
+                                                    &options[nr_options],
+                                                    NULL, browser->selection->sym);
                }
-
-               if (asprintf(&options[nr_options], "Run scripts for all samples") > 0)
-                       scripts_all = nr_options++;
-
-               if (is_report_browser(hbt) && asprintf(&options[nr_options],
-                               "Switch to another data file in PWD") > 0)
-                       switch_data = nr_options++;
+               nr_options += add_script_opt(browser, &actions[nr_options],
+                                            &options[nr_options], NULL, NULL);
+               nr_options += add_switch_opt(browser, &actions[nr_options],
+                                            &options[nr_options]);
 add_exit_option:
-               options[nr_options++] = (char *)"Exit";
-retry_popup_menu:
-               choice = ui__popup_menu(nr_options, options);
-
-               if (choice == nr_options - 1)
-                       break;
-
-               if (choice == -1) {
-                       free_popup_options(options, nr_options - 1);
-                       continue;
-               }
-
-               if (choice == annotate || choice == annotate_t || choice == annotate_f) {
-                       struct hist_entry *he;
-                       struct annotation *notes;
-                       struct map_symbol ms;
-                       int err;
-do_annotate:
-                       if (!objdump_path && perf_session_env__lookup_objdump(env))
-                               continue;
-
-                       he = hist_browser__selected_entry(browser);
-                       if (he == NULL)
-                               continue;
-
-                       if (choice == annotate_f) {
-                               ms.map = he->branch_info->from.map;
-                               ms.sym = he->branch_info->from.sym;
-                       } else if (choice == annotate_t) {
-                               ms.map = he->branch_info->to.map;
-                               ms.sym = he->branch_info->to.sym;
-                       } else {
-                               ms = *browser->selection;
-                       }
+               nr_options += add_exit_opt(browser, &actions[nr_options],
+                                          &options[nr_options]);
 
-                       notes = symbol__annotation(ms.sym);
-                       if (!notes->src)
-                               continue;
-
-                       err = map_symbol__tui_annotate(&ms, evsel, hbt);
-                       /*
-                        * offer option to annotate the other branch source or target
-                        * (if they exists) when returning from annotate
-                        */
-                       if ((err == 'q' || err == CTRL('c'))
-                           && annotate_t != -2 && annotate_f != -2)
-                               goto retry_popup_menu;
-
-                       ui_browser__update_nr_entries(&browser->b, browser->hists->nr_entries);
-                       if (err)
-                               ui_browser__handle_resize(&browser->b);
-
-               } else if (choice == browse_map)
-                       map__browse(browser->selection->map);
-               else if (choice == zoom_dso) {
-zoom_dso:
-                       if (browser->hists->dso_filter) {
-                               pstack__remove(fstack, &browser->hists->dso_filter);
-zoom_out_dso:
-                               ui_helpline__pop();
-                               browser->hists->dso_filter = NULL;
-                               perf_hpp__set_elide(HISTC_DSO, false);
-                       } else {
-                               if (dso == NULL)
-                                       continue;
-                               ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s DSO\"",
-                                                  dso->kernel ? "the Kernel" : dso->short_name);
-                               browser->hists->dso_filter = dso;
-                               perf_hpp__set_elide(HISTC_DSO, true);
-                               pstack__push(fstack, &browser->hists->dso_filter);
-                       }
-                       hists__filter_by_dso(hists);
-                       hist_browser__reset(browser);
-               } else if (choice == zoom_thread) {
-zoom_thread:
-                       if (browser->hists->thread_filter) {
-                               pstack__remove(fstack, &browser->hists->thread_filter);
-zoom_out_thread:
-                               ui_helpline__pop();
-                               thread__zput(browser->hists->thread_filter);
-                               perf_hpp__set_elide(HISTC_THREAD, false);
-                       } else {
-                               ui_helpline__fpush("To zoom out press <- or -> + \"Zoom out of %s(%d) thread\"",
-                                                  thread->comm_set ? thread__comm_str(thread) : "",
-                                                  thread->tid);
-                               browser->hists->thread_filter = thread__get(thread);
-                               perf_hpp__set_elide(HISTC_THREAD, false);
-                               pstack__push(fstack, &browser->hists->thread_filter);
-                       }
-                       hists__filter_by_thread(hists);
-                       hist_browser__reset(browser);
-               }
-               /* perf scripts support */
-               else if (choice == scripts_all || choice == scripts_comm ||
-                               choice == scripts_symbol) {
-do_scripts:
-                       memset(script_opt, 0, 64);
+               do {
+                       struct popup_action *act;
 
-                       if (choice == scripts_comm)
-                               sprintf(script_opt, " -c %s ", thread__comm_str(browser->he_selection->thread));
+                       choice = ui__popup_menu(nr_options, options);
+                       if (choice == -1 || choice >= nr_options)
+                               break;
 
-                       if (choice == scripts_symbol)
-                               sprintf(script_opt, " -S %s ", browser->he_selection->ms.sym->name);
+                       act = &actions[choice];
+                       key = act->fn(browser, act);
+               } while (key == 1);
 
-                       script_browse(script_opt);
-               }
-               /* Switch to another data file */
-               else if (choice == switch_data) {
-do_data_switch:
-                       if (!switch_data_file()) {
-                               key = K_SWITCH_INPUT_DATA;
-                               break;
-                       } else
-                               ui__warning("Won't switch the data files due to\n"
-                                       "no valid data file get selected!\n");
-               }
+               if (key == K_SWITCH_INPUT_DATA)
+                       break;
        }
 out_free_stack:
-       pstack__delete(fstack);
+       pstack__delete(browser->pstack);
 out:
        hist_browser__delete(browser);
-       free_popup_options(options, nr_options - 1);
+       free_popup_options(options, MAX_OPTIONS);
        return key;
 }
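
The hunk above is the tail of a larger refactor: the long if/else chain over menu choices (annotate, zoom, scripts, switch data) becomes a table of popup_action entries built by add_*_opt() helpers, and the menu loop simply invokes act->fn() until it returns something other than 1. A minimal, self-contained sketch of that dispatch pattern — every name here is hypothetical except the loop-while-fn-returns-1 convention taken from the diff:

	#include <stdio.h>

	/* Minimal sketch of the action-table dispatch the refactor introduces;
	 * all names are illustrative, only the "loop while fn() returns 1"
	 * convention is taken from the diff above. */
	struct action {
		const char *label;
		int (*fn)(void *ctx);	/* return 1 to show the menu again */
	};

	static int do_zoom(void *ctx) { (void)ctx; puts("zoomed"); return 1; }
	static int do_exit(void *ctx) { (void)ctx; puts("bye");    return 0; }

	int main(void)
	{
		struct action actions[] = {
			{ "Zoom", do_zoom },
			{ "Exit", do_exit },
		};
		int choice = 0;		/* stand-in for ui__popup_menu() */
		int key;

		do {
			key = actions[choice].fn(NULL);
			choice = 1;	/* pretend the user picks Exit next */
		} while (key == 1);
		return 0;
	}
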
 
index b77e1d7713637c711e144886c9914fe02cb110ca..60d1f29b4b50a9fedf0a163855056edfce1ed22b 100644 (file)
@@ -129,7 +129,7 @@ int ui__init(void)
        err = SLsmg_init_smg();
        if (err < 0)
                goto out;
-       err = SLang_init_tty(0, 0, 0);
+       err = SLang_init_tty(-1, 0, 0);
        if (err < 0)
                goto out;
 
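Per the S-Lang documentation, SLang_init_tty()'s first argument is the abort (interrupt) character; passing -1 rather than 0 should leave the terminal's existing interrupt character (normally Ctrl-C) in place instead of overriding it.
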
index 797490a40075600c47e0341378e4ad9e24ef225e..586a59d46022a9fc8901807f5c02be4e2551db25 100644 (file)
@@ -68,12 +68,15 @@ libperf-y += rblist.o
 libperf-y += intlist.o
 libperf-y += vdso.o
 libperf-y += stat.o
+libperf-y += stat-shadow.o
 libperf-y += record.o
 libperf-y += srcline.o
 libperf-y += data.o
 libperf-$(CONFIG_X86) += tsc.o
 libperf-y += cloexec.o
 libperf-y += thread-stack.o
+libperf-$(CONFIG_AUXTRACE) += auxtrace.o
+libperf-y += parse-branch-options.o
 
 libperf-$(CONFIG_LIBELF) += symbol-elf.o
 libperf-$(CONFIG_LIBELF) += probe-event.o
@@ -101,23 +104,23 @@ CFLAGS_exec_cmd.o += -DPERF_EXEC_PATH="BUILD_STR($(perfexecdir_SQ))" -DPREFIX="B
 
 $(OUTPUT)util/parse-events-flex.c: util/parse-events.l $(OUTPUT)util/parse-events-bison.c
        $(call rule_mkdir)
-       @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l
+       $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/parse-events-flex.h $(PARSER_DEBUG_FLEX) util/parse-events.l
 
 $(OUTPUT)util/parse-events-bison.c: util/parse-events.y
        $(call rule_mkdir)
-       @$(call echo-cmd,bison)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_
+       $(Q)$(call echo-cmd,bison)$(BISON) -v util/parse-events.y -d $(PARSER_DEBUG_BISON) -o $@ -p parse_events_
 
 $(OUTPUT)util/pmu-flex.c: util/pmu.l $(OUTPUT)util/pmu-bison.c
        $(call rule_mkdir)
-       @$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l
+       $(Q)$(call echo-cmd,flex)$(FLEX) -o $@ --header-file=$(OUTPUT)util/pmu-flex.h util/pmu.l
 
 $(OUTPUT)util/pmu-bison.c: util/pmu.y
        $(call rule_mkdir)
-       @$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_
+       $(Q)$(call echo-cmd,bison)$(BISON) -v util/pmu.y -d -o $@ -p perf_pmu_
 
 CFLAGS_parse-events-flex.o  += -w
 CFLAGS_pmu-flex.o           += -w
-CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
+CFLAGS_parse-events-bison.o += -DYYENABLE_NLS=0 -w
 CFLAGS_pmu-bison.o          += -DYYENABLE_NLS=0 -DYYLTYPE_IS_TRIVIAL=0 -w
 
 $(OUTPUT)util/parse-events.o: $(OUTPUT)util/parse-events-flex.c $(OUTPUT)util/parse-events-bison.c
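
The @-to-$(Q) change plugs the flex and bison rules into the build's verbosity switch: in the usual Kbuild-style convention, Q expands to @ (silent) by default and to nothing when V=1 is given, so these commands are now echoed or hidden consistently with the rest of the perf build.
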
index 7f5bdfc9bc87d1d1828efeb9f57755071b378e7a..03b7bc70eb66032d4502ec8bfda2e15a9d44cd57 100644 (file)
@@ -506,6 +506,17 @@ static int __symbol__inc_addr_samples(struct symbol *sym, struct map *map,
        return 0;
 }
 
+static struct annotation *symbol__get_annotation(struct symbol *sym)
+{
+       struct annotation *notes = symbol__annotation(sym);
+
+       if (notes->src == NULL) {
+               if (symbol__alloc_hist(sym) < 0)
+                       return NULL;
+       }
+       return notes;
+}
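
symbol__get_annotation() factors out the lazy first-touch allocation that symbol__inc_addr_samples() previously did inline (see the next hunk). A hedged sketch of the same pattern in isolation, with placeholder types rather than the perf definitions:

	#include <stdlib.h>

	/* Sketch of the lazy first-use allocation factored out above; the
	 * struct and element type are placeholders, not the perf definitions. */
	struct notes { double *src; };

	static struct notes *get_notes(struct notes *n, size_t nr)
	{
		if (n->src == NULL) {
			n->src = calloc(nr, sizeof(*n->src)); /* allocate on first access */
			if (n->src == NULL)
				return NULL;	/* caller maps this to -ENOMEM */
		}
		return n;
	}

	int main(void)
	{
		struct notes n = { NULL };
		int ret = get_notes(&n, 64) ? 0 : 1;

		free(n.src);
		return ret;
	}
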
+
 static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
                                    int evidx, u64 addr)
 {
@@ -513,13 +524,9 @@ static int symbol__inc_addr_samples(struct symbol *sym, struct map *map,
 
        if (sym == NULL)
                return 0;
-
-       notes = symbol__annotation(sym);
-       if (notes->src == NULL) {
-               if (symbol__alloc_hist(sym) < 0)
-                       return -ENOMEM;
-       }
-
+       notes = symbol__get_annotation(sym);
+       if (notes == NULL)
+               return -ENOMEM;
        return __symbol__inc_addr_samples(sym, map, notes, evidx, addr);
 }
 
@@ -647,14 +654,15 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
 }
 
 double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
-                           s64 end, const char **path)
+                           s64 end, const char **path, u64 *nr_samples)
 {
        struct source_line *src_line = notes->src->lines;
        double percent = 0.0;
+       *nr_samples = 0;
 
        if (src_line) {
                size_t sizeof_src_line = sizeof(*src_line) +
-                               sizeof(src_line->p) * (src_line->nr_pcnt - 1);
+                               sizeof(src_line->samples) * (src_line->nr_pcnt - 1);
 
                while (offset < end) {
                        src_line = (void *)notes->src->lines +
@@ -663,7 +671,8 @@ double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
                        if (*path == NULL)
                                *path = src_line->path;
 
-                       percent += src_line->p[evidx].percent;
+                       percent += src_line->samples[evidx].percent;
+                       *nr_samples += src_line->samples[evidx].nr;
                        offset++;
                }
        } else {
@@ -673,8 +682,10 @@ double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
                while (offset < end)
                        hits += h->addr[offset++];
 
-               if (h->sum)
+               if (h->sum) {
+                       *nr_samples = hits;
                        percent = 100.0 * hits / h->sum;
+               }
        }
 
        return percent;
@@ -689,8 +700,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
 
        if (dl->offset != -1) {
                const char *path = NULL;
+               u64 nr_samples;
                double percent, max_percent = 0.0;
                double *ppercents = &percent;
+               u64 *psamples = &nr_samples;
                int i, nr_percent = 1;
                const char *color;
                struct annotation *notes = symbol__annotation(sym);
@@ -703,8 +716,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
                if (perf_evsel__is_group_event(evsel)) {
                        nr_percent = evsel->nr_members;
                        ppercents = calloc(nr_percent, sizeof(double));
-                       if (ppercents == NULL)
+                       psamples = calloc(nr_percent, sizeof(u64));
+                       if (ppercents == NULL || psamples == NULL) {
                                return -1;
+                       }
                }
 
                for (i = 0; i < nr_percent; i++) {
@@ -712,9 +727,10 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
                                        notes->src->lines ? i : evsel->idx + i,
                                        offset,
                                        next ? next->offset : (s64) len,
-                                       &path);
+                                       &path, &nr_samples);
 
                        ppercents[i] = percent;
+                       psamples[i] = nr_samples;
                        if (percent > max_percent)
                                max_percent = percent;
                }
@@ -752,8 +768,14 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
 
                for (i = 0; i < nr_percent; i++) {
                        percent = ppercents[i];
+                       nr_samples = psamples[i];
                        color = get_percent_color(percent);
-                       color_fprintf(stdout, color, " %7.2f", percent);
+
+                       if (symbol_conf.show_total_period)
+                               color_fprintf(stdout, color, " %7" PRIu64,
+                                             nr_samples);
+                       else
+                               color_fprintf(stdout, color, " %7.2f", percent);
                }
 
                printf(" :      ");
@@ -763,6 +785,9 @@ static int disasm_line__print(struct disasm_line *dl, struct symbol *sym, u64 st
                if (ppercents != &percent)
                        free(ppercents);
 
+               if (psamples != &nr_samples)
+                       free(psamples);
+
        } else if (max_lines && printed >= max_lines)
                return 1;
        else {
@@ -1096,7 +1121,7 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
                ret = strcmp(iter->path, src_line->path);
                if (ret == 0) {
                        for (i = 0; i < src_line->nr_pcnt; i++)
-                               iter->p[i].percent_sum += src_line->p[i].percent;
+                               iter->samples[i].percent_sum += src_line->samples[i].percent;
                        return;
                }
 
@@ -1107,7 +1132,7 @@ static void insert_source_line(struct rb_root *root, struct source_line *src_lin
        }
 
        for (i = 0; i < src_line->nr_pcnt; i++)
-               src_line->p[i].percent_sum = src_line->p[i].percent;
+               src_line->samples[i].percent_sum = src_line->samples[i].percent;
 
        rb_link_node(&src_line->node, parent, p);
        rb_insert_color(&src_line->node, root);
@@ -1118,9 +1143,9 @@ static int cmp_source_line(struct source_line *a, struct source_line *b)
        int i;
 
        for (i = 0; i < a->nr_pcnt; i++) {
-               if (a->p[i].percent_sum == b->p[i].percent_sum)
+               if (a->samples[i].percent_sum == b->samples[i].percent_sum)
                        continue;
-               return a->p[i].percent_sum > b->p[i].percent_sum;
+               return a->samples[i].percent_sum > b->samples[i].percent_sum;
        }
 
        return 0;
@@ -1172,7 +1197,7 @@ static void symbol__free_source_line(struct symbol *sym, int len)
        int i;
 
        sizeof_src_line = sizeof(*src_line) +
-                         (sizeof(src_line->p) * (src_line->nr_pcnt - 1));
+                         (sizeof(src_line->samples) * (src_line->nr_pcnt - 1));
 
        for (i = 0; i < len; i++) {
                free_srcline(src_line->path);
@@ -1204,7 +1229,7 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
                        h_sum += h->sum;
                }
                nr_pcnt = evsel->nr_members;
-               sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->p);
+               sizeof_src_line += (nr_pcnt - 1) * sizeof(src_line->samples);
        }
 
        if (!h_sum)
@@ -1224,10 +1249,10 @@ static int symbol__get_source_line(struct symbol *sym, struct map *map,
 
                for (k = 0; k < nr_pcnt; k++) {
                        h = annotation__histogram(notes, evidx + k);
-                       src_line->p[k].percent = 100.0 * h->addr[i] / h->sum;
+                       src_line->samples[k].percent = 100.0 * h->addr[i] / h->sum;
 
-                       if (src_line->p[k].percent > percent_max)
-                               percent_max = src_line->p[k].percent;
+                       if (src_line->samples[k].percent > percent_max)
+                               percent_max = src_line->samples[k].percent;
                }
 
                if (percent_max <= 0.5)
@@ -1267,7 +1292,7 @@ static void print_summary(struct rb_root *root, const char *filename)
 
                src_line = rb_entry(node, struct source_line, node);
                for (i = 0; i < src_line->nr_pcnt; i++) {
-                       percent = src_line->p[i].percent_sum;
+                       percent = src_line->samples[i].percent_sum;
                        color = get_percent_color(percent);
                        color_fprintf(stdout, color, " %7.2f", percent);
 
index cadbdc90a5cbf319385cb67aa5a88c06cdf107dc..7e78e6c270783475acb6dc897109254d6d266b35 100644 (file)
@@ -72,23 +72,24 @@ struct disasm_line *disasm__get_next_ip_line(struct list_head *head, struct disa
 int disasm_line__scnprintf(struct disasm_line *dl, char *bf, size_t size, bool raw);
 size_t disasm__fprintf(struct list_head *head, FILE *fp);
 double disasm__calc_percent(struct annotation *notes, int evidx, s64 offset,
-                           s64 end, const char **path);
+                           s64 end, const char **path, u64 *nr_samples);
 
 struct sym_hist {
        u64             sum;
        u64             addr[0];
 };
 
-struct source_line_percent {
+struct source_line_samples {
        double          percent;
        double          percent_sum;
+       double          nr;
 };
 
 struct source_line {
        struct rb_node  node;
        char            *path;
        int             nr_pcnt;
-       struct source_line_percent p[1];
+       struct source_line_samples samples[1];
 };
 
 /** struct annotated_source - symbols with hits have this attached as in annotation
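
struct source_line ends in a one-element samples[] array and is over-allocated per tracked event, which is why the sizing code in annotate.c repeatedly computes sizeof(*src_line) + sizeof(samples) * (nr_pcnt - 1). A standalone sketch of that trailing-array sizing, names hypothetical:

	#include <stdlib.h>

	/* Standalone sketch of the trailing-array sizing: the struct declares
	 * a 1-element array, so nr_pcnt entries need (nr_pcnt - 1) extra
	 * elements on top of sizeof(struct line). Names are placeholders. */
	struct line {
		int	nr_pcnt;
		double	samples[1];
	};

	static struct line *line__new(int nr_pcnt)
	{
		size_t sz = sizeof(struct line) +
			    sizeof(double) * (nr_pcnt - 1);
		struct line *l = calloc(1, sz);

		if (l)
			l->nr_pcnt = nr_pcnt;
		return l;
	}

	int main(void)
	{
		struct line *l = line__new(4);
		int ok = l != NULL;

		free(l);
		return ok ? 0 : 1;
	}
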
diff --git a/tools/perf/util/auxtrace.c b/tools/perf/util/auxtrace.c
new file mode 100644 (file)
index 0000000..df66966
--- /dev/null
@@ -0,0 +1,1352 @@
+/*
+ * auxtrace.c: AUX area trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <stdbool.h>
+
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/log2.h>
+#include <linux/string.h>
+
+#include <sys/param.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+#include <errno.h>
+#include <linux/list.h>
+
+#include "../perf.h"
+#include "util.h"
+#include "evlist.h"
+#include "cpumap.h"
+#include "thread_map.h"
+#include "asm/bug.h"
+#include "auxtrace.h"
+
+#include <linux/hash.h>
+
+#include "event.h"
+#include "session.h"
+#include "debug.h"
+#include "parse-options.h"
+
+int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
+                       struct auxtrace_mmap_params *mp,
+                       void *userpg, int fd)
+{
+       struct perf_event_mmap_page *pc = userpg;
+
+#if BITS_PER_LONG != 64 && !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
+       pr_err("Cannot use AUX area tracing mmaps\n");
+       return -1;
+#endif
+
+       WARN_ONCE(mm->base, "Uninitialized auxtrace_mmap\n");
+
+       mm->userpg = userpg;
+       mm->mask = mp->mask;
+       mm->len = mp->len;
+       mm->prev = 0;
+       mm->idx = mp->idx;
+       mm->tid = mp->tid;
+       mm->cpu = mp->cpu;
+
+       if (!mp->len) {
+               mm->base = NULL;
+               return 0;
+       }
+
+       pc->aux_offset = mp->offset;
+       pc->aux_size = mp->len;
+
+       mm->base = mmap(NULL, mp->len, mp->prot, MAP_SHARED, fd, mp->offset);
+       if (mm->base == MAP_FAILED) {
+               pr_debug2("failed to mmap AUX area\n");
+               mm->base = NULL;
+               return -1;
+       }
+
+       return 0;
+}
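
Note the ordering auxtrace_mmap__mmap() relies on: the AUX area's offset and size are first written into the perf_event_mmap_page (pc->aux_offset/aux_size) — which is how user space tells the kernel where the AUX buffer should live — and only then is the region mmap'ed at that file offset. An mp->len of 0 simply records that AUX tracing is not in use for this map.
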
+
+void auxtrace_mmap__munmap(struct auxtrace_mmap *mm)
+{
+       if (mm->base) {
+               munmap(mm->base, mm->len);
+               mm->base = NULL;
+       }
+}
+
+void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
+                               off_t auxtrace_offset,
+                               unsigned int auxtrace_pages,
+                               bool auxtrace_overwrite)
+{
+       if (auxtrace_pages) {
+               mp->offset = auxtrace_offset;
+               mp->len = auxtrace_pages * (size_t)page_size;
+               mp->mask = is_power_of_2(mp->len) ? mp->len - 1 : 0;
+               mp->prot = PROT_READ | (auxtrace_overwrite ? 0 : PROT_WRITE);
+               pr_debug2("AUX area mmap length %zu\n", mp->len);
+       } else {
+               mp->len = 0;
+       }
+}
+
+void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
+                                  struct perf_evlist *evlist, int idx,
+                                  bool per_cpu)
+{
+       mp->idx = idx;
+
+       if (per_cpu) {
+               mp->cpu = evlist->cpus->map[idx];
+               if (evlist->threads)
+                       mp->tid = evlist->threads->map[0];
+               else
+                       mp->tid = -1;
+       } else {
+               mp->cpu = -1;
+               mp->tid = evlist->threads->map[idx];
+       }
+}
+
+#define AUXTRACE_INIT_NR_QUEUES        32
+
+static struct auxtrace_queue *auxtrace_alloc_queue_array(unsigned int nr_queues)
+{
+       struct auxtrace_queue *queue_array;
+       unsigned int max_nr_queues, i;
+
+       max_nr_queues = UINT_MAX / sizeof(struct auxtrace_queue);
+       if (nr_queues > max_nr_queues)
+               return NULL;
+
+       queue_array = calloc(nr_queues, sizeof(struct auxtrace_queue));
+       if (!queue_array)
+               return NULL;
+
+       for (i = 0; i < nr_queues; i++) {
+               INIT_LIST_HEAD(&queue_array[i].head);
+               queue_array[i].priv = NULL;
+       }
+
+       return queue_array;
+}
+
+int auxtrace_queues__init(struct auxtrace_queues *queues)
+{
+       queues->nr_queues = AUXTRACE_INIT_NR_QUEUES;
+       queues->queue_array = auxtrace_alloc_queue_array(queues->nr_queues);
+       if (!queues->queue_array)
+               return -ENOMEM;
+       return 0;
+}
+
+static int auxtrace_queues__grow(struct auxtrace_queues *queues,
+                                unsigned int new_nr_queues)
+{
+       unsigned int nr_queues = queues->nr_queues;
+       struct auxtrace_queue *queue_array;
+       unsigned int i;
+
+       if (!nr_queues)
+               nr_queues = AUXTRACE_INIT_NR_QUEUES;
+
+       while (nr_queues && nr_queues < new_nr_queues)
+               nr_queues <<= 1;
+
+       if (nr_queues < queues->nr_queues || nr_queues < new_nr_queues)
+               return -EINVAL;
+
+       queue_array = auxtrace_alloc_queue_array(nr_queues);
+       if (!queue_array)
+               return -ENOMEM;
+
+       for (i = 0; i < queues->nr_queues; i++) {
+               list_splice_tail(&queues->queue_array[i].head,
+                                &queue_array[i].head);
+               queue_array[i].priv = queues->queue_array[i].priv;
+       }
+
+       queues->nr_queues = nr_queues;
+       queues->queue_array = queue_array;
+
+       return 0;
+}
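
auxtrace_queues__grow() doubles capacity until it covers new_nr_queues; if the shifts ever wrap nr_queues to zero, the loop exits and the bounds check (nr_queues < queues->nr_queues || nr_queues < new_nr_queues) rejects the request with -EINVAL instead of allocating a too-small array.
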
+
+static void *auxtrace_copy_data(u64 size, struct perf_session *session)
+{
+       int fd = perf_data_file__fd(session->file);
+       void *p;
+       ssize_t ret;
+
+       if (size > SSIZE_MAX)
+               return NULL;
+
+       p = malloc(size);
+       if (!p)
+               return NULL;
+
+       ret = readn(fd, p, size);
+       if (ret != (ssize_t)size) {
+               free(p);
+               return NULL;
+       }
+
+       return p;
+}
+
+static int auxtrace_queues__add_buffer(struct auxtrace_queues *queues,
+                                      unsigned int idx,
+                                      struct auxtrace_buffer *buffer)
+{
+       struct auxtrace_queue *queue;
+       int err;
+
+       if (idx >= queues->nr_queues) {
+               err = auxtrace_queues__grow(queues, idx + 1);
+               if (err)
+                       return err;
+       }
+
+       queue = &queues->queue_array[idx];
+
+       if (!queue->set) {
+               queue->set = true;
+               queue->tid = buffer->tid;
+               queue->cpu = buffer->cpu;
+       } else if (buffer->cpu != queue->cpu || buffer->tid != queue->tid) {
+               pr_err("auxtrace queue conflict: cpu %d, tid %d vs cpu %d, tid %d\n",
+                      queue->cpu, queue->tid, buffer->cpu, buffer->tid);
+               return -EINVAL;
+       }
+
+       buffer->buffer_nr = queues->next_buffer_nr++;
+
+       list_add_tail(&buffer->list, &queue->head);
+
+       queues->new_data = true;
+       queues->populated = true;
+
+       return 0;
+}
+
+/* Limit buffers to 32MiB on 32-bit */
+#define BUFFER_LIMIT_FOR_32_BIT (32 * 1024 * 1024)
+
+static int auxtrace_queues__split_buffer(struct auxtrace_queues *queues,
+                                        unsigned int idx,
+                                        struct auxtrace_buffer *buffer)
+{
+       u64 sz = buffer->size;
+       bool consecutive = false;
+       struct auxtrace_buffer *b;
+       int err;
+
+       while (sz > BUFFER_LIMIT_FOR_32_BIT) {
+               b = memdup(buffer, sizeof(struct auxtrace_buffer));
+               if (!b)
+                       return -ENOMEM;
+               b->size = BUFFER_LIMIT_FOR_32_BIT;
+               b->consecutive = consecutive;
+               err = auxtrace_queues__add_buffer(queues, idx, b);
+               if (err) {
+                       auxtrace_buffer__free(b);
+                       return err;
+               }
+               buffer->data_offset += BUFFER_LIMIT_FOR_32_BIT;
+               sz -= BUFFER_LIMIT_FOR_32_BIT;
+               consecutive = true;
+       }
+
+       buffer->size = sz;
+       buffer->consecutive = consecutive;
+
+       return 0;
+}
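
Each chunk split off after the first is flagged consecutive, and the shrunken original inherits the flag as well, presumably so that a decoder walking the queue can tell these pieces form one contiguous trace rather than independent buffers.
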
+
+static int auxtrace_queues__add_event_buffer(struct auxtrace_queues *queues,
+                                            struct perf_session *session,
+                                            unsigned int idx,
+                                            struct auxtrace_buffer *buffer)
+{
+       if (session->one_mmap) {
+               buffer->data = buffer->data_offset - session->one_mmap_offset +
+                              session->one_mmap_addr;
+       } else if (perf_data_file__is_pipe(session->file)) {
+               buffer->data = auxtrace_copy_data(buffer->size, session);
+               if (!buffer->data)
+                       return -ENOMEM;
+               buffer->data_needs_freeing = true;
+       } else if (BITS_PER_LONG == 32 &&
+                  buffer->size > BUFFER_LIMIT_FOR_32_BIT) {
+               int err;
+
+               err = auxtrace_queues__split_buffer(queues, idx, buffer);
+               if (err)
+                       return err;
+       }
+
+       return auxtrace_queues__add_buffer(queues, idx, buffer);
+}
+
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+                              struct perf_session *session,
+                              union perf_event *event, off_t data_offset,
+                              struct auxtrace_buffer **buffer_ptr)
+{
+       struct auxtrace_buffer *buffer;
+       unsigned int idx;
+       int err;
+
+       buffer = zalloc(sizeof(struct auxtrace_buffer));
+       if (!buffer)
+               return -ENOMEM;
+
+       buffer->pid = -1;
+       buffer->tid = event->auxtrace.tid;
+       buffer->cpu = event->auxtrace.cpu;
+       buffer->data_offset = data_offset;
+       buffer->offset = event->auxtrace.offset;
+       buffer->reference = event->auxtrace.reference;
+       buffer->size = event->auxtrace.size;
+       idx = event->auxtrace.idx;
+
+       err = auxtrace_queues__add_event_buffer(queues, session, idx, buffer);
+       if (err)
+               goto out_err;
+
+       if (buffer_ptr)
+               *buffer_ptr = buffer;
+
+       return 0;
+
+out_err:
+       auxtrace_buffer__free(buffer);
+       return err;
+}
+
+static int auxtrace_queues__add_indexed_event(struct auxtrace_queues *queues,
+                                             struct perf_session *session,
+                                             off_t file_offset, size_t sz)
+{
+       union perf_event *event;
+       int err;
+       char buf[PERF_SAMPLE_MAX_SIZE];
+
+       err = perf_session__peek_event(session, file_offset, buf,
+                                      PERF_SAMPLE_MAX_SIZE, &event, NULL);
+       if (err)
+               return err;
+
+       if (event->header.type == PERF_RECORD_AUXTRACE) {
+               if (event->header.size < sizeof(struct auxtrace_event) ||
+                   event->header.size != sz) {
+                       err = -EINVAL;
+                       goto out;
+               }
+               file_offset += event->header.size;
+               err = auxtrace_queues__add_event(queues, session, event,
+                                                file_offset, NULL);
+       }
+out:
+       return err;
+}
+
+void auxtrace_queues__free(struct auxtrace_queues *queues)
+{
+       unsigned int i;
+
+       for (i = 0; i < queues->nr_queues; i++) {
+               while (!list_empty(&queues->queue_array[i].head)) {
+                       struct auxtrace_buffer *buffer;
+
+                       buffer = list_entry(queues->queue_array[i].head.next,
+                                           struct auxtrace_buffer, list);
+                       list_del(&buffer->list);
+                       auxtrace_buffer__free(buffer);
+               }
+       }
+
+       zfree(&queues->queue_array);
+       queues->nr_queues = 0;
+}
+
+static void auxtrace_heapify(struct auxtrace_heap_item *heap_array,
+                            unsigned int pos, unsigned int queue_nr,
+                            u64 ordinal)
+{
+       unsigned int parent;
+
+       while (pos) {
+               parent = (pos - 1) >> 1;
+               if (heap_array[parent].ordinal <= ordinal)
+                       break;
+               heap_array[pos] = heap_array[parent];
+               pos = parent;
+       }
+       heap_array[pos].queue_nr = queue_nr;
+       heap_array[pos].ordinal = ordinal;
+}
+
+int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
+                      u64 ordinal)
+{
+       struct auxtrace_heap_item *heap_array;
+
+       if (queue_nr >= heap->heap_sz) {
+               unsigned int heap_sz = AUXTRACE_INIT_NR_QUEUES;
+
+               while (heap_sz <= queue_nr)
+                       heap_sz <<= 1;
+               heap_array = realloc(heap->heap_array,
+                                    heap_sz * sizeof(struct auxtrace_heap_item));
+               if (!heap_array)
+                       return -ENOMEM;
+               heap->heap_array = heap_array;
+               heap->heap_sz = heap_sz;
+       }
+
+       auxtrace_heapify(heap->heap_array, heap->heap_cnt++, queue_nr, ordinal);
+
+       return 0;
+}
+
+void auxtrace_heap__free(struct auxtrace_heap *heap)
+{
+       zfree(&heap->heap_array);
+       heap->heap_cnt = 0;
+       heap->heap_sz = 0;
+}
+
+void auxtrace_heap__pop(struct auxtrace_heap *heap)
+{
+       unsigned int pos, last, heap_cnt = heap->heap_cnt;
+       struct auxtrace_heap_item *heap_array;
+
+       if (!heap_cnt)
+               return;
+
+       heap->heap_cnt -= 1;
+
+       heap_array = heap->heap_array;
+
+       pos = 0;
+       while (1) {
+               unsigned int left, right;
+
+               left = (pos << 1) + 1;
+               if (left >= heap_cnt)
+                       break;
+               right = left + 1;
+               if (right >= heap_cnt) {
+                       heap_array[pos] = heap_array[left];
+                       return;
+               }
+               if (heap_array[left].ordinal < heap_array[right].ordinal) {
+                       heap_array[pos] = heap_array[left];
+                       pos = left;
+               } else {
+                       heap_array[pos] = heap_array[right];
+                       pos = right;
+               }
+       }
+
+       last = heap_cnt - 1;
+       auxtrace_heapify(heap_array, pos, heap_array[last].queue_nr,
+                        heap_array[last].ordinal);
+}
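
auxtrace_heapify() is a textbook sift-up on an array-backed binary min-heap keyed by ordinal (parent of i sits at (i - 1) / 2), and auxtrace_heap__pop() is the matching sift-down. A self-contained sketch of the sift-up half, reduced to bare keys:

	#include <stdio.h>

	/* Self-contained sketch of the array-backed min-heap insertion above,
	 * reduced to bare keys: parent of i is (i - 1) / 2. */
	static void sift_up(unsigned long long *h, unsigned int pos,
			    unsigned long long key)
	{
		while (pos) {
			unsigned int parent = (pos - 1) / 2;

			if (h[parent] <= key)
				break;
			h[pos] = h[parent];	/* pull the parent down */
			pos = parent;
		}
		h[pos] = key;
	}

	int main(void)
	{
		unsigned long long heap[8];
		unsigned long long keys[] = { 5, 3, 8, 1 };
		unsigned int i, cnt = 0;

		for (i = 0; i < 4; i++)
			sift_up(heap, cnt++, keys[i]);

		printf("min = %llu\n", heap[0]);	/* prints 1 */
		return 0;
	}
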
+
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr)
+{
+       if (itr)
+               return itr->info_priv_size(itr);
+       return 0;
+}
+
+static int auxtrace_not_supported(void)
+{
+       pr_err("AUX area tracing is not supported on this architecture\n");
+       return -EINVAL;
+}
+
+int auxtrace_record__info_fill(struct auxtrace_record *itr,
+                              struct perf_session *session,
+                              struct auxtrace_info_event *auxtrace_info,
+                              size_t priv_size)
+{
+       if (itr)
+               return itr->info_fill(itr, session, auxtrace_info, priv_size);
+       return auxtrace_not_supported();
+}
+
+void auxtrace_record__free(struct auxtrace_record *itr)
+{
+       if (itr)
+               itr->free(itr);
+}
+
+int auxtrace_record__snapshot_start(struct auxtrace_record *itr)
+{
+       if (itr && itr->snapshot_start)
+               return itr->snapshot_start(itr);
+       return 0;
+}
+
+int auxtrace_record__snapshot_finish(struct auxtrace_record *itr)
+{
+       if (itr && itr->snapshot_finish)
+               return itr->snapshot_finish(itr);
+       return 0;
+}
+
+int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
+                                  struct auxtrace_mmap *mm,
+                                  unsigned char *data, u64 *head, u64 *old)
+{
+       if (itr && itr->find_snapshot)
+               return itr->find_snapshot(itr, idx, mm, data, head, old);
+       return 0;
+}
+
+int auxtrace_record__options(struct auxtrace_record *itr,
+                            struct perf_evlist *evlist,
+                            struct record_opts *opts)
+{
+       if (itr)
+               return itr->recording_options(itr, evlist, opts);
+       return 0;
+}
+
+u64 auxtrace_record__reference(struct auxtrace_record *itr)
+{
+       if (itr)
+               return itr->reference(itr);
+       return 0;
+}
+
+int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
+                                   struct record_opts *opts, const char *str)
+{
+       if (!str)
+               return 0;
+
+       if (itr)
+               return itr->parse_snapshot_options(itr, opts, str);
+
+       pr_err("No AUX area tracing to snapshot\n");
+       return -EINVAL;
+}
+
+struct auxtrace_record *__weak
+auxtrace_record__init(struct perf_evlist *evlist __maybe_unused, int *err)
+{
+       *err = 0;
+       return NULL;
+}
+
+static int auxtrace_index__alloc(struct list_head *head)
+{
+       struct auxtrace_index *auxtrace_index;
+
+       auxtrace_index = malloc(sizeof(struct auxtrace_index));
+       if (!auxtrace_index)
+               return -ENOMEM;
+
+       auxtrace_index->nr = 0;
+       INIT_LIST_HEAD(&auxtrace_index->list);
+
+       list_add_tail(&auxtrace_index->list, head);
+
+       return 0;
+}
+
+void auxtrace_index__free(struct list_head *head)
+{
+       struct auxtrace_index *auxtrace_index, *n;
+
+       list_for_each_entry_safe(auxtrace_index, n, head, list) {
+               list_del(&auxtrace_index->list);
+               free(auxtrace_index);
+       }
+}
+
+static struct auxtrace_index *auxtrace_index__last(struct list_head *head)
+{
+       struct auxtrace_index *auxtrace_index;
+       int err;
+
+       if (list_empty(head)) {
+               err = auxtrace_index__alloc(head);
+               if (err)
+                       return NULL;
+       }
+
+       auxtrace_index = list_entry(head->prev, struct auxtrace_index, list);
+
+       if (auxtrace_index->nr >= PERF_AUXTRACE_INDEX_ENTRY_COUNT) {
+               err = auxtrace_index__alloc(head);
+               if (err)
+                       return NULL;
+               auxtrace_index = list_entry(head->prev, struct auxtrace_index,
+                                           list);
+       }
+
+       return auxtrace_index;
+}
+
+int auxtrace_index__auxtrace_event(struct list_head *head,
+                                  union perf_event *event, off_t file_offset)
+{
+       struct auxtrace_index *auxtrace_index;
+       size_t nr;
+
+       auxtrace_index = auxtrace_index__last(head);
+       if (!auxtrace_index)
+               return -ENOMEM;
+
+       nr = auxtrace_index->nr;
+       auxtrace_index->entries[nr].file_offset = file_offset;
+       auxtrace_index->entries[nr].sz = event->header.size;
+       auxtrace_index->nr += 1;
+
+       return 0;
+}
+
+static int auxtrace_index__do_write(int fd,
+                                   struct auxtrace_index *auxtrace_index)
+{
+       struct auxtrace_index_entry ent;
+       size_t i;
+
+       for (i = 0; i < auxtrace_index->nr; i++) {
+               ent.file_offset = auxtrace_index->entries[i].file_offset;
+               ent.sz = auxtrace_index->entries[i].sz;
+               if (writen(fd, &ent, sizeof(ent)) != sizeof(ent))
+                       return -errno;
+       }
+       return 0;
+}
+
+int auxtrace_index__write(int fd, struct list_head *head)
+{
+       struct auxtrace_index *auxtrace_index;
+       u64 total = 0;
+       int err;
+
+       list_for_each_entry(auxtrace_index, head, list)
+               total += auxtrace_index->nr;
+
+       if (writen(fd, &total, sizeof(total)) != sizeof(total))
+               return -errno;
+
+       list_for_each_entry(auxtrace_index, head, list) {
+               err = auxtrace_index__do_write(fd, auxtrace_index);
+               if (err)
+                       return err;
+       }
+
+       return 0;
+}
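
The on-disk index layout written here is simply a u64 entry count followed by that many {file_offset, sz} pairs (struct auxtrace_index_entry); auxtrace_index__process() below reads it back with the same shape, byte-swapping each field when the file's endianness differs.
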
+
+static int auxtrace_index__process_entry(int fd, struct list_head *head,
+                                        bool needs_swap)
+{
+       struct auxtrace_index *auxtrace_index;
+       struct auxtrace_index_entry ent;
+       size_t nr;
+
+       if (readn(fd, &ent, sizeof(ent)) != sizeof(ent))
+               return -1;
+
+       auxtrace_index = auxtrace_index__last(head);
+       if (!auxtrace_index)
+               return -1;
+
+       nr = auxtrace_index->nr;
+       if (needs_swap) {
+               auxtrace_index->entries[nr].file_offset =
+                                               bswap_64(ent.file_offset);
+               auxtrace_index->entries[nr].sz = bswap_64(ent.sz);
+       } else {
+               auxtrace_index->entries[nr].file_offset = ent.file_offset;
+               auxtrace_index->entries[nr].sz = ent.sz;
+       }
+
+       auxtrace_index->nr = nr + 1;
+
+       return 0;
+}
+
+int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
+                           bool needs_swap)
+{
+       struct list_head *head = &session->auxtrace_index;
+       u64 nr;
+
+       if (readn(fd, &nr, sizeof(u64)) != sizeof(u64))
+               return -1;
+
+       if (needs_swap)
+               nr = bswap_64(nr);
+
+       if (sizeof(u64) + nr * sizeof(struct auxtrace_index_entry) > size)
+               return -1;
+
+       while (nr--) {
+               int err;
+
+               err = auxtrace_index__process_entry(fd, head, needs_swap);
+               if (err)
+                       return -1;
+       }
+
+       return 0;
+}
+
+static int auxtrace_queues__process_index_entry(struct auxtrace_queues *queues,
+                                               struct perf_session *session,
+                                               struct auxtrace_index_entry *ent)
+{
+       return auxtrace_queues__add_indexed_event(queues, session,
+                                                 ent->file_offset, ent->sz);
+}
+
+int auxtrace_queues__process_index(struct auxtrace_queues *queues,
+                                  struct perf_session *session)
+{
+       struct auxtrace_index *auxtrace_index;
+       struct auxtrace_index_entry *ent;
+       size_t i;
+       int err;
+
+       list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
+               for (i = 0; i < auxtrace_index->nr; i++) {
+                       ent = &auxtrace_index->entries[i];
+                       err = auxtrace_queues__process_index_entry(queues,
+                                                                  session,
+                                                                  ent);
+                       if (err)
+                               return err;
+               }
+       }
+       return 0;
+}
+
+struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
+                                             struct auxtrace_buffer *buffer)
+{
+       if (buffer) {
+               if (list_is_last(&buffer->list, &queue->head))
+                       return NULL;
+               return list_entry(buffer->list.next, struct auxtrace_buffer,
+                                 list);
+       } else {
+               if (list_empty(&queue->head))
+                       return NULL;
+               return list_entry(queue->head.next, struct auxtrace_buffer,
+                                 list);
+       }
+}
+
+void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd)
+{
+       size_t adj = buffer->data_offset & (page_size - 1);
+       size_t size = buffer->size + adj;
+       off_t file_offset = buffer->data_offset - adj;
+       void *addr;
+
+       if (buffer->data)
+               return buffer->data;
+
+       addr = mmap(NULL, size, PROT_READ, MAP_SHARED, fd, file_offset);
+       if (addr == MAP_FAILED)
+               return NULL;
+
+       buffer->mmap_addr = addr;
+       buffer->mmap_size = size;
+
+       buffer->data = addr + adj;
+
+       return buffer->data;
+}
+
+void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer)
+{
+       if (!buffer->data || !buffer->mmap_addr)
+               return;
+       munmap(buffer->mmap_addr, buffer->mmap_size);
+       buffer->mmap_addr = NULL;
+       buffer->mmap_size = 0;
+       buffer->data = NULL;
+       buffer->use_data = NULL;
+}
+
+void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer)
+{
+       auxtrace_buffer__put_data(buffer);
+       if (buffer->data_needs_freeing) {
+               buffer->data_needs_freeing = false;
+               zfree(&buffer->data);
+               buffer->use_data = NULL;
+               buffer->size = 0;
+       }
+}
+
+void auxtrace_buffer__free(struct auxtrace_buffer *buffer)
+{
+       auxtrace_buffer__drop_data(buffer);
+       free(buffer);
+}
+
+void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
+                         int code, int cpu, pid_t pid, pid_t tid, u64 ip,
+                         const char *msg)
+{
+       size_t size;
+
+       memset(auxtrace_error, 0, sizeof(struct auxtrace_error_event));
+
+       auxtrace_error->header.type = PERF_RECORD_AUXTRACE_ERROR;
+       auxtrace_error->type = type;
+       auxtrace_error->code = code;
+       auxtrace_error->cpu = cpu;
+       auxtrace_error->pid = pid;
+       auxtrace_error->tid = tid;
+       auxtrace_error->ip = ip;
+       strlcpy(auxtrace_error->msg, msg, MAX_AUXTRACE_ERROR_MSG);
+
+       size = (void *)auxtrace_error->msg - (void *)auxtrace_error +
+              strlen(auxtrace_error->msg) + 1;
+       auxtrace_error->header.size = PERF_ALIGN(size, sizeof(u64));
+}
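
The event size is computed as the offset of msg within the record plus strlen(msg) + 1, rounded up to a u64 boundary with PERF_ALIGN, so variable-length messages never force the full MAX_AUXTRACE_ERROR_MSG bytes onto disk.
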
+
+int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
+                                        struct perf_tool *tool,
+                                        struct perf_session *session,
+                                        perf_event__handler_t process)
+{
+       union perf_event *ev;
+       size_t priv_size;
+       int err;
+
+       pr_debug2("Synthesizing auxtrace information\n");
+       priv_size = auxtrace_record__info_priv_size(itr);
+       ev = zalloc(sizeof(struct auxtrace_info_event) + priv_size);
+       if (!ev)
+               return -ENOMEM;
+
+       ev->auxtrace_info.header.type = PERF_RECORD_AUXTRACE_INFO;
+       ev->auxtrace_info.header.size = sizeof(struct auxtrace_info_event) +
+                                       priv_size;
+       err = auxtrace_record__info_fill(itr, session, &ev->auxtrace_info,
+                                        priv_size);
+       if (err)
+               goto out_free;
+
+       err = process(tool, ev, NULL, NULL);
+out_free:
+       free(ev);
+       return err;
+}
+
+static bool auxtrace__dont_decode(struct perf_session *session)
+{
+       return !session->itrace_synth_opts ||
+              session->itrace_synth_opts->dont_decode;
+}
+
+int perf_event__process_auxtrace_info(struct perf_tool *tool __maybe_unused,
+                                     union perf_event *event,
+                                     struct perf_session *session __maybe_unused)
+{
+       enum auxtrace_type type = event->auxtrace_info.type;
+
+       if (dump_trace)
+               fprintf(stdout, " type: %u\n", type);
+
+       switch (type) {
+       case PERF_AUXTRACE_UNKNOWN:
+       default:
+               return -EINVAL;
+       }
+}
+
+s64 perf_event__process_auxtrace(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session)
+{
+       s64 err;
+
+       if (dump_trace)
+               fprintf(stdout, " size: %#"PRIx64"  offset: %#"PRIx64"  ref: %#"PRIx64"  idx: %u  tid: %d  cpu: %d\n",
+                       event->auxtrace.size, event->auxtrace.offset,
+                       event->auxtrace.reference, event->auxtrace.idx,
+                       event->auxtrace.tid, event->auxtrace.cpu);
+
+       if (auxtrace__dont_decode(session))
+               return event->auxtrace.size;
+
+       if (!session->auxtrace || event->header.type != PERF_RECORD_AUXTRACE)
+               return -EINVAL;
+
+       err = session->auxtrace->process_auxtrace_event(session, event, tool);
+       if (err < 0)
+               return err;
+
+       return event->auxtrace.size;
+}
+
+#define PERF_ITRACE_DEFAULT_PERIOD_TYPE                PERF_ITRACE_PERIOD_NANOSECS
+#define PERF_ITRACE_DEFAULT_PERIOD             100000
+#define PERF_ITRACE_DEFAULT_CALLCHAIN_SZ       16
+#define PERF_ITRACE_MAX_CALLCHAIN_SZ           1024
+
+void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts)
+{
+       synth_opts->instructions = true;
+       synth_opts->branches = true;
+       synth_opts->transactions = true;
+       synth_opts->errors = true;
+       synth_opts->period_type = PERF_ITRACE_DEFAULT_PERIOD_TYPE;
+       synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
+       synth_opts->callchain_sz = PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
+}
+
+/*
+ * Please check tools/perf/Documentation/perf-script.txt for information
+ * about the options parsed here; that documentation is introduced in a
+ * later cset, when support for these options lands in 'perf script'.
+ */
+int itrace_parse_synth_opts(const struct option *opt, const char *str,
+                           int unset)
+{
+       struct itrace_synth_opts *synth_opts = opt->value;
+       const char *p;
+       char *endptr;
+
+       synth_opts->set = true;
+
+       if (unset) {
+               synth_opts->dont_decode = true;
+               return 0;
+       }
+
+       if (!str) {
+               itrace_synth_opts__set_default(synth_opts);
+               return 0;
+       }
+
+       for (p = str; *p;) {
+               switch (*p++) {
+               case 'i':
+                       synth_opts->instructions = true;
+                       while (*p == ' ' || *p == ',')
+                               p += 1;
+                       if (isdigit(*p)) {
+                               synth_opts->period = strtoull(p, &endptr, 10);
+                               p = endptr;
+                               while (*p == ' ' || *p == ',')
+                                       p += 1;
+                               switch (*p++) {
+                               case 'i':
+                                       synth_opts->period_type =
+                                               PERF_ITRACE_PERIOD_INSTRUCTIONS;
+                                       break;
+                               case 't':
+                                       synth_opts->period_type =
+                                               PERF_ITRACE_PERIOD_TICKS;
+                                       break;
+                               case 'm':
+                                       synth_opts->period *= 1000;
+                                       /* Fall through */
+                               case 'u':
+                                       synth_opts->period *= 1000;
+                                       /* Fall through */
+                               case 'n':
+                                       if (*p++ != 's')
+                                               goto out_err;
+                                       synth_opts->period_type =
+                                               PERF_ITRACE_PERIOD_NANOSECS;
+                                       break;
+                               case '\0':
+                                       goto out;
+                               default:
+                                       goto out_err;
+                               }
+                       }
+                       break;
+               case 'b':
+                       synth_opts->branches = true;
+                       break;
+               case 'x':
+                       synth_opts->transactions = true;
+                       break;
+               case 'e':
+                       synth_opts->errors = true;
+                       break;
+               case 'd':
+                       synth_opts->log = true;
+                       break;
+               case 'c':
+                       synth_opts->branches = true;
+                       synth_opts->calls = true;
+                       break;
+               case 'r':
+                       synth_opts->branches = true;
+                       synth_opts->returns = true;
+                       break;
+               case 'g':
+                       synth_opts->callchain = true;
+                       synth_opts->callchain_sz =
+                                       PERF_ITRACE_DEFAULT_CALLCHAIN_SZ;
+                       while (*p == ' ' || *p == ',')
+                               p += 1;
+                       if (isdigit(*p)) {
+                               unsigned int val;
+
+                               val = strtoul(p, &endptr, 10);
+                               p = endptr;
+                               if (!val || val > PERF_ITRACE_MAX_CALLCHAIN_SZ)
+                                       goto out_err;
+                               synth_opts->callchain_sz = val;
+                       }
+                       break;
+               case ' ':
+               case ',':
+                       break;
+               default:
+                       goto out_err;
+               }
+       }
+out:
+       if (synth_opts->instructions) {
+               if (!synth_opts->period_type)
+                       synth_opts->period_type =
+                                       PERF_ITRACE_DEFAULT_PERIOD_TYPE;
+               if (!synth_opts->period)
+                       synth_opts->period = PERF_ITRACE_DEFAULT_PERIOD;
+       }
+
+       return 0;
+
+out_err:
+       pr_err("Bad Instruction Tracing options '%s'\n", str);
+       return -EINVAL;
+}
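
Reading the parser back: a string like "i100us" asks for a synthesized instructions sample every 100 microseconds (the 'm'/'u' cases multiply into nanoseconds before the terminating 'n...s'), "i1000t" uses a period of 1000 ticks, and "g32" enables callchains capped at 32 entries (bounded by PERF_ITRACE_MAX_CALLCHAIN_SZ). A bare option with no string falls back to itrace_synth_opts__set_default().
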
+
+static const char * const auxtrace_error_type_name[] = {
+       [PERF_AUXTRACE_ERROR_ITRACE] = "instruction trace",
+};
+
+static const char *auxtrace_error_name(int type)
+{
+       const char *error_type_name = NULL;
+
+       if (type < PERF_AUXTRACE_ERROR_MAX)
+               error_type_name = auxtrace_error_type_name[type];
+       if (!error_type_name)
+               error_type_name = "unknown AUX";
+       return error_type_name;
+}
+
+size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp)
+{
+       struct auxtrace_error_event *e = &event->auxtrace_error;
+       int ret;
+
+       ret = fprintf(fp, " %s error type %u",
+                     auxtrace_error_name(e->type), e->type);
+       ret += fprintf(fp, " cpu %d pid %d tid %d ip %#"PRIx64" code %u: %s\n",
+                      e->cpu, e->pid, e->tid, e->ip, e->code, e->msg);
+       return ret;
+}
+
+void perf_session__auxtrace_error_inc(struct perf_session *session,
+                                     union perf_event *event)
+{
+       struct auxtrace_error_event *e = &event->auxtrace_error;
+
+       if (e->type < PERF_AUXTRACE_ERROR_MAX)
+               session->evlist->stats.nr_auxtrace_errors[e->type] += 1;
+}
+
+void events_stats__auxtrace_error_warn(const struct events_stats *stats)
+{
+       int i;
+
+       for (i = 0; i < PERF_AUXTRACE_ERROR_MAX; i++) {
+               if (!stats->nr_auxtrace_errors[i])
+                       continue;
+               ui__warning("%u %s errors\n",
+                           stats->nr_auxtrace_errors[i],
+                           auxtrace_error_name(i));
+       }
+}
+
+int perf_event__process_auxtrace_error(struct perf_tool *tool __maybe_unused,
+                                      union perf_event *event,
+                                      struct perf_session *session)
+{
+       if (auxtrace__dont_decode(session))
+               return 0;
+
+       perf_event__fprintf_auxtrace_error(event, stdout);
+       return 0;
+}
+
+static int __auxtrace_mmap__read(struct auxtrace_mmap *mm,
+                                struct auxtrace_record *itr,
+                                struct perf_tool *tool, process_auxtrace_t fn,
+                                bool snapshot, size_t snapshot_size)
+{
+       u64 head, old = mm->prev, offset, ref;
+       unsigned char *data = mm->base;
+       size_t size, head_off, old_off, len1, len2, padding;
+       union perf_event ev;
+       void *data1, *data2;
+
+       if (snapshot) {
+               head = auxtrace_mmap__read_snapshot_head(mm);
+               if (auxtrace_record__find_snapshot(itr, mm->idx, mm, data,
+                                                  &head, &old))
+                       return -1;
+       } else {
+               head = auxtrace_mmap__read_head(mm);
+       }
+
+       if (old == head)
+               return 0;
+
+       pr_debug3("auxtrace idx %d old %#"PRIx64" head %#"PRIx64" diff %#"PRIx64"\n",
+                 mm->idx, old, head, head - old);
+
+       if (mm->mask) {
+               head_off = head & mm->mask;
+               old_off = old & mm->mask;
+       } else {
+               head_off = head % mm->len;
+               old_off = old % mm->len;
+       }
+
+       if (head_off > old_off)
+               size = head_off - old_off;
+       else
+               size = mm->len - (old_off - head_off);
+
+       if (snapshot && size > snapshot_size)
+               size = snapshot_size;
+
+       ref = auxtrace_record__reference(itr);
+
+       if (head > old || size <= head || mm->mask) {
+               offset = head - size;
+       } else {
+               /*
+                * When the buffer size is not a power of 2, 'head' wraps at the
+                * highest multiple of the buffer size, so we have to subtract
+                * the remainder here.
+                */
+               u64 rem = (0ULL - mm->len) % mm->len;
+
+               offset = head - size - rem;
+       }
+
+       if (size > head_off) {
+               len1 = size - head_off;
+               data1 = &data[mm->len - len1];
+               len2 = head_off;
+               data2 = &data[0];
+       } else {
+               len1 = size;
+               data1 = &data[head_off - len1];
+               len2 = 0;
+               data2 = NULL;
+       }
+
+       /* padding must be written by fn() e.g. record__process_auxtrace() */
+       padding = size & 7;
+       if (padding)
+               padding = 8 - padding;
+
+       memset(&ev, 0, sizeof(ev));
+       ev.auxtrace.header.type = PERF_RECORD_AUXTRACE;
+       ev.auxtrace.header.size = sizeof(ev.auxtrace);
+       ev.auxtrace.size = size + padding;
+       ev.auxtrace.offset = offset;
+       ev.auxtrace.reference = ref;
+       ev.auxtrace.idx = mm->idx;
+       ev.auxtrace.tid = mm->tid;
+       ev.auxtrace.cpu = mm->cpu;
+
+       if (fn(tool, &ev, data1, len1, data2, len2))
+               return -1;
+
+       mm->prev = head;
+
+       if (!snapshot) {
+               auxtrace_mmap__write_tail(mm, head);
+               if (itr->read_finish) {
+                       int err;
+
+                       err = itr->read_finish(itr, mm->idx);
+                       if (err < 0)
+                               return err;
+               }
+       }
+
+       return 1;
+}
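
The heart of __auxtrace_mmap__read() is turning the free-running old/head positions into at most two contiguous segments of the ring buffer (data1/data2), using a mask when the length is a power of two and modulo otherwise. A standalone sketch of just that wrap-around split, modulo form only, assuming the caller has already handled the empty old == head case as the perf code does:

	#include <stdio.h>

	/* Sketch of the two-segment split above: given free-running old/head
	 * positions in a ring of len bytes, emit the (offset, length) pairs
	 * covering the new data across the wrap point. Assumes old != head. */
	static void ring_split(unsigned long long old, unsigned long long head,
			       size_t len, size_t out[2][2])
	{
		size_t head_off = head % len, old_off = old % len;
		size_t size = head_off > old_off ? head_off - old_off
						 : len - (old_off - head_off);

		if (size > head_off) {		/* wrapped: tail piece first */
			out[0][0] = len - (size - head_off);
			out[0][1] = size - head_off;
			out[1][0] = 0;
			out[1][1] = head_off;
		} else {			/* contiguous */
			out[0][0] = head_off - size;
			out[0][1] = size;
			out[1][0] = 0;
			out[1][1] = 0;
		}
	}

	int main(void)
	{
		size_t seg[2][2];

		ring_split(6, 10, 8, seg);	/* 4 new bytes, wrapping at 8 */
		printf("seg1 off=%zu len=%zu, seg2 off=%zu len=%zu\n",
		       seg[0][0], seg[0][1], seg[1][0], seg[1][1]);
		return 0;
	}
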
+
+int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
+                       struct perf_tool *tool, process_auxtrace_t fn)
+{
+       return __auxtrace_mmap__read(mm, itr, tool, fn, false, 0);
+}
+
+int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
+                                struct auxtrace_record *itr,
+                                struct perf_tool *tool, process_auxtrace_t fn,
+                                size_t snapshot_size)
+{
+       return __auxtrace_mmap__read(mm, itr, tool, fn, true, snapshot_size);
+}
+
+/**
+ * struct auxtrace_cache - hash table to implement a cache
+ * @hashtable: the hashtable
+ * @sz: hashtable size (number of hlists)
+ * @entry_size: size of an entry
+ * @limit: limit the number of entries to this maximum; when it is reached,
+ *         the cache is dropped and caching begins again with an empty cache
+ * @cnt: current number of entries
+ * @bits: hashtable size (@sz = 2^@bits)
+ */
+struct auxtrace_cache {
+       struct hlist_head *hashtable;
+       size_t sz;
+       size_t entry_size;
+       size_t limit;
+       size_t cnt;
+       unsigned int bits;
+};
+
+struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
+                                          unsigned int limit_percent)
+{
+       struct auxtrace_cache *c;
+       struct hlist_head *ht;
+       size_t sz, i;
+
+       c = zalloc(sizeof(struct auxtrace_cache));
+       if (!c)
+               return NULL;
+
+       sz = 1UL << bits;
+
+       ht = calloc(sz, sizeof(struct hlist_head));
+       if (!ht)
+               goto out_free;
+
+       for (i = 0; i < sz; i++)
+               INIT_HLIST_HEAD(&ht[i]);
+
+       c->hashtable = ht;
+       c->sz = sz;
+       c->entry_size = entry_size;
+       c->limit = (c->sz * limit_percent) / 100;
+       c->bits = bits;
+
+       return c;
+
+out_free:
+       free(c);
+       return NULL;
+}
+
+static void auxtrace_cache__drop(struct auxtrace_cache *c)
+{
+       struct auxtrace_cache_entry *entry;
+       struct hlist_node *tmp;
+       size_t i;
+
+       if (!c)
+               return;
+
+       for (i = 0; i < c->sz; i++) {
+               hlist_for_each_entry_safe(entry, tmp, &c->hashtable[i], hash) {
+                       hlist_del(&entry->hash);
+                       auxtrace_cache__free_entry(c, entry);
+               }
+       }
+
+       c->cnt = 0;
+}
+
+void auxtrace_cache__free(struct auxtrace_cache *c)
+{
+       if (!c)
+               return;
+
+       auxtrace_cache__drop(c);
+       free(c->hashtable);
+       free(c);
+}
+
+void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c)
+{
+       return malloc(c->entry_size);
+}
+
+void auxtrace_cache__free_entry(struct auxtrace_cache *c __maybe_unused,
+                               void *entry)
+{
+       free(entry);
+}
+
+int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
+                       struct auxtrace_cache_entry *entry)
+{
+       if (c->limit && ++c->cnt > c->limit)
+               auxtrace_cache__drop(c);
+
+       entry->key = key;
+       hlist_add_head(&entry->hash, &c->hashtable[hash_32(key, c->bits)]);
+
+       return 0;
+}
+
+void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key)
+{
+       struct auxtrace_cache_entry *entry;
+       struct hlist_head *hlist;
+
+       if (!c)
+               return NULL;
+
+       hlist = &c->hashtable[hash_32(key, c->bits)];
+       hlist_for_each_entry(entry, hlist, hash) {
+               if (entry->key == key)
+                       return entry;
+       }
+
+       return NULL;
+}
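+
+/*
+ * Illustrative use of the cache API above (a sketch, not part of this
+ * patch; 'struct my_entry' and 'key' are hypothetical):
+ *
+ *     struct my_entry {
+ *             struct auxtrace_cache_entry entry;
+ *             u64 payload;
+ *     };
+ *
+ *     struct auxtrace_cache *c = auxtrace_cache__new(10, sizeof(struct my_entry), 50);
+ *     struct my_entry *e = auxtrace_cache__alloc_entry(c);
+ *
+ *     e->payload = 42;
+ *     auxtrace_cache__add(c, key, &e->entry);
+ *     e = auxtrace_cache__lookup(c, key);
+ *     auxtrace_cache__free(c);
+ */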
diff --git a/tools/perf/util/auxtrace.h b/tools/perf/util/auxtrace.h
new file mode 100644 (file)
index 0000000..a171abb
--- /dev/null
@@ -0,0 +1,643 @@
+/*
+ * auxtrace.h: AUX area trace support
+ * Copyright (c) 2013-2015, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef __PERF_AUXTRACE_H
+#define __PERF_AUXTRACE_H
+
+#include <sys/types.h>
+#include <stdbool.h>
+#include <stddef.h>
+#include <linux/list.h>
+#include <linux/perf_event.h>
+#include <linux/types.h>
+
+#include "../perf.h"
+#include "event.h"
+#include "session.h"
+#include "debug.h"
+
+union perf_event;
+struct perf_session;
+struct perf_evlist;
+struct perf_tool;
+struct option;
+struct record_opts;
+struct auxtrace_info_event;
+struct events_stats;
+
+enum auxtrace_type {
+       PERF_AUXTRACE_UNKNOWN,
+};
+
+enum itrace_period_type {
+       PERF_ITRACE_PERIOD_INSTRUCTIONS,
+       PERF_ITRACE_PERIOD_TICKS,
+       PERF_ITRACE_PERIOD_NANOSECS,
+};
+
+/**
+ * struct itrace_synth_opts - AUX area tracing synthesis options.
+ * @set: indicates whether or not options have been set
+ * @inject: indicates that the event (not just the sample) must be fully
+ *          synthesized because 'perf inject' will write it out
+ * @instructions: whether to synthesize 'instructions' events
+ * @branches: whether to synthesize 'branches' events
+ * @transactions: whether to synthesize events for transactions
+ * @errors: whether to synthesize decoder error events
+ * @dont_decode: whether to skip decoding entirely
+ * @log: write a decoding log
+ * @calls: limit branch samples to calls (can be combined with @returns)
+ * @returns: limit branch samples to returns (can be combined with @calls)
+ * @callchain: add callchain to 'instructions' events
+ * @callchain_sz: maximum callchain size
+ * @period: 'instructions' events period
+ * @period_type: 'instructions' events period type
+ */
+struct itrace_synth_opts {
+       bool                    set;
+       bool                    inject;
+       bool                    instructions;
+       bool                    branches;
+       bool                    transactions;
+       bool                    errors;
+       bool                    dont_decode;
+       bool                    log;
+       bool                    calls;
+       bool                    returns;
+       bool                    callchain;
+       unsigned int            callchain_sz;
+       unsigned long long      period;
+       enum itrace_period_type period_type;
+};
+
+/**
+ * struct auxtrace_index_entry - indexes an AUX area tracing event within a
+ *                               perf.data file.
+ * @file_offset: offset within the perf.data file
+ * @sz: size of the event
+ */
+struct auxtrace_index_entry {
+       u64                     file_offset;
+       u64                     sz;
+};
+
+#define PERF_AUXTRACE_INDEX_ENTRY_COUNT 256
+
+/**
+ * struct auxtrace_index - index of AUX area tracing events within a perf.data
+ *                         file.
+ * @list: linking a number of arrays of entries
+ * @nr: number of entries
+ * @entries: array of entries
+ */
+struct auxtrace_index {
+       struct list_head        list;
+       size_t                  nr;
+       struct auxtrace_index_entry entries[PERF_AUXTRACE_INDEX_ENTRY_COUNT];
+};
+
+/**
+ * struct auxtrace - session callbacks to allow AUX area data decoding.
+ * @process_event: lets the decoder see all session events
+ * @process_auxtrace_event: process a PERF_RECORD_AUXTRACE event
+ * @flush_events: process any remaining data
+ * @free_events: free resources associated with event processing
+ * @free: free resources associated with the session
+ */
+struct auxtrace {
+       int (*process_event)(struct perf_session *session,
+                            union perf_event *event,
+                            struct perf_sample *sample,
+                            struct perf_tool *tool);
+       int (*process_auxtrace_event)(struct perf_session *session,
+                                     union perf_event *event,
+                                     struct perf_tool *tool);
+       int (*flush_events)(struct perf_session *session,
+                           struct perf_tool *tool);
+       void (*free_events)(struct perf_session *session);
+       void (*free)(struct perf_session *session);
+};
+
+/**
+ * struct auxtrace_buffer - a buffer containing AUX area tracing data.
+ * @list: buffers are queued in a list held by struct auxtrace_queue
+ * @size: size of the buffer in bytes
+ * @pid: in per-thread mode, the pid this buffer is associated with
+ * @tid: in per-thread mode, the tid this buffer is associated with
+ * @cpu: in per-cpu mode, the cpu this buffer is associated with
+ * @data: actual buffer data (can be null if the data has not been loaded)
+ * @data_offset: file offset at which the buffer can be read
+ * @mmap_addr: mmap address at which the buffer can be read
+ * @mmap_size: size of the mmap at @mmap_addr
+ * @data_needs_freeing: @data was malloc'd so free it when it is no longer
+ *                      needed
+ * @consecutive: the original data was split up and this buffer is consecutive
+ *               to the previous buffer
+ * @offset: offset as determined by aux_head / aux_tail members of struct
+ *          perf_event_mmap_page
+ * @reference: an implementation-specific reference determined when the data is
+ *             recorded
+ * @buffer_nr: used to number each buffer
+ * @use_size: implementation actually only uses this number of bytes
+ * @use_data: implementation actually only uses data starting at this address
+ */
+struct auxtrace_buffer {
+       struct list_head        list;
+       size_t                  size;
+       pid_t                   pid;
+       pid_t                   tid;
+       int                     cpu;
+       void                    *data;
+       off_t                   data_offset;
+       void                    *mmap_addr;
+       size_t                  mmap_size;
+       bool                    data_needs_freeing;
+       bool                    consecutive;
+       u64                     offset;
+       u64                     reference;
+       u64                     buffer_nr;
+       size_t                  use_size;
+       void                    *use_data;
+};
+
+/**
+ * struct auxtrace_queue - a queue of AUX area tracing data buffers.
+ * @head: head of buffer list
+ * @tid: in per-thread mode, the tid this queue is associated with
+ * @cpu: in per-cpu mode, the cpu this queue is associated with
+ * @set: %true once this queue has been dedicated to a specific thread or cpu
+ * @priv: implementation-specific data
+ */
+struct auxtrace_queue {
+       struct list_head        head;
+       pid_t                   tid;
+       int                     cpu;
+       bool                    set;
+       void                    *priv;
+};
+
+/**
+ * struct auxtrace_queues - an array of AUX area tracing queues.
+ * @queue_array: array of queues
+ * @nr_queues: number of queues
+ * @new_data: set whenever new data is queued
+ * @populated: queues have been fully populated using the auxtrace_index
+ * @next_buffer_nr: used to number each buffer
+ */
+struct auxtrace_queues {
+       struct auxtrace_queue   *queue_array;
+       unsigned int            nr_queues;
+       bool                    new_data;
+       bool                    populated;
+       u64                     next_buffer_nr;
+};
+
+/**
+ * struct auxtrace_heap_item - element of struct auxtrace_heap.
+ * @queue_nr: queue number
+ * @ordinal: value used for sorting (lowest ordinal is top of the heap),
+ *           expected to be a timestamp
+ */
+struct auxtrace_heap_item {
+       unsigned int            queue_nr;
+       u64                     ordinal;
+};
+
+/**
+ * struct auxtrace_heap - a heap suitable for sorting AUX area tracing queues.
+ * @heap_array: the heap
+ * @heap_cnt: the number of elements in the heap
+ * @heap_sz: maximum number of elements (grows as needed)
+ */
+struct auxtrace_heap {
+       struct auxtrace_heap_item       *heap_array;
+       unsigned int            heap_cnt;
+       unsigned int            heap_sz;
+};
+
+/**
+ * struct auxtrace_mmap - records an mmap of the auxtrace buffer.
+ * @base: address of mapped area
+ * @userpg: pointer to buffer's perf_event_mmap_page
+ * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
+ * @len: size of mapped area
+ * @prev: previous aux_head
+ * @idx: index of this mmap
+ * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
+ *       mmap), otherwise %0
+ * @cpu: cpu number for a per-cpu mmap, otherwise %-1
+ */
+struct auxtrace_mmap {
+       void            *base;
+       void            *userpg;
+       size_t          mask;
+       size_t          len;
+       u64             prev;
+       int             idx;
+       pid_t           tid;
+       int             cpu;
+};
+
+/**
+ * struct auxtrace_mmap_params - parameters to set up struct auxtrace_mmap.
+ * @mask: %0 if @len is not a power of two, otherwise (@len - %1)
+ * @offset: file offset of mapped area
+ * @len: size of mapped area
+ * @prot: mmap memory protection
+ * @idx: index of this mmap
+ * @tid: tid for a per-thread mmap (also set if there is only 1 tid on a per-cpu
+ *       mmap), otherwise %0
+ * @cpu: cpu number for a per-cpu mmap, otherwise %-1
+ */
+struct auxtrace_mmap_params {
+       size_t          mask;
+       off_t           offset;
+       size_t          len;
+       int             prot;
+       int             idx;
+       pid_t           tid;
+       int             cpu;
+};
+
+/**
+ * struct auxtrace_record - callbacks for recording AUX area data.
+ * @recording_options: validate and process recording options
+ * @info_priv_size: return the size of the private data in auxtrace_info_event
+ * @info_fill: fill in the private data in auxtrace_info_event
+ * @free: free this auxtrace record structure
+ * @snapshot_start: start a snapshot
+ * @snapshot_finish: finish a snapshot
+ * @find_snapshot: find data to snapshot within auxtrace mmap
+ * @parse_snapshot_options: parse snapshot options
+ * @reference: provide a 64-bit reference number for auxtrace_event
+ * @read_finish: called after reading from an auxtrace mmap
+ */
+struct auxtrace_record {
+       int (*recording_options)(struct auxtrace_record *itr,
+                                struct perf_evlist *evlist,
+                                struct record_opts *opts);
+       size_t (*info_priv_size)(struct auxtrace_record *itr);
+       int (*info_fill)(struct auxtrace_record *itr,
+                        struct perf_session *session,
+                        struct auxtrace_info_event *auxtrace_info,
+                        size_t priv_size);
+       void (*free)(struct auxtrace_record *itr);
+       int (*snapshot_start)(struct auxtrace_record *itr);
+       int (*snapshot_finish)(struct auxtrace_record *itr);
+       int (*find_snapshot)(struct auxtrace_record *itr, int idx,
+                            struct auxtrace_mmap *mm, unsigned char *data,
+                            u64 *head, u64 *old);
+       int (*parse_snapshot_options)(struct auxtrace_record *itr,
+                                     struct record_opts *opts,
+                                     const char *str);
+       u64 (*reference)(struct auxtrace_record *itr);
+       int (*read_finish)(struct auxtrace_record *itr, int idx);
+};
+
+#ifdef HAVE_AUXTRACE_SUPPORT
+
+/*
+ * In snapshot mode the mmapped page is read-only, which makes using
+ * __sync_val_compare_and_swap() problematic.  However, snapshot mode expects
+ * that the buffer is not updated while the snapshot is made (e.g. Intel PT
+ * disables the event), so there is no race anyway.
+ */
+static inline u64 auxtrace_mmap__read_snapshot_head(struct auxtrace_mmap *mm)
+{
+       struct perf_event_mmap_page *pc = mm->userpg;
+       u64 head = ACCESS_ONCE(pc->aux_head);
+
+       /* Ensure all reads are done after we read the head */
+       rmb();
+       return head;
+}
+
+static inline u64 auxtrace_mmap__read_head(struct auxtrace_mmap *mm)
+{
+       struct perf_event_mmap_page *pc = mm->userpg;
+#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
+       u64 head = ACCESS_ONCE(pc->aux_head);
+#else
+       u64 head = __sync_val_compare_and_swap(&pc->aux_head, 0, 0);
+#endif
+
+       /* Ensure all reads are done after we read the head */
+       rmb();
+       return head;
+}
+
+static inline void auxtrace_mmap__write_tail(struct auxtrace_mmap *mm, u64 tail)
+{
+       struct perf_event_mmap_page *pc = mm->userpg;
+#if BITS_PER_LONG != 64 && defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
+       u64 old_tail;
+#endif
+
+       /* Ensure all reads are done before we write the tail out */
+       mb();
+#if BITS_PER_LONG == 64 || !defined(HAVE_SYNC_COMPARE_AND_SWAP_SUPPORT)
+       pc->aux_tail = tail;
+#else
+       do {
+               old_tail = __sync_val_compare_and_swap(&pc->aux_tail, 0, 0);
+       } while (!__sync_bool_compare_and_swap(&pc->aux_tail, old_tail, tail));
+#endif
+}
+
+int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
+                       struct auxtrace_mmap_params *mp,
+                       void *userpg, int fd);
+void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
+void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
+                               off_t auxtrace_offset,
+                               unsigned int auxtrace_pages,
+                               bool auxtrace_overwrite);
+void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
+                                  struct perf_evlist *evlist, int idx,
+                                  bool per_cpu);
+
+typedef int (*process_auxtrace_t)(struct perf_tool *tool,
+                                 union perf_event *event, void *data1,
+                                 size_t len1, void *data2, size_t len2);
+
+int auxtrace_mmap__read(struct auxtrace_mmap *mm, struct auxtrace_record *itr,
+                       struct perf_tool *tool, process_auxtrace_t fn);
+
+int auxtrace_mmap__read_snapshot(struct auxtrace_mmap *mm,
+                                struct auxtrace_record *itr,
+                                struct perf_tool *tool, process_auxtrace_t fn,
+                                size_t snapshot_size);
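+
+/*
+ * A minimal sketch of a process_auxtrace_t callback (an illustration;
+ * write_out() is hypothetical).  Two data pointers are passed because the
+ * data may wrap around the end of the ring buffer, and the callback is
+ * also responsible for writing any alignment padding:
+ *
+ *     static int my_process_auxtrace(struct perf_tool *tool,
+ *                                    union perf_event *event, void *data1,
+ *                                    size_t len1, void *data2, size_t len2)
+ *     {
+ *             write_out(event, event->header.size);
+ *             write_out(data1, len1);
+ *             if (len2)
+ *                     write_out(data2, len2);
+ *             return 0;
+ *     }
+ */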
+
+int auxtrace_queues__init(struct auxtrace_queues *queues);
+int auxtrace_queues__add_event(struct auxtrace_queues *queues,
+                              struct perf_session *session,
+                              union perf_event *event, off_t data_offset,
+                              struct auxtrace_buffer **buffer_ptr);
+void auxtrace_queues__free(struct auxtrace_queues *queues);
+int auxtrace_queues__process_index(struct auxtrace_queues *queues,
+                                  struct perf_session *session);
+struct auxtrace_buffer *auxtrace_buffer__next(struct auxtrace_queue *queue,
+                                             struct auxtrace_buffer *buffer);
+void *auxtrace_buffer__get_data(struct auxtrace_buffer *buffer, int fd);
+void auxtrace_buffer__put_data(struct auxtrace_buffer *buffer);
+void auxtrace_buffer__drop_data(struct auxtrace_buffer *buffer);
+void auxtrace_buffer__free(struct auxtrace_buffer *buffer);
+
+int auxtrace_heap__add(struct auxtrace_heap *heap, unsigned int queue_nr,
+                      u64 ordinal);
+void auxtrace_heap__pop(struct auxtrace_heap *heap);
+void auxtrace_heap__free(struct auxtrace_heap *heap);
+
+struct auxtrace_cache_entry {
+       struct hlist_node hash;
+       u32 key;
+};
+
+struct auxtrace_cache *auxtrace_cache__new(unsigned int bits, size_t entry_size,
+                                          unsigned int limit_percent);
+void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache);
+void *auxtrace_cache__alloc_entry(struct auxtrace_cache *c);
+void auxtrace_cache__free_entry(struct auxtrace_cache *c, void *entry);
+int auxtrace_cache__add(struct auxtrace_cache *c, u32 key,
+                       struct auxtrace_cache_entry *entry);
+void *auxtrace_cache__lookup(struct auxtrace_cache *c, u32 key);
+
+struct auxtrace_record *auxtrace_record__init(struct perf_evlist *evlist,
+                                             int *err);
+
+int auxtrace_parse_snapshot_options(struct auxtrace_record *itr,
+                                   struct record_opts *opts,
+                                   const char *str);
+int auxtrace_record__options(struct auxtrace_record *itr,
+                            struct perf_evlist *evlist,
+                            struct record_opts *opts);
+size_t auxtrace_record__info_priv_size(struct auxtrace_record *itr);
+int auxtrace_record__info_fill(struct auxtrace_record *itr,
+                              struct perf_session *session,
+                              struct auxtrace_info_event *auxtrace_info,
+                              size_t priv_size);
+void auxtrace_record__free(struct auxtrace_record *itr);
+int auxtrace_record__snapshot_start(struct auxtrace_record *itr);
+int auxtrace_record__snapshot_finish(struct auxtrace_record *itr);
+int auxtrace_record__find_snapshot(struct auxtrace_record *itr, int idx,
+                                  struct auxtrace_mmap *mm,
+                                  unsigned char *data, u64 *head, u64 *old);
+u64 auxtrace_record__reference(struct auxtrace_record *itr);
+
+int auxtrace_index__auxtrace_event(struct list_head *head, union perf_event *event,
+                                  off_t file_offset);
+int auxtrace_index__write(int fd, struct list_head *head);
+int auxtrace_index__process(int fd, u64 size, struct perf_session *session,
+                           bool needs_swap);
+void auxtrace_index__free(struct list_head *head);
+
+void auxtrace_synth_error(struct auxtrace_error_event *auxtrace_error, int type,
+                         int code, int cpu, pid_t pid, pid_t tid, u64 ip,
+                         const char *msg);
+
+int perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr,
+                                        struct perf_tool *tool,
+                                        struct perf_session *session,
+                                        perf_event__handler_t process);
+int perf_event__process_auxtrace_info(struct perf_tool *tool,
+                                     union perf_event *event,
+                                     struct perf_session *session);
+s64 perf_event__process_auxtrace(struct perf_tool *tool,
+                                union perf_event *event,
+                                struct perf_session *session);
+int perf_event__process_auxtrace_error(struct perf_tool *tool,
+                                      union perf_event *event,
+                                      struct perf_session *session);
+int itrace_parse_synth_opts(const struct option *opt, const char *str,
+                           int unset);
+void itrace_synth_opts__set_default(struct itrace_synth_opts *synth_opts);
+
+size_t perf_event__fprintf_auxtrace_error(union perf_event *event, FILE *fp);
+void perf_session__auxtrace_error_inc(struct perf_session *session,
+                                     union perf_event *event);
+void events_stats__auxtrace_error_warn(const struct events_stats *stats);
+
+static inline int auxtrace__process_event(struct perf_session *session,
+                                         union perf_event *event,
+                                         struct perf_sample *sample,
+                                         struct perf_tool *tool)
+{
+       if (!session->auxtrace)
+               return 0;
+
+       return session->auxtrace->process_event(session, event, sample, tool);
+}
+
+static inline int auxtrace__flush_events(struct perf_session *session,
+                                        struct perf_tool *tool)
+{
+       if (!session->auxtrace)
+               return 0;
+
+       return session->auxtrace->flush_events(session, tool);
+}
+
+static inline void auxtrace__free_events(struct perf_session *session)
+{
+       if (!session->auxtrace)
+               return;
+
+       session->auxtrace->free_events(session);
+}
+
+static inline void auxtrace__free(struct perf_session *session)
+{
+       if (!session->auxtrace)
+               return;
+
+       session->auxtrace->free(session);
+}
+
+#else
+
+static inline struct auxtrace_record *
+auxtrace_record__init(struct perf_evlist *evlist __maybe_unused,
+                     int *err __maybe_unused)
+{
+       *err = 0;
+       return NULL;
+}
+
+static inline
+void auxtrace_record__free(struct auxtrace_record *itr __maybe_unused)
+{
+}
+
+static inline int
+perf_event__synthesize_auxtrace_info(struct auxtrace_record *itr __maybe_unused,
+                                    struct perf_tool *tool __maybe_unused,
+                                    struct perf_session *session __maybe_unused,
+                                    perf_event__handler_t process __maybe_unused)
+{
+       return -EINVAL;
+}
+
+static inline
+int auxtrace_record__options(struct auxtrace_record *itr __maybe_unused,
+                            struct perf_evlist *evlist __maybe_unused,
+                            struct record_opts *opts __maybe_unused)
+{
+       return 0;
+}
+
+#define perf_event__process_auxtrace_info              0
+#define perf_event__process_auxtrace                   0
+#define perf_event__process_auxtrace_error             0
+
+static inline
+void perf_session__auxtrace_error_inc(struct perf_session *session
+                                     __maybe_unused,
+                                     union perf_event *event
+                                     __maybe_unused)
+{
+}
+
+static inline
+void events_stats__auxtrace_error_warn(const struct events_stats *stats
+                                      __maybe_unused)
+{
+}
+
+static inline
+int itrace_parse_synth_opts(const struct option *opt __maybe_unused,
+                           const char *str __maybe_unused,
+                           int unset __maybe_unused)
+{
+       pr_err("AUX area tracing not supported\n");
+       return -EINVAL;
+}
+
+static inline
+int auxtrace_parse_snapshot_options(struct auxtrace_record *itr __maybe_unused,
+                                   struct record_opts *opts __maybe_unused,
+                                   const char *str)
+{
+       if (!str)
+               return 0;
+       pr_err("AUX area tracing not supported\n");
+       return -EINVAL;
+}
+
+static inline
+int auxtrace__process_event(struct perf_session *session __maybe_unused,
+                           union perf_event *event __maybe_unused,
+                           struct perf_sample *sample __maybe_unused,
+                           struct perf_tool *tool __maybe_unused)
+{
+       return 0;
+}
+
+static inline
+int auxtrace__flush_events(struct perf_session *session __maybe_unused,
+                          struct perf_tool *tool __maybe_unused)
+{
+       return 0;
+}
+
+static inline
+void auxtrace__free_events(struct perf_session *session __maybe_unused)
+{
+}
+
+static inline
+void auxtrace_cache__free(struct auxtrace_cache *auxtrace_cache __maybe_unused)
+{
+}
+
+static inline
+void auxtrace__free(struct perf_session *session __maybe_unused)
+{
+}
+
+static inline
+int auxtrace_index__write(int fd __maybe_unused,
+                         struct list_head *head __maybe_unused)
+{
+       return -EINVAL;
+}
+
+static inline
+int auxtrace_index__process(int fd __maybe_unused,
+                           u64 size __maybe_unused,
+                           struct perf_session *session __maybe_unused,
+                           bool needs_swap __maybe_unused)
+{
+       return -EINVAL;
+}
+
+static inline
+void auxtrace_index__free(struct list_head *head __maybe_unused)
+{
+}
+
+int auxtrace_mmap__mmap(struct auxtrace_mmap *mm,
+                       struct auxtrace_mmap_params *mp,
+                       void *userpg, int fd);
+void auxtrace_mmap__munmap(struct auxtrace_mmap *mm);
+void auxtrace_mmap_params__init(struct auxtrace_mmap_params *mp,
+                               off_t auxtrace_offset,
+                               unsigned int auxtrace_pages,
+                               bool auxtrace_overwrite);
+void auxtrace_mmap_params__set_idx(struct auxtrace_mmap_params *mp,
+                                  struct perf_evlist *evlist, int idx,
+                                  bool per_cpu);
+
+#endif
+
+#endif
index 61867dff5d5aa6dea4079d060514f1bd57a63630..1f6fc2323ef97d5e9fdea6f70a9028db1d14a37e 100644 (file)
@@ -43,6 +43,7 @@ int build_id__mark_dso_hit(struct perf_tool *tool __maybe_unused,
        if (al.map != NULL)
                al.map->dso->hit = 1;
 
+       thread__put(thread);
        return 0;
 }
 
@@ -59,8 +60,10 @@ static int perf_event__exit_del_thread(struct perf_tool *tool __maybe_unused,
        dump_printf("(%d:%d):(%d:%d)\n", event->fork.pid, event->fork.tid,
                    event->fork.ppid, event->fork.ptid);
 
-       if (thread)
+       if (thread) {
                machine__remove_thread(machine, thread);
+               thread__put(thread);
+       }
 
        return 0;
 }
@@ -159,15 +162,20 @@ static int write_buildid(const char *name, size_t name_len, u8 *build_id,
        return write_padded(fd, name, name_len + 1, len);
 }
 
-static int __dsos__write_buildid_table(struct list_head *head,
-                                      struct machine *machine,
-                                      pid_t pid, u16 misc, int fd)
+static int machine__write_buildid_table(struct machine *machine, int fd)
 {
+       int err = 0;
        char nm[PATH_MAX];
        struct dso *pos;
+       u16 kmisc = PERF_RECORD_MISC_KERNEL,
+           umisc = PERF_RECORD_MISC_USER;
 
-       dsos__for_each_with_build_id(pos, head) {
-               int err;
+       if (!machine__is_host(machine)) {
+               kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
+               umisc = PERF_RECORD_MISC_GUEST_USER;
+       }
+
+       dsos__for_each_with_build_id(pos, &machine->dsos.head) {
                const char *name;
                size_t name_len;
 
@@ -186,32 +194,12 @@ static int __dsos__write_buildid_table(struct list_head *head,
                        name_len = pos->long_name_len + 1;
                }
 
-               err = write_buildid(name, name_len, pos->build_id,
-                                   pid, misc, fd);
+               err = write_buildid(name, name_len, pos->build_id, machine->pid,
+                                   pos->kernel ? kmisc : umisc, fd);
                if (err)
-                       return err;
-       }
-
-       return 0;
-}
-
-static int machine__write_buildid_table(struct machine *machine, int fd)
-{
-       int err;
-       u16 kmisc = PERF_RECORD_MISC_KERNEL,
-           umisc = PERF_RECORD_MISC_USER;
-
-       if (!machine__is_host(machine)) {
-               kmisc = PERF_RECORD_MISC_GUEST_KERNEL;
-               umisc = PERF_RECORD_MISC_GUEST_USER;
+                       break;
        }
 
-       err = __dsos__write_buildid_table(&machine->kernel_dsos.head, machine,
-                                         machine->pid, kmisc, fd);
-       if (err == 0)
-               err = __dsos__write_buildid_table(&machine->user_dsos.head,
-                                                 machine, machine->pid, umisc,
-                                                 fd);
        return err;
 }
 
@@ -244,13 +232,7 @@ static int __dsos__hit_all(struct list_head *head)
 
 static int machine__hit_all_dsos(struct machine *machine)
 {
-       int err;
-
-       err = __dsos__hit_all(&machine->kernel_dsos.head);
-       if (err)
-               return err;
-
-       return __dsos__hit_all(&machine->user_dsos.head);
+       return __dsos__hit_all(&machine->dsos.head);
 }
 
 int dsos__hit_all(struct perf_session *session)
@@ -490,9 +472,7 @@ static int __dsos__cache_build_ids(struct list_head *head,
 
 static int machine__cache_build_ids(struct machine *machine)
 {
-       int ret = __dsos__cache_build_ids(&machine->kernel_dsos.head, machine);
-       ret |= __dsos__cache_build_ids(&machine->user_dsos.head, machine);
-       return ret;
+       return __dsos__cache_build_ids(&machine->dsos.head, machine);
 }
 
 int perf_session__cache_build_ids(struct perf_session *session)
@@ -517,11 +497,7 @@ int perf_session__cache_build_ids(struct perf_session *session)
 
 static bool machine__read_build_ids(struct machine *machine, bool with_hits)
 {
-       bool ret;
-
-       ret  = __dsos__read_build_ids(&machine->kernel_dsos.head, with_hits);
-       ret |= __dsos__read_build_ids(&machine->user_dsos.head, with_hits);
-       return ret;
+       return __dsos__read_build_ids(&machine->dsos.head, with_hits);
 }
 
 bool perf_session__read_build_ids(struct perf_session *session, bool with_hits)
index fbcca21d66ab9b6887f084b19bdff98109201612..c861373aaed33dafd233a7bf5eebefa4dfd9874b 100644 (file)
@@ -30,7 +30,6 @@ extern const char *perf_config_dirname(const char *, const char *);
 
 /* pager.c */
 extern void setup_pager(void);
-extern const char *pager_program;
 extern int pager_in_use(void);
 extern int pager_use_color;
 
index 6033a0a212ca5c255434ae2cf34c7370d785d858..679c2c6d8ade7daeace3d55300049aca7fe68a0b 100644 (file)
@@ -72,6 +72,10 @@ extern struct callchain_param callchain_param;
 struct callchain_list {
        u64                     ip;
        struct map_symbol       ms;
+       struct /* for TUI */ {
+               bool            unfolded;
+               bool            has_children;
+       };
        char                   *srcline;
        struct list_head        list;
 };
index 88f7be3994321f8ef717262909ac6c38853fc691..32e12ecfe9c576767f18a3cb42e6c5dedfc3f048 100644 (file)
@@ -115,23 +115,19 @@ static int add_cgroup(struct perf_evlist *evlist, char *str)
                        goto found;
                n++;
        }
-       if (cgrp->refcnt == 0)
+       if (atomic_read(&cgrp->refcnt) == 0)
                free(cgrp);
 
        return -1;
 found:
-       cgrp->refcnt++;
+       atomic_inc(&cgrp->refcnt);
        counter->cgrp = cgrp;
        return 0;
 }
 
 void close_cgroup(struct cgroup_sel *cgrp)
 {
-       if (!cgrp)
-               return;
-
-       /* XXX: not reentrant */
-       if (--cgrp->refcnt == 0) {
+       if (cgrp && atomic_dec_and_test(&cgrp->refcnt)) {
                close(cgrp->fd);
                zfree(&cgrp->name);
                free(cgrp);
index 89acd6debdc5fc06d28889cb95eb30d7af98642f..b4b8cb42fe5e04b7458fe3b57f764b0bda6c18db 100644 (file)
@@ -1,12 +1,14 @@
 #ifndef __CGROUP_H__
 #define __CGROUP_H__
 
+#include <linux/atomic.h>
+
 struct option;
 
 struct cgroup_sel {
        char *name;
        int fd;
-       int refcnt;
+       atomic_t refcnt;
 };
 
 
index b2bb59df65e10c10cfe6dd1aacb495d4804a6a4f..21b7ff382c3f0dfb2e0bff43074adae3c0b10973 100644 (file)
@@ -2,24 +2,27 @@
 #include "util.h"
 #include <stdlib.h>
 #include <stdio.h>
+#include <linux/atomic.h>
 
 struct comm_str {
        char *str;
        struct rb_node rb_node;
-       int ref;
+       atomic_t refcnt;
 };
 
 /* Should perhaps be moved to struct machine */
 static struct rb_root comm_str_root;
 
-static void comm_str__get(struct comm_str *cs)
+static struct comm_str *comm_str__get(struct comm_str *cs)
 {
-       cs->ref++;
+       if (cs)
+               atomic_inc(&cs->refcnt);
+       return cs;
 }
 
 static void comm_str__put(struct comm_str *cs)
 {
-       if (!--cs->ref) {
+       if (cs && atomic_dec_and_test(&cs->refcnt)) {
                rb_erase(&cs->rb_node, &comm_str_root);
                zfree(&cs->str);
                free(cs);
@@ -40,6 +43,8 @@ static struct comm_str *comm_str__alloc(const char *str)
                return NULL;
        }
 
+       atomic_set(&cs->refcnt, 0);
+
        return cs;
 }
 
index dd17c9a32fbcfcf3c55c8cc33a44349582f4bb89..5bfc1198ab465c1873c7a112eefaa97bf286ee6f 100644 (file)
@@ -14,6 +14,7 @@
 #include <babeltrace/ctf-writer/event.h>
 #include <babeltrace/ctf-writer/event-types.h>
 #include <babeltrace/ctf-writer/event-fields.h>
+#include <babeltrace/ctf-ir/utils.h>
 #include <babeltrace/ctf/events.h>
 #include <traceevent/event-parse.h>
 #include "asm/bug.h"
@@ -38,12 +39,21 @@ struct evsel_priv {
        struct bt_ctf_event_class *event_class;
 };
 
+#define MAX_CPUS       4096
+
+struct ctf_stream {
+       struct bt_ctf_stream *stream;
+       int cpu;
+       u32 count;
+};
+
 struct ctf_writer {
        /* writer primitives */
-       struct bt_ctf_writer            *writer;
-       struct bt_ctf_stream            *stream;
-       struct bt_ctf_stream_class      *stream_class;
-       struct bt_ctf_clock             *clock;
+       struct bt_ctf_writer             *writer;
+       struct ctf_stream               **stream;
+       int                               stream_cnt;
+       struct bt_ctf_stream_class       *stream_class;
+       struct bt_ctf_clock              *clock;
 
        /* data types */
        union {
@@ -65,6 +75,9 @@ struct convert {
 
        u64                     events_size;
        u64                     events_count;
+
+       /* Ordered events configured queue size. */
+       u64                     queue_size;
 };
 
 static int value_set(struct bt_ctf_field_type *type,
@@ -153,6 +166,43 @@ get_tracepoint_field_type(struct ctf_writer *cw, struct format_field *field)
                return cw->data.u32;
 }
 
+static unsigned long long adjust_signedness(unsigned long long value_int, int size)
+{
+       unsigned long long value_mask;
+
+       /*
+        * value_mask = (1ULL << (size * 8 - 1)) - 1.
+        * Set value_mask explicitly per size for readability.
+        */
+       switch (size) {
+       case 1:
+               value_mask = 0x7fULL;
+               break;
+       case 2:
+               value_mask = 0x7fffULL;
+               break;
+       case 4:
+               value_mask = 0x7fffffffULL;
+               break;
+       case 8:
+               /*
+                * For a 64-bit value, return it as is. There are no
+                * high bits to fill.
+                */
+               /* Fall through */
+       default:
+               /* BUG! */
+               return value_int;
+       }
+
+       /* If it is a positive value, don't adjust. */
+       if ((value_int & (~0ULL - value_mask)) == 0)
+               return value_int;
+
+       /* Fill the upper bits of value_int with 1s to make it a negative long long. */
+       return (value_int & value_mask) | ~value_mask;
+}
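+
+/*
+ * For example (illustration): adjust_signedness(0xfe, 1) returns
+ * 0xfffffffffffffffe, i.e. (unsigned long long)-2, while
+ * adjust_signedness(0x7f, 1) returns 0x7f unchanged.
+ */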
+
 static int add_tracepoint_field_value(struct ctf_writer *cw,
                                      struct bt_ctf_event_class *event_class,
                                      struct bt_ctf_event *event,
@@ -164,7 +214,6 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
        struct bt_ctf_field *field;
        const char *name = fmtf->name;
        void *data = sample->raw_data;
-       unsigned long long value_int;
        unsigned long flags = fmtf->flags;
        unsigned int n_items;
        unsigned int i;
@@ -172,6 +221,7 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
        unsigned int len;
        int ret;
 
+       name = fmtf->alias;
        offset = fmtf->offset;
        len = fmtf->size;
        if (flags & FIELD_IS_STRING)
@@ -208,11 +258,6 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
        type = get_tracepoint_field_type(cw, fmtf);
 
        for (i = 0; i < n_items; i++) {
-               if (!(flags & FIELD_IS_STRING))
-                       value_int = pevent_read_number(
-                                       fmtf->event->pevent,
-                                       data + offset + i * len, len);
-
                if (flags & FIELD_IS_ARRAY)
                        field = bt_ctf_field_array_get_field(array_field, i);
                else
@@ -226,12 +271,21 @@ static int add_tracepoint_field_value(struct ctf_writer *cw,
                if (flags & FIELD_IS_STRING)
                        ret = bt_ctf_field_string_set_value(field,
                                        data + offset + i * len);
-               else if (!(flags & FIELD_IS_SIGNED))
-                       ret = bt_ctf_field_unsigned_integer_set_value(
-                                       field, value_int);
-               else
-                       ret = bt_ctf_field_signed_integer_set_value(
-                                       field, value_int);
+               else {
+                       unsigned long long value_int;
+
+                       value_int = pevent_read_number(
+                                       fmtf->event->pevent,
+                                       data + offset + i * len, len);
+
+                       if (!(flags & FIELD_IS_SIGNED))
+                               ret = bt_ctf_field_unsigned_integer_set_value(
+                                               field, value_int);
+                       else
+                               ret = bt_ctf_field_signed_integer_set_value(
+                                               field, adjust_signedness(value_int, len));
+               }
+
                if (ret) {
                        pr_err("failed to set file value %s\n", name);
                        goto err_put_field;
@@ -346,12 +400,6 @@ static int add_generic_values(struct ctf_writer *cw,
                        return -1;
        }
 
-       if (type & PERF_SAMPLE_CPU) {
-               ret = value_set_u32(cw, event, "perf_cpu", sample->cpu);
-               if (ret)
-                       return -1;
-       }
-
        if (type & PERF_SAMPLE_PERIOD) {
                ret = value_set_u64(cw, event, "perf_period", sample->period);
                if (ret)
@@ -381,6 +429,129 @@ static int add_generic_values(struct ctf_writer *cw,
        return 0;
 }
 
+static int ctf_stream__flush(struct ctf_stream *cs)
+{
+       int err = 0;
+
+       if (cs) {
+               err = bt_ctf_stream_flush(cs->stream);
+               if (err)
+                       pr_err("CTF stream %d flush failed\n", cs->cpu);
+
+               pr("Flush stream for cpu %d (%u samples)\n",
+                  cs->cpu, cs->count);
+
+               cs->count = 0;
+       }
+
+       return err;
+}
+
+static struct ctf_stream *ctf_stream__create(struct ctf_writer *cw, int cpu)
+{
+       struct ctf_stream *cs;
+       struct bt_ctf_field *pkt_ctx   = NULL;
+       struct bt_ctf_field *cpu_field = NULL;
+       struct bt_ctf_stream *stream   = NULL;
+       int ret;
+
+       cs = zalloc(sizeof(*cs));
+       if (!cs) {
+               pr_err("Failed to allocate ctf stream\n");
+               return NULL;
+       }
+
+       stream = bt_ctf_writer_create_stream(cw->writer, cw->stream_class);
+       if (!stream) {
+               pr_err("Failed to create CTF stream\n");
+               goto out;
+       }
+
+       pkt_ctx = bt_ctf_stream_get_packet_context(stream);
+       if (!pkt_ctx) {
+               pr_err("Failed to obtain packet context\n");
+               goto out;
+       }
+
+       cpu_field = bt_ctf_field_structure_get_field(pkt_ctx, "cpu_id");
+       bt_ctf_field_put(pkt_ctx);
+       if (!cpu_field) {
+               pr_err("Failed to obtain cpu field\n");
+               goto out;
+       }
+
+       ret = bt_ctf_field_unsigned_integer_set_value(cpu_field, (u32) cpu);
+       if (ret) {
+               pr_err("Failed to update CPU number\n");
+               goto out;
+       }
+
+       bt_ctf_field_put(cpu_field);
+
+       cs->cpu    = cpu;
+       cs->stream = stream;
+       return cs;
+
+out:
+       if (cpu_field)
+               bt_ctf_field_put(cpu_field);
+       if (stream)
+               bt_ctf_stream_put(stream);
+
+       free(cs);
+       return NULL;
+}
+
+static void ctf_stream__delete(struct ctf_stream *cs)
+{
+       if (cs) {
+               bt_ctf_stream_put(cs->stream);
+               free(cs);
+       }
+}
+
+static struct ctf_stream *ctf_stream(struct ctf_writer *cw, int cpu)
+{
+       struct ctf_stream *cs = cw->stream[cpu];
+
+       if (!cs) {
+               cs = ctf_stream__create(cw, cpu);
+               cw->stream[cpu] = cs;
+       }
+
+       return cs;
+}
+
+static int get_sample_cpu(struct ctf_writer *cw, struct perf_sample *sample,
+                         struct perf_evsel *evsel)
+{
+       int cpu = 0;
+
+       if (evsel->attr.sample_type & PERF_SAMPLE_CPU)
+               cpu = sample->cpu;
+
+       if (cpu >= cw->stream_cnt) {
+               pr_err("Event was recorded for CPU %d, limit is at %d.\n",
+                       cpu, cw->stream_cnt);
+               cpu = 0;
+       }
+
+       return cpu;
+}
+
+#define STREAM_FLUSH_COUNT 100000
+
+/*
+ * Currently we have no way to determine when to flush
+ * the stream other than keeping track of the number of
+ * events and checking it against a threshold.
+ */
+static bool is_flush_needed(struct ctf_stream *cs)
+{
+       return cs->count >= STREAM_FLUSH_COUNT;
+}
+
 static int process_sample_event(struct perf_tool *tool,
                                union perf_event *_event __maybe_unused,
                                struct perf_sample *sample,
@@ -390,6 +561,7 @@ static int process_sample_event(struct perf_tool *tool,
        struct convert *c = container_of(tool, struct convert, tool);
        struct evsel_priv *priv = evsel->priv;
        struct ctf_writer *cw = &c->writer;
+       struct ctf_stream *cs;
        struct bt_ctf_event_class *event_class;
        struct bt_ctf_event *event;
        int ret;
@@ -424,9 +596,93 @@ static int process_sample_event(struct perf_tool *tool,
                        return -1;
        }
 
-       bt_ctf_stream_append_event(cw->stream, event);
+       cs = ctf_stream(cw, get_sample_cpu(cw, sample, evsel));
+       if (cs) {
+               if (is_flush_needed(cs))
+                       ctf_stream__flush(cs);
+
+               cs->count++;
+               bt_ctf_stream_append_event(cs->stream, event);
+       }
+
        bt_ctf_event_put(event);
-       return 0;
+       return cs ? 0 : -1;
+}
+
+/* If dup < 0, add a prefix.  Otherwise, add a _dupl_X suffix. */
+static char *change_name(char *name, char *orig_name, int dup)
+{
+       char *new_name = NULL;
+       size_t len;
+
+       if (!name)
+               name = orig_name;
+
+       if (dup >= 10)
+               goto out;
+       /*
+        * Add a '_' prefix to a potential keyword.  According to
+        * Mathieu Desnoyers (https://lkml.org/lkml/2015/1/23/652),
+        * further CTF spec updates may require us to use '$'.
+        */
+       if (dup < 0)
+               len = strlen(name) + sizeof("_");
+       else
+               len = strlen(orig_name) + sizeof("_dupl_X");
+
+       new_name = malloc(len);
+       if (!new_name)
+               goto out;
+
+       if (dup < 0)
+               snprintf(new_name, len, "_%s", name);
+       else
+               snprintf(new_name, len, "%s_dupl_%d", orig_name, dup);
+
+out:
+       if (name != orig_name)
+               free(name);
+       return new_name;
+}
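+
+/*
+ * For example (illustration): change_name(NULL, "int", -1) returns "_int",
+ * and change_name(NULL, "common_type", 2) returns "common_type_dupl_2".
+ */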
+
+static int event_class_add_field(struct bt_ctf_event_class *event_class,
+               struct bt_ctf_field_type *type,
+               struct format_field *field)
+{
+       struct bt_ctf_field_type *t = NULL;
+       char *name;
+       int dup = 1;
+       int ret;
+
+       /* alias was already assigned */
+       if (field->alias != field->name)
+               return bt_ctf_event_class_add_field(event_class, type,
+                               (char *)field->alias);
+
+       name = field->name;
+
+       /* If 'name' is a keyword, add a prefix. */
+       if (bt_ctf_validate_identifier(name))
+               name = change_name(name, field->name, -1);
+
+       if (!name) {
+               pr_err("Failed to fix invalid identifier.");
+               return -1;
+       }
+       while ((t = bt_ctf_event_class_get_field_by_name(event_class, name))) {
+               bt_ctf_field_type_put(t);
+               name = change_name(name, field->name, dup++);
+               if (!name) {
+                       pr_err("Failed to create dup name for '%s'\n", field->name);
+                       return -1;
+               }
+       }
+
+       ret = bt_ctf_event_class_add_field(event_class, type, name);
+       if (!ret)
+               field->alias = name;
+
+       return ret;
 }
 
 static int add_tracepoint_fields_types(struct ctf_writer *cw,
@@ -457,14 +713,14 @@ static int add_tracepoint_fields_types(struct ctf_writer *cw,
                if (flags & FIELD_IS_ARRAY)
                        type = bt_ctf_field_type_array_create(type, field->arraylen);
 
-               ret = bt_ctf_event_class_add_field(event_class, type,
-                               field->name);
+               ret = event_class_add_field(event_class, type, field);
 
                if (flags & FIELD_IS_ARRAY)
                        bt_ctf_field_type_put(type);
 
                if (ret) {
-                       pr_err("Failed to add field '%s\n", field->name);
+                       pr_err("Failed to add field '%s': %d\n",
+                                       field->name, ret);
                        return -1;
                }
        }
@@ -508,7 +764,7 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
        do {                                                            \
                pr2("  field '%s'\n", n);                               \
                if (bt_ctf_event_class_add_field(cl, t, n)) {           \
-                       pr_err("Failed to add field '%s;\n", n);        \
+                       pr_err("Failed to add field '%s';\n", n);       \
                        return -1;                                      \
                }                                                       \
        } while (0)
@@ -528,9 +784,6 @@ static int add_generic_types(struct ctf_writer *cw, struct perf_evsel *evsel,
        if (type & PERF_SAMPLE_STREAM_ID)
                ADD_FIELD(event_class, cw->data.u64, "perf_stream_id");
 
-       if (type & PERF_SAMPLE_CPU)
-               ADD_FIELD(event_class, cw->data.u32, "perf_cpu");
-
        if (type & PERF_SAMPLE_PERIOD)
                ADD_FIELD(event_class, cw->data.u64, "perf_period");
 
@@ -604,6 +857,39 @@ static int setup_events(struct ctf_writer *cw, struct perf_session *session)
        return 0;
 }
 
+static int setup_streams(struct ctf_writer *cw, struct perf_session *session)
+{
+       struct ctf_stream **stream;
+       struct perf_header *ph = &session->header;
+       int ncpus;
+
+       /*
+        * Try to get the number of cpus used in the data file;
+        * if not present, fall back to MAX_CPUS.
+        */
+       ncpus = ph->env.nr_cpus_avail ?: MAX_CPUS;
+
+       stream = zalloc(sizeof(*stream) * ncpus);
+       if (!stream) {
+               pr_err("Failed to allocate streams.\n");
+               return -ENOMEM;
+       }
+
+       cw->stream     = stream;
+       cw->stream_cnt = ncpus;
+       return 0;
+}
+
+static void free_streams(struct ctf_writer *cw)
+{
+       int cpu;
+
+       for (cpu = 0; cpu < cw->stream_cnt; cpu++)
+               ctf_stream__delete(cw->stream[cpu]);
+
+       free(cw->stream);
+}
+
 static int ctf_writer__setup_env(struct ctf_writer *cw,
                                 struct perf_session *session)
 {
@@ -713,7 +999,7 @@ static void ctf_writer__cleanup(struct ctf_writer *cw)
        ctf_writer__cleanup_data(cw);
 
        bt_ctf_clock_put(cw->clock);
-       bt_ctf_stream_put(cw->stream);
+       free_streams(cw);
        bt_ctf_stream_class_put(cw->stream_class);
        bt_ctf_writer_put(cw->writer);
 
@@ -725,8 +1011,9 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path)
 {
        struct bt_ctf_writer            *writer;
        struct bt_ctf_stream_class      *stream_class;
-       struct bt_ctf_stream            *stream;
        struct bt_ctf_clock             *clock;
+       struct bt_ctf_field_type        *pkt_ctx_type;
+       int                             ret;
 
        /* CTF writer */
        writer = bt_ctf_writer_create(path);
@@ -767,14 +1054,15 @@ static int ctf_writer__init(struct ctf_writer *cw, const char *path)
        if (ctf_writer__init_data(cw))
                goto err_cleanup;
 
-       /* CTF stream instance */
-       stream = bt_ctf_writer_create_stream(writer, stream_class);
-       if (!stream) {
-               pr("Failed to create CTF stream.\n");
+       /* Add cpu_id for packet context */
+       pkt_ctx_type = bt_ctf_stream_class_get_packet_context_type(stream_class);
+       if (!pkt_ctx_type)
                goto err_cleanup;
-       }
 
-       cw->stream = stream;
+       ret = bt_ctf_field_type_structure_add_field(pkt_ctx_type, cw->data.u32, "cpu_id");
+       bt_ctf_field_type_put(pkt_ctx_type);
+       if (ret)
+               goto err_cleanup;
 
        /* CTF clock writer setup */
        if (bt_ctf_writer_add_clock(writer, clock)) {
@@ -791,6 +1079,28 @@ err:
        return -1;
 }
 
+static int ctf_writer__flush_streams(struct ctf_writer *cw)
+{
+       int cpu, ret = 0;
+
+       for (cpu = 0; cpu < cw->stream_cnt && !ret; cpu++)
+               ret = ctf_stream__flush(cw->stream[cpu]);
+
+       return ret;
+}
+
+static int convert__config(const char *var, const char *value, void *cb)
+{
+       struct convert *c = cb;
+
+       if (!strcmp(var, "convert.queue-size")) {
+               c->queue_size = perf_config_u64(var, value);
+               return 0;
+       }
+
+       return perf_default_config(var, value, cb);
+}
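+
+/*
+ * The queue size can be set via perfconfig (illustrative values, not part
+ * of this patch):
+ *
+ *     [convert]
+ *             queue-size = 100000
+ */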
+
 int bt_convert__perf2ctf(const char *input, const char *path, bool force)
 {
        struct perf_session *session;
@@ -817,6 +1127,8 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
        struct ctf_writer *cw = &c.writer;
        int err = -1;
 
+       perf_config(convert__config, &c);
+
        /* CTF writer */
        if (ctf_writer__init(cw, path))
                return -1;
@@ -826,6 +1138,11 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
        if (!session)
                goto free_writer;
 
+       if (c.queue_size) {
+               ordered_events__set_alloc_size(&session->ordered_events,
+                                              c.queue_size);
+       }
+
        /* CTF writer env/clock setup  */
        if (ctf_writer__setup_env(cw, session))
                goto free_session;
@@ -834,9 +1151,14 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
        if (setup_events(cw, session))
                goto free_session;
 
+       if (setup_streams(cw, session))
+               goto free_session;
+
        err = perf_session__process_events(session);
        if (!err)
-               err = bt_ctf_stream_flush(cw->stream);
+               err = ctf_writer__flush_streams(cw);
+       else
+               pr_err("Error during conversion.\n");
 
        fprintf(stderr,
                "[ perf data convert: Converted '%s' into CTF data '%s' ]\n",
@@ -847,11 +1169,15 @@ int bt_convert__perf2ctf(const char *input, const char *path, bool force)
                (double) c.events_size / 1024.0 / 1024.0,
                c.events_count);
 
-       /* its all good */
-free_session:
        perf_session__delete(session);
+       ctf_writer__cleanup(cw);
+
+       return err;
 
+free_session:
+       perf_session__delete(session);
 free_writer:
        ctf_writer__cleanup(cw);
+       pr_err("Error during conversion setup.\n");
        return err;
 }
index bb39a3ffc70b3951f88f9260f01836cc4e959fc9..1c9689e4cc179a3e931e1b3e0b427accb7553cba 100644 (file)
@@ -122,6 +122,7 @@ int db_export__machine(struct db_export *dbe, struct machine *machine)
 int db_export__thread(struct db_export *dbe, struct thread *thread,
                      struct machine *machine, struct comm *comm)
 {
+       struct thread *main_thread;
        u64 main_thread_db_id = 0;
        int err;
 
@@ -131,8 +132,6 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
        thread->db_id = ++dbe->thread_last_db_id;
 
        if (thread->pid_ != -1) {
-               struct thread *main_thread;
-
                if (thread->pid_ == thread->tid) {
                        main_thread = thread;
                } else {
@@ -144,14 +143,16 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
                        err = db_export__thread(dbe, main_thread, machine,
                                                comm);
                        if (err)
-                               return err;
+                               goto out_put;
                        if (comm) {
                                err = db_export__comm_thread(dbe, comm, thread);
                                if (err)
-                                       return err;
+                                       goto out_put;
                        }
                }
                main_thread_db_id = main_thread->db_id;
+               if (main_thread != thread)
+                       thread__put(main_thread);
        }
 
        if (dbe->export_thread)
@@ -159,6 +160,10 @@ int db_export__thread(struct db_export *dbe, struct thread *thread,
                                          machine);
 
        return 0;
+
+out_put:
+       thread__put(main_thread);
+       return err;
 }
 
 int db_export__comm(struct db_export *dbe, struct comm *comm,
@@ -229,7 +234,7 @@ int db_export__symbol(struct db_export *dbe, struct symbol *sym,
 static struct thread *get_main_thread(struct machine *machine, struct thread *thread)
 {
        if (thread->pid_ == thread->tid)
-               return thread;
+               return thread__get(thread);
 
        if (thread->pid_ == -1)
                return NULL;
@@ -309,12 +314,12 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
 
        err = db_export__thread(dbe, thread, al->machine, comm);
        if (err)
-               return err;
+               goto out_put;
 
        if (comm) {
                err = db_export__comm(dbe, comm, main_thread);
                if (err)
-                       return err;
+                       goto out_put;
                es.comm_db_id = comm->db_id;
        }
 
@@ -322,7 +327,7 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
 
        err = db_ids_from_al(dbe, al, &es.dso_db_id, &es.sym_db_id, &es.offset);
        if (err)
-               return err;
+               goto out_put;
 
        if ((evsel->attr.sample_type & PERF_SAMPLE_ADDR) &&
            sample_addr_correlates_sym(&evsel->attr)) {
@@ -332,20 +337,22 @@ int db_export__sample(struct db_export *dbe, union perf_event *event,
                err = db_ids_from_al(dbe, &addr_al, &es.addr_dso_db_id,
                                     &es.addr_sym_db_id, &es.addr_offset);
                if (err)
-                       return err;
+                       goto out_put;
                if (dbe->crp) {
                        err = thread_stack__process(thread, comm, sample, al,
                                                    &addr_al, es.db_id,
                                                    dbe->crp);
                        if (err)
-                               return err;
+                               goto out_put;
                }
        }
 
        if (dbe->export_sample)
-               return dbe->export_sample(dbe, &es);
+               err = dbe->export_sample(dbe, &es);
 
-       return 0;
+out_put:
+       thread__put(main_thread);
+       return err;
 }
 
 static struct {
index fc0ddd5792a97f884e7ed142ad12fe87c2bfe408..7c0c08386a1d9d6fb5e8cba6c838e5c3c1949bd7 100644 (file)
@@ -4,6 +4,7 @@
 #include "symbol.h"
 #include "dso.h"
 #include "machine.h"
+#include "auxtrace.h"
 #include "util.h"
 #include "debug.h"
 
@@ -165,12 +166,28 @@ bool is_supported_compression(const char *ext)
        return false;
 }
 
-bool is_kernel_module(const char *pathname)
+bool is_kernel_module(const char *pathname, int cpumode)
 {
        struct kmod_path m;
-
-       if (kmod_path__parse(&m, pathname))
-               return NULL;
+       int mode = cpumode & PERF_RECORD_MISC_CPUMODE_MASK;
+
+       WARN_ONCE(mode != cpumode,
+                 "Internal error: passing unmasked cpumode (%x) to is_kernel_module",
+                 cpumode);
+
+       switch (mode) {
+       case PERF_RECORD_MISC_USER:
+       case PERF_RECORD_MISC_HYPERVISOR:
+       case PERF_RECORD_MISC_GUEST_USER:
+               return false;
+       /* Treat PERF_RECORD_MISC_CPUMODE_UNKNOWN as kernel */
+       default:
+               if (kmod_path__parse(&m, pathname)) {
+                       pr_err("Failed to check whether %s is a kernel module or not. Assume it is.",
+                                       pathname);
+                       return true;
+               }
+       }
 
        return m.kmod;
 }
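
Callers are now expected to pass the event's cpumode so that user-space mappings are never misparsed as kernel modules. A sketch of the intended call site, assuming the usual perf event layout (the two handlers are hypothetical):

    static int process_mmap_event(union perf_event *event)
    {
            int cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;

            /* Only kernel-side cpumodes are candidates for module detection */
            if (is_kernel_module(event->mmap.filename, cpumode))
                    return handle_module_map(event);        /* hypothetical */

            return handle_user_map(event);                  /* hypothetical */
    }
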
@@ -214,12 +231,33 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 {
        const char *name = strrchr(path, '/');
        const char *ext  = strrchr(path, '.');
+       bool is_simple_name = false;
 
        memset(m, 0x0, sizeof(*m));
        name = name ? name + 1 : path;
 
+       /*
+        * '.' is also a valid character for a module name. For example,
+        * [aaa.bbb] is a valid module name, so '[' must take higher
+        * priority than the '.ko' suffix.
+        *
+        * The kernel names come from machine__mmap_name. Such names
+        * belong to the kernel itself, not to a kernel module.
+        */
+       if (name[0] == '[') {
+               is_simple_name = true;
+               if ((strncmp(name, "[kernel.kallsyms]", 17) == 0) ||
+                   (strncmp(name, "[guest.kernel.kallsyms", 22) == 0) ||
+                   (strncmp(name, "[vdso]", 6) == 0) ||
+                   (strncmp(name, "[vsyscall]", 10) == 0)) {
+                       m->kmod = false;
+
+               } else
+                       m->kmod = true;
+       }
+
        /* No extension, just return name. */
-       if (ext == NULL) {
+       if ((ext == NULL) || is_simple_name) {
                if (alloc_name) {
                        m->name = strdup(name);
                        return m->name ? 0 : -ENOMEM;
@@ -264,6 +302,7 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
  */
 static LIST_HEAD(dso__data_open);
 static long dso__data_open_cnt;
+static pthread_mutex_t dso__data_open_lock = PTHREAD_MUTEX_INITIALIZER;
 
 static void dso__list_add(struct dso *dso)
 {
@@ -433,18 +472,12 @@ static void check_data_close(void)
  */
 void dso__data_close(struct dso *dso)
 {
+       pthread_mutex_lock(&dso__data_open_lock);
        close_dso(dso);
+       pthread_mutex_unlock(&dso__data_open_lock);
 }
 
-/**
- * dso__data_fd - Get dso's data file descriptor
- * @dso: dso object
- * @machine: machine object
- *
- * External interface to find dso's file, open it and
- * returns file descriptor.
- */
-int dso__data_fd(struct dso *dso, struct machine *machine)
+static void try_to_open_dso(struct dso *dso, struct machine *machine)
 {
        enum dso_binary_type binary_type_data[] = {
                DSO_BINARY_TYPE__BUILD_ID_CACHE,
@@ -453,11 +486,8 @@ int dso__data_fd(struct dso *dso, struct machine *machine)
        };
        int i = 0;
 
-       if (dso->data.status == DSO_DATA_STATUS_ERROR)
-               return -1;
-
        if (dso->data.fd >= 0)
-               goto out;
+               return;
 
        if (dso->binary_type != DSO_BINARY_TYPE__NOT_FOUND) {
                dso->data.fd = open_dso(dso, machine);
@@ -477,10 +507,38 @@ out:
                dso->data.status = DSO_DATA_STATUS_OK;
        else
                dso->data.status = DSO_DATA_STATUS_ERROR;
+}
+
+/**
+ * dso__data_get_fd - Get dso's data file descriptor
+ * @dso: dso object
+ * @machine: machine object
+ *
+ * External interface to find a dso's file, open it and
+ * return its file descriptor.  It must be paired with
+ * dso__data_put_fd() whenever it returns a non-negative value.
+ */
+int dso__data_get_fd(struct dso *dso, struct machine *machine)
+{
+       if (dso->data.status == DSO_DATA_STATUS_ERROR)
+               return -1;
+
+       if (pthread_mutex_lock(&dso__data_open_lock) != 0)
+               return -1;
+
+       try_to_open_dso(dso, machine);
+
+       if (dso->data.fd < 0)
+               pthread_mutex_unlock(&dso__data_open_lock);
 
        return dso->data.fd;
 }
 
+void dso__data_put_fd(struct dso *dso __maybe_unused)
+{
+       pthread_mutex_unlock(&dso__data_open_lock);
+}
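
dso__data_get_fd() returns with dso__data_open_lock held precisely because dso->data.fd draws on a global, limited pool of descriptors (RLIMIT_NOFILE) and can be closed by another thread at any time; holding the lock keeps the fd valid until the matching put. A minimal sketch of the contract (the work on the fd is a hypothetical placeholder):

    static int with_dso_fd(struct dso *dso, struct machine *machine)
    {
            int ret = -1;
            int fd = dso__data_get_fd(dso, machine);  /* takes the open lock */

            if (fd >= 0) {
                    ret = use_fd_somehow(fd);         /* hypothetical */
                    dso__data_put_fd(dso);            /* releases the lock */
            }
            return ret;
    }
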
+
 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 {
        u32 flag = 1 << by;
@@ -494,10 +552,12 @@ bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by)
 }
 
 static void
-dso_cache__free(struct rb_root *root)
+dso_cache__free(struct dso *dso)
 {
+       struct rb_root *root = &dso->data.cache;
        struct rb_node *next = rb_first(root);
 
+       pthread_mutex_lock(&dso->lock);
        while (next) {
                struct dso_cache *cache;
 
@@ -506,10 +566,12 @@ dso_cache__free(struct rb_root *root)
                rb_erase(&cache->rb_node, root);
                free(cache);
        }
+       pthread_mutex_unlock(&dso->lock);
 }
 
-static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
+static struct dso_cache *dso_cache__find(struct dso *dso, u64 offset)
 {
+       const struct rb_root *root = &dso->data.cache;
        struct rb_node * const *p = &root->rb_node;
        const struct rb_node *parent = NULL;
        struct dso_cache *cache;
@@ -528,17 +590,20 @@ static struct dso_cache *dso_cache__find(const struct rb_root *root, u64 offset)
                else
                        return cache;
        }
+
        return NULL;
 }
 
-static void
-dso_cache__insert(struct rb_root *root, struct dso_cache *new)
+static struct dso_cache *
+dso_cache__insert(struct dso *dso, struct dso_cache *new)
 {
+       struct rb_root *root = &dso->data.cache;
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct dso_cache *cache;
        u64 offset = new->offset;
 
+       pthread_mutex_lock(&dso->lock);
        while (*p != NULL) {
                u64 end;
 
@@ -550,10 +615,17 @@ dso_cache__insert(struct rb_root *root, struct dso_cache *new)
                        p = &(*p)->rb_left;
                else if (offset >= end)
                        p = &(*p)->rb_right;
+               else
+                       goto out;
        }
 
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
+
+       cache = NULL;
+out:
+       pthread_mutex_unlock(&dso->lock);
+       return cache;
 }
 
 static ssize_t
@@ -568,19 +640,33 @@ dso_cache__memcpy(struct dso_cache *cache, u64 offset,
 }
 
 static ssize_t
-dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
+dso_cache__read(struct dso *dso, struct machine *machine,
+               u64 offset, u8 *data, ssize_t size)
 {
        struct dso_cache *cache;
+       struct dso_cache *old;
        ssize_t ret;
 
        do {
                u64 cache_offset;
 
-               ret = -ENOMEM;
-
                cache = zalloc(sizeof(*cache) + DSO__DATA_CACHE_SIZE);
                if (!cache)
+                       return -ENOMEM;
+
+               pthread_mutex_lock(&dso__data_open_lock);
+
+               /*
+                * dso->data.fd might be closed if other thread opened another
+                * file (dso) due to open file limit (RLIMIT_NOFILE).
+                */
+               try_to_open_dso(dso, machine);
+
+               if (dso->data.fd < 0) {
+                       ret = -errno;
+                       dso->data.status = DSO_DATA_STATUS_ERROR;
                        break;
+               }
 
                cache_offset = offset & DSO__DATA_CACHE_MASK;
 
@@ -590,11 +676,20 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
 
                cache->offset = cache_offset;
                cache->size   = ret;
-               dso_cache__insert(&dso->data.cache, cache);
+       } while (0);
 
-               ret = dso_cache__memcpy(cache, offset, data, size);
+       pthread_mutex_unlock(&dso__data_open_lock);
 
-       } while (0);
+       if (ret > 0) {
+               old = dso_cache__insert(dso, cache);
+               if (old) {
+                       /* we lose the race */
+                       free(cache);
+                       cache = old;
+               }
+
+               ret = dso_cache__memcpy(cache, offset, data, size);
+       }
 
        if (ret <= 0)
                free(cache);
@@ -602,16 +697,16 @@ dso_cache__read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
        return ret;
 }
 
-static ssize_t dso_cache_read(struct dso *dso, u64 offset,
-                             u8 *data, ssize_t size)
+static ssize_t dso_cache_read(struct dso *dso, struct machine *machine,
+                             u64 offset, u8 *data, ssize_t size)
 {
        struct dso_cache *cache;
 
-       cache = dso_cache__find(&dso->data.cache, offset);
+       cache = dso_cache__find(dso, offset);
        if (cache)
                return dso_cache__memcpy(cache, offset, data, size);
        else
-               return dso_cache__read(dso, offset, data, size);
+               return dso_cache__read(dso, machine, offset, data, size);
 }
 
 /*
@@ -619,7 +714,8 @@ static ssize_t dso_cache_read(struct dso *dso, u64 offset,
  * in the rb_tree. Any read to already cached data is served
  * by cached data.
  */
-static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
+static ssize_t cached_read(struct dso *dso, struct machine *machine,
+                          u64 offset, u8 *data, ssize_t size)
 {
        ssize_t r = 0;
        u8 *p = data;
@@ -627,7 +723,7 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
        do {
                ssize_t ret;
 
-               ret = dso_cache_read(dso, offset, p, size);
+               ret = dso_cache_read(dso, machine, offset, p, size);
                if (ret < 0)
                        return ret;
 
@@ -647,21 +743,44 @@ static ssize_t cached_read(struct dso *dso, u64 offset, u8 *data, ssize_t size)
        return r;
 }
 
-static int data_file_size(struct dso *dso)
+static int data_file_size(struct dso *dso, struct machine *machine)
 {
+       int ret = 0;
        struct stat st;
        char sbuf[STRERR_BUFSIZE];
 
-       if (!dso->data.file_size) {
-               if (fstat(dso->data.fd, &st)) {
-                       pr_err("dso mmap failed, fstat: %s\n",
-                               strerror_r(errno, sbuf, sizeof(sbuf)));
-                       return -1;
-               }
-               dso->data.file_size = st.st_size;
+       if (dso->data.file_size)
+               return 0;
+
+       if (dso->data.status == DSO_DATA_STATUS_ERROR)
+               return -1;
+
+       pthread_mutex_lock(&dso__data_open_lock);
+
+       /*
+        * dso->data.fd might be closed if other thread opened another
+        * file (dso) due to open file limit (RLIMIT_NOFILE).
+        */
+       try_to_open_dso(dso, machine);
+
+       if (dso->data.fd < 0) {
+               ret = -errno;
+               dso->data.status = DSO_DATA_STATUS_ERROR;
+               goto out;
        }
 
-       return 0;
+       if (fstat(dso->data.fd, &st) < 0) {
+               ret = -errno;
+               pr_err("dso cache fstat failed: %s\n",
+                      strerror_r(errno, sbuf, sizeof(sbuf)));
+               dso->data.status = DSO_DATA_STATUS_ERROR;
+               goto out;
+       }
+       dso->data.file_size = st.st_size;
+
+out:
+       pthread_mutex_unlock(&dso__data_open_lock);
+       return ret;
 }
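
As the comment above notes, another thread can close dso->data.fd at any moment to stay under RLIMIT_NOFILE, so data_file_size() revalidates the fd with try_to_open_dso() and only dereferences it inside the dso__data_open_lock critical section; dso_cache__read() follows the same rule. A condensed sketch of the pattern (the pread() stands in for whatever work needs the fd):

    static ssize_t read_dso_bytes(struct dso *dso, struct machine *machine,
                                  void *buf, size_t len, off_t offset)
    {
            ssize_t ret;

            pthread_mutex_lock(&dso__data_open_lock);
            try_to_open_dso(dso, machine);  /* fd may have been recycled */
            if (dso->data.fd < 0)
                    ret = -errno;
            else
                    ret = pread(dso->data.fd, buf, len, offset);
            pthread_mutex_unlock(&dso__data_open_lock);

            return ret;
    }
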
 
 /**
@@ -673,23 +792,17 @@ static int data_file_size(struct dso *dso)
  */
 off_t dso__data_size(struct dso *dso, struct machine *machine)
 {
-       int fd;
-
-       fd = dso__data_fd(dso, machine);
-       if (fd < 0)
-               return fd;
-
-       if (data_file_size(dso))
+       if (data_file_size(dso, machine))
                return -1;
 
        /* For now just estimate dso data size is close to file size */
        return dso->data.file_size;
 }
 
-static ssize_t data_read_offset(struct dso *dso, u64 offset,
-                               u8 *data, ssize_t size)
+static ssize_t data_read_offset(struct dso *dso, struct machine *machine,
+                               u64 offset, u8 *data, ssize_t size)
 {
-       if (data_file_size(dso))
+       if (data_file_size(dso, machine))
                return -1;
 
        /* Check the offset sanity. */
@@ -699,7 +812,7 @@ static ssize_t data_read_offset(struct dso *dso, u64 offset,
        if (offset + size < offset)
                return -1;
 
-       return cached_read(dso, offset, data, size);
+       return cached_read(dso, machine, offset, data, size);
 }
 
 /**
@@ -716,10 +829,10 @@ static ssize_t data_read_offset(struct dso *dso, u64 offset,
 ssize_t dso__data_read_offset(struct dso *dso, struct machine *machine,
                              u64 offset, u8 *data, ssize_t size)
 {
-       if (dso__data_fd(dso, machine) < 0)
+       if (dso->data.status == DSO_DATA_STATUS_ERROR)
                return -1;
 
-       return data_read_offset(dso, offset, data, size);
+       return data_read_offset(dso, machine, offset, data, size);
 }
 
 /**
@@ -751,13 +864,13 @@ struct map *dso__new_map(const char *name)
        return map;
 }
 
-struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
-                   const char *short_name, int dso_type)
+struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
+                                   const char *short_name, int dso_type)
 {
        /*
         * The kernel dso could be created by build_id processing.
         */
-       struct dso *dso = __dsos__findnew(&machine->kernel_dsos, name);
+       struct dso *dso = machine__findnew_dso(machine, name);
 
        /*
         * We need to run this in all cases, since during the build_id
@@ -776,8 +889,8 @@ struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
  * Either one of the dso or name parameter must be non-NULL or the
  * function will not work.
  */
-static struct dso *dso__findlink_by_longname(struct rb_root *root,
-                                            struct dso *dso, const char *name)
+static struct dso *__dso__findlink_by_longname(struct rb_root *root,
+                                              struct dso *dso, const char *name)
 {
        struct rb_node **p = &root->rb_node;
        struct rb_node  *parent = NULL;
@@ -824,10 +937,10 @@ static struct dso *dso__findlink_by_longname(struct rb_root *root,
        return NULL;
 }
 
-static inline struct dso *
-dso__find_by_longname(const struct rb_root *root, const char *name)
+static inline struct dso *__dso__find_by_longname(struct rb_root *root,
+                                                 const char *name)
 {
-       return dso__findlink_by_longname((struct rb_root *)root, NULL, name);
+       return __dso__findlink_by_longname(root, NULL, name);
 }
 
 void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated)
@@ -935,6 +1048,8 @@ struct dso *dso__new(const char *name)
                RB_CLEAR_NODE(&dso->rb_node);
                INIT_LIST_HEAD(&dso->node);
                INIT_LIST_HEAD(&dso->data.open_entry);
+               pthread_mutex_init(&dso->lock, NULL);
+               atomic_set(&dso->refcnt, 1);
        }
 
        return dso;
@@ -961,12 +1076,27 @@ void dso__delete(struct dso *dso)
        }
 
        dso__data_close(dso);
-       dso_cache__free(&dso->data.cache);
+       auxtrace_cache__free(dso->auxtrace_cache);
+       dso_cache__free(dso);
        dso__free_a2l(dso);
        zfree(&dso->symsrc_filename);
+       pthread_mutex_destroy(&dso->lock);
        free(dso);
 }
 
+struct dso *dso__get(struct dso *dso)
+{
+       if (dso)
+               atomic_inc(&dso->refcnt);
+       return dso;
+}
+
+void dso__put(struct dso *dso)
+{
+       if (dso && atomic_dec_and_test(&dso->refcnt))
+               dso__delete(dso);
+}
+
 void dso__set_build_id(struct dso *dso, void *build_id)
 {
        memcpy(dso->build_id, build_id, sizeof(dso->build_id));
@@ -1033,14 +1163,41 @@ bool __dsos__read_build_ids(struct list_head *head, bool with_hits)
        return have_build_id;
 }
 
-void dsos__add(struct dsos *dsos, struct dso *dso)
+void __dsos__add(struct dsos *dsos, struct dso *dso)
 {
        list_add_tail(&dso->node, &dsos->head);
-       dso__findlink_by_longname(&dsos->root, dso, NULL);
+       __dso__findlink_by_longname(&dsos->root, dso, NULL);
+       /*
+        * It is now in the linked list, so grab a reference. It can later be
+        * garbage collected when memory is needed, by looking for LRU dso
+        * instances in the list with atomic_read(&dso->refcnt) == 1, i.e. no
+        * references anywhere besides the one held for the list itself: under
+        * a lock for the list, remove it from the list, then dso__put() it;
+        * that will probably be the last reference and will then call
+        * dso__delete(), end of life.
+        *
+        * That, or at the end of the 'struct machine' lifetime, when all
+        * 'struct dso' instances will be removed from the list, in
+        * dsos__exit(), if they have no other reference from some other data
+        * structure.
+        *
+        * E.g.: after processing a 'perf.data' file and storing references
+        * to objects instantiated while processing events, we will have
+        * references to the 'thread', 'map', 'dso' structs all from 'struct
+        * hist_entry' instances, but we may not need anything not referenced,
+        * so we might as well call machines__exit()/machines__delete() and
+        * garbage collect it.
+        */
+       dso__get(dso);
+}
+
+void dsos__add(struct dsos *dsos, struct dso *dso)
+{
+       pthread_rwlock_wrlock(&dsos->lock);
+       __dsos__add(dsos, dso);
+       pthread_rwlock_unlock(&dsos->lock);
 }
 
-struct dso *dsos__find(const struct dsos *dsos, const char *name,
-                      bool cmp_short)
+struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
 {
        struct dso *pos;
 
@@ -1050,15 +1207,24 @@ struct dso *dsos__find(const struct dsos *dsos, const char *name,
                                return pos;
                return NULL;
        }
-       return dso__find_by_longname(&dsos->root, name);
+       return __dso__find_by_longname(&dsos->root, name);
+}
+
+struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short)
+{
+       struct dso *dso;
+       pthread_rwlock_rdlock(&dsos->lock);
+       dso = __dsos__find(dsos, name, cmp_short);
+       pthread_rwlock_unlock(&dsos->lock);
+       return dso;
 }
 
-struct dso *dsos__addnew(struct dsos *dsos, const char *name)
+struct dso *__dsos__addnew(struct dsos *dsos, const char *name)
 {
        struct dso *dso = dso__new(name);
 
        if (dso != NULL) {
-               dsos__add(dsos, dso);
+               __dsos__add(dsos, dso);
                dso__set_basename(dso);
        }
        return dso;
@@ -1066,9 +1232,18 @@ struct dso *dsos__addnew(struct dsos *dsos, const char *name)
 
 struct dso *__dsos__findnew(struct dsos *dsos, const char *name)
 {
-       struct dso *dso = dsos__find(dsos, name, false);
+       struct dso *dso = __dsos__find(dsos, name, false);
 
-       return dso ? dso : dsos__addnew(dsos, name);
+       return dso ? dso : __dsos__addnew(dsos, name);
+}
+
+struct dso *dsos__findnew(struct dsos *dsos, const char *name)
+{
+       struct dso *dso;
+       pthread_rwlock_wrlock(&dsos->lock);
+       dso = dso__get(__dsos__findnew(dsos, name));
+       pthread_rwlock_unlock(&dsos->lock);
+       return dso;
 }
 
 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
@@ -1130,12 +1305,15 @@ size_t dso__fprintf(struct dso *dso, enum map_type type, FILE *fp)
 enum dso_type dso__type(struct dso *dso, struct machine *machine)
 {
        int fd;
+       enum dso_type type = DSO__TYPE_UNKNOWN;
 
-       fd = dso__data_fd(dso, machine);
-       if (fd < 0)
-               return DSO__TYPE_UNKNOWN;
+       fd = dso__data_get_fd(dso, machine);
+       if (fd >= 0) {
+               type = dso__type_fd(fd);
+               dso__data_put_fd(dso);
+       }
 
-       return dso__type_fd(fd);
+       return type;
 }
 
 int dso__strerror_load(struct dso *dso, char *buf, size_t buflen)
index e0901b4ed8de0d08b1c6ad93f20ca371b9adef5f..2fe98bb0e95b0d4b88fb58f2fc28daee875a13bb 100644 (file)
@@ -1,9 +1,11 @@
 #ifndef __PERF_DSO
 #define __PERF_DSO
 
+#include <linux/atomic.h>
 #include <linux/types.h>
 #include <linux/rbtree.h>
 #include <stdbool.h>
+#include <pthread.h>
 #include <linux/types.h>
 #include <linux/bitops.h>
 #include "map.h"
@@ -124,9 +126,13 @@ struct dso_cache {
 struct dsos {
        struct list_head head;
        struct rb_root   root;  /* rbtree root sorted by long name */
+       pthread_rwlock_t lock;
 };
 
+struct auxtrace_cache;
+
 struct dso {
+       pthread_mutex_t  lock;
        struct list_head node;
        struct rb_node   rb_node;       /* rbtree node sorted by long name */
        struct rb_root   symbols[MAP__NR_TYPES];
@@ -156,6 +162,7 @@ struct dso {
        u16              long_name_len;
        u16              short_name_len;
        void            *dwfl;                  /* DWARF debug info */
+       struct auxtrace_cache *auxtrace_cache;
 
        /* dso data file */
        struct {
@@ -173,7 +180,7 @@ struct dso {
                void     *priv;
                u64      db_id;
        };
-
+       atomic_t         refcnt;
        char             name[0];
 };
 
@@ -200,6 +207,17 @@ void dso__set_long_name(struct dso *dso, const char *name, bool name_allocated);
 
 int dso__name_len(const struct dso *dso);
 
+struct dso *dso__get(struct dso *dso);
+void dso__put(struct dso *dso);
+
+static inline void __dso__zput(struct dso **dso)
+{
+       dso__put(*dso);
+       *dso = NULL;
+}
+
+#define dso__zput(dso) __dso__zput(&dso)
+
 bool dso__loaded(const struct dso *dso, enum map_type type);
 
 bool dso__sorted_by_name(const struct dso *dso, enum map_type type);
@@ -216,7 +234,7 @@ char dso__symtab_origin(const struct dso *dso);
 int dso__read_binary_type_filename(const struct dso *dso, enum dso_binary_type type,
                                   char *root_dir, char *filename, size_t size);
 bool is_supported_compression(const char *ext);
-bool is_kernel_module(const char *pathname);
+bool is_kernel_module(const char *pathname, int cpumode);
 bool decompress_to_file(const char *ext, const char *filename, int output_fd);
 bool dso__needs_decompress(struct dso *dso);
 
@@ -236,7 +254,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
 
 /*
  * The dso__data_* external interface provides following functions:
- *   dso__data_fd
+ *   dso__data_get_fd
+ *   dso__data_put_fd
  *   dso__data_close
  *   dso__data_size
  *   dso__data_read_offset
@@ -253,8 +272,11 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
  * The current usage of the dso__data_* interface is as follows:
  *
  * Get DSO's fd:
- *   int fd = dso__data_fd(dso, machine);
- *   USE 'fd' SOMEHOW
+ *   int fd = dso__data_get_fd(dso, machine);
+ *   if (fd >= 0) {
+ *       USE 'fd' SOMEHOW
+ *       dso__data_put_fd(dso);
+ *   }
  *
  * Read DSO's data:
  *   n = dso__data_read_offset(dso_0, &machine, 0, buf, BUFSIZE);
@@ -273,7 +295,8 @@ int __kmod_path__parse(struct kmod_path *m, const char *path,
  *
  * TODO
 */
-int dso__data_fd(struct dso *dso, struct machine *machine);
+int dso__data_get_fd(struct dso *dso, struct machine *machine);
+void dso__data_put_fd(struct dso *dso __maybe_unused);
 void dso__data_close(struct dso *dso);
 
 off_t dso__data_size(struct dso *dso, struct machine *machine);
@@ -285,14 +308,16 @@ ssize_t dso__data_read_addr(struct dso *dso, struct map *map,
 bool dso__data_status_seen(struct dso *dso, enum dso_data_status_seen by);
 
 struct map *dso__new_map(const char *name);
-struct dso *dso__kernel_findnew(struct machine *machine, const char *name,
-                               const char *short_name, int dso_type);
+struct dso *machine__findnew_kernel(struct machine *machine, const char *name,
+                                   const char *short_name, int dso_type);
 
+void __dsos__add(struct dsos *dsos, struct dso *dso);
 void dsos__add(struct dsos *dsos, struct dso *dso);
-struct dso *dsos__addnew(struct dsos *dsos, const char *name);
-struct dso *dsos__find(const struct dsos *dsos, const char *name,
-                      bool cmp_short);
+struct dso *__dsos__addnew(struct dsos *dsos, const char *name);
+struct dso *__dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
+struct dso *dsos__find(struct dsos *dsos, const char *name, bool cmp_short);
 struct dso *__dsos__findnew(struct dsos *dsos, const char *name);
+struct dso *dsos__findnew(struct dsos *dsos, const char *name);
 bool __dsos__read_build_ids(struct list_head *head, bool with_hits);
 
 size_t __dsos__fprintf_buildid(struct list_head *head, FILE *fp,
index c34e024020c7c58602a9459f407283faea884b20..57f3ef41c2bc3e6261c03f54dc7f99791a003108 100644 (file)
@@ -139,10 +139,26 @@ int cu_walk_functions_at(Dwarf_Die *cu_die, Dwarf_Addr addr,
 bool die_compare_name(Dwarf_Die *dw_die, const char *tname)
 {
        const char *name;
+
        name = dwarf_diename(dw_die);
        return name ? (strcmp(tname, name) == 0) : false;
 }
 
+/**
+ * die_match_name - Match diename and glob
+ * @dw_die: a DIE
+ * @glob: a string of target glob pattern
+ *
+ * Match the name of @dw_die against @glob. Return false if matching fails.
+ */
+bool die_match_name(Dwarf_Die *dw_die, const char *glob)
+{
+       const char *name;
+
+       name = dwarf_diename(dw_die);
+       return name ? strglobmatch(name, glob) : false;
+}
+
 /**
  * die_get_call_lineno - Get callsite line number of inline-function instance
  * @in_die: a DIE of an inlined function instance
@@ -417,6 +433,43 @@ struct __addr_die_search_param {
        Dwarf_Die       *die_mem;
 };
 
+static int __die_search_func_tail_cb(Dwarf_Die *fn_die, void *data)
+{
+       struct __addr_die_search_param *ad = data;
+       Dwarf_Addr addr = 0;
+
+       if (dwarf_tag(fn_die) == DW_TAG_subprogram &&
+           !dwarf_highpc(fn_die, &addr) &&
+           addr == ad->addr) {
+               memcpy(ad->die_mem, fn_die, sizeof(Dwarf_Die));
+               return DWARF_CB_ABORT;
+       }
+       return DWARF_CB_OK;
+}
+
+/**
+ * die_find_tailfunc - Search for a non-inlined function with tail call at
+ * given address
+ * @cu_die: a CU DIE which including @addr
+ * @addr: target address
+ * @die_mem: a buffer for result DIE
+ *
+ * Search for a non-inlined function DIE with a tail call at @addr. Stores the
+ * DIE to @die_mem and returns it if found. Returns NULL on failure.
+ */
+Dwarf_Die *die_find_tailfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+                                   Dwarf_Die *die_mem)
+{
+       struct __addr_die_search_param ad;
+       ad.addr = addr;
+       ad.die_mem = die_mem;
+       /* dwarf_getscopes can't find subprogram. */
+       if (!dwarf_getfuncs(cu_die, __die_search_func_tail_cb, &ad, 0))
+               return NULL;
+       else
+               return die_mem;
+}
+
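die_find_tailfunc() covers the case where @addr is the last address of a function that ends in a tail call, which die_find_realfunc() cannot resolve; presumably the probe code tries it as a fallback, roughly like this (a hedged sketch, not the actual caller):

    Dwarf_Die die_mem, *fn_die;

    fn_die = die_find_realfunc(cu_die, addr, &die_mem);
    if (!fn_die)
            /* @addr may be the highpc of a tail-calling function */
            fn_die = die_find_tailfunc(cu_die, addr, &die_mem);
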
 /* die_find callback for non-inlined function search */
 static int __die_search_func_cb(Dwarf_Die *fn_die, void *data)
 {
@@ -832,19 +885,17 @@ Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
 /**
  * die_get_typename - Get the name of given variable DIE
  * @vr_die: a variable DIE
- * @buf: a buffer for result type name
- * @len: a max-length of @buf
+ * @buf: a strbuf for result type name
  *
- * Get the name of @vr_die and stores it to @buf. Return the actual length
- * of type name if succeeded. Return -E2BIG if @len is not enough long, and
- * Return -ENOENT if failed to find type name.
+ * Get the name of @vr_die and store it to @buf. Return 0 on success,
+ * or -ENOENT if the type name could not be found.
  * Note that the result stores the typedef name if possible, and stores
  * "*(function_type)" if the type is a function pointer.
  */
-int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
+int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf)
 {
        Dwarf_Die type;
-       int tag, ret, ret2;
+       int tag, ret;
        const char *tmp = "";
 
        if (__die_get_real_type(vr_die, &type) == NULL)
@@ -855,8 +906,8 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
                tmp = "*";
        else if (tag == DW_TAG_subroutine_type) {
                /* Function pointer */
-               ret = snprintf(buf, len, "(function_type)");
-               return (ret >= len) ? -E2BIG : ret;
+               strbuf_addf(buf, "(function_type)");
+               return 0;
        } else {
                if (!dwarf_diename(&type))
                        return -ENOENT;
@@ -867,39 +918,156 @@ int die_get_typename(Dwarf_Die *vr_die, char *buf, int len)
                else if (tag == DW_TAG_enumeration_type)
                        tmp = "enum ";
                /* Write a base name */
-               ret = snprintf(buf, len, "%s%s", tmp, dwarf_diename(&type));
-               return (ret >= len) ? -E2BIG : ret;
-       }
-       ret = die_get_typename(&type, buf, len);
-       if (ret > 0) {
-               ret2 = snprintf(buf + ret, len - ret, "%s", tmp);
-               ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+               strbuf_addf(buf, "%s%s", tmp, dwarf_diename(&type));
+               return 0;
        }
+       ret = die_get_typename(&type, buf);
+       if (ret == 0)
+               strbuf_addf(buf, "%s", tmp);
+
        return ret;
 }
 
 /**
  * die_get_varname - Get the name and type of given variable DIE
  * @vr_die: a variable DIE
- * @buf: a buffer for type and variable name
- * @len: the max-length of @buf
+ * @buf: a strbuf for type and variable name
  *
  * Get the name and type of @vr_die and stores it in @buf as "type\tname".
  */
-int die_get_varname(Dwarf_Die *vr_die, char *buf, int len)
+int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf)
 {
-       int ret, ret2;
+       int ret;
 
-       ret = die_get_typename(vr_die, buf, len);
+       ret = die_get_typename(vr_die, buf);
        if (ret < 0) {
                pr_debug("Failed to get type, make it unknown.\n");
-               ret = snprintf(buf, len, "(unknown_type)");
+               strbuf_addf(buf, "(unknown_type)");
        }
-       if (ret > 0) {
-               ret2 = snprintf(buf + ret, len - ret, "\t%s",
-                               dwarf_diename(vr_die));
-               ret = (ret2 >= len - ret) ? -E2BIG : ret2 + ret;
+
+       strbuf_addf(buf, "\t%s", dwarf_diename(vr_die));
+
+       return 0;
+}
+
+/**
+ * die_get_var_innermost_scope - Get innermost scope range of given variable DIE
+ * @sp_die: a subprogram DIE
+ * @vr_die: a variable DIE
+ * @buf: a strbuf for variable byte offset range
+ *
+ * Get the innermost scope range of @vr_die and store it in @buf as
+ * "@<function_name+[NN-NN,NN-NN]>".
+ */
+static int die_get_var_innermost_scope(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
+                               struct strbuf *buf)
+{
+       Dwarf_Die *scopes;
+       int count;
+       size_t offset = 0;
+       Dwarf_Addr base;
+       Dwarf_Addr start, end;
+       Dwarf_Addr entry;
+       int ret;
+       bool first = true;
+       const char *name;
+
+       ret = dwarf_entrypc(sp_die, &entry);
+       if (ret)
+               return ret;
+
+       name = dwarf_diename(sp_die);
+       if (!name)
+               return -ENOENT;
+
+       count = dwarf_getscopes_die(vr_die, &scopes);
+
+       /* scopes[1] is the DIE for the scope containing the variable's scope */
+       if (count <= 1) {
+               ret = -EINVAL;
+               goto out;
        }
+
+       while ((offset = dwarf_ranges(&scopes[1], offset, &base,
+                               &start, &end)) > 0) {
+               start -= entry;
+               end -= entry;
+
+               if (first) {
+                       strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
+                               name, start, end);
+                       first = false;
+               } else {
+                       strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
+                               start, end);
+               }
+       }
+
+       if (!first)
+               strbuf_addf(buf, "]>");
+
+out:
+       free(scopes);
        return ret;
 }
 
+/**
+ * die_get_var_range - Get byte offset range of given variable DIE
+ * @sp_die: a subprogram DIE
+ * @vr_die: a variable DIE
+ * @buf: a strbuf for type and variable name and byte offset range
+ *
+ * Get the byte offset range of @vr_die and store it in @buf as
+ * "@<function_name+[NN-NN,NN-NN]>".
+ */
+int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die, struct strbuf *buf)
+{
+       int ret = 0;
+       Dwarf_Addr base;
+       Dwarf_Addr start, end;
+       Dwarf_Addr entry;
+       Dwarf_Op *op;
+       size_t nops;
+       size_t offset = 0;
+       Dwarf_Attribute attr;
+       bool first = true;
+       const char *name;
+
+       ret = dwarf_entrypc(sp_die, &entry);
+       if (ret)
+               return ret;
+
+       name = dwarf_diename(sp_die);
+       if (!name)
+               return -ENOENT;
+
+       if (dwarf_attr(vr_die, DW_AT_location, &attr) == NULL)
+               return -EINVAL;
+
+       while ((offset = dwarf_getlocations(
+                               &attr, offset, &base,
+                               &start, &end, &op, &nops)) > 0) {
+               if (start == 0) {
+                       /* Single Location Descriptions */
+                       ret = die_get_var_innermost_scope(sp_die, vr_die, buf);
+                       return ret;
+               }
+
+               /* Location Lists */
+               start -= entry;
+               end -= entry;
+               if (first) {
+                       strbuf_addf(buf, "@<%s+[%" PRIu64 "-%" PRIu64,
+                               name, start, end);
+                       first = false;
+               } else {
+                       strbuf_addf(buf, ",%" PRIu64 "-%" PRIu64,
+                               start, end);
+               }
+       }
+
+       if (!first)
+               strbuf_addf(buf, "]>");
+
+       return ret;
+}
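
Moving these helpers onto struct strbuf removes the -E2BIG bookkeeping from every caller, since the buffer grows on demand and only lookup failures remain as errors. A hedged sketch of a caller under the new API (strbuf_init()/strbuf_release() as in perf's strbuf helpers; the DIEs are assumed to come from an earlier probe-point lookup):

    static int print_var_with_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die)
    {
            struct strbuf buf;
            int ret;

            strbuf_init(&buf, 64);
            ret = die_get_varname(vr_die, &buf);            /* "type\tname" */
            if (ret == 0)
                    ret = die_get_var_range(sp_die, vr_die, &buf);
            if (ret == 0)
                    printf("%s\n", buf.buf);
            strbuf_release(&buf);
            return ret;
    }
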
index af7dbcd5f929947cfdd0b146f20ad65d45059bb8..c42ec366f2a72fb7999e22f257ebc82765ee4919 100644 (file)
@@ -47,6 +47,9 @@ extern bool die_is_func_instance(Dwarf_Die *dw_die);
 /* Compare diename and tname */
 extern bool die_compare_name(Dwarf_Die *dw_die, const char *tname);
 
+/* Matching diename with glob pattern */
+extern bool die_match_name(Dwarf_Die *dw_die, const char *glob);
+
 /* Get callsite line number of inline-function instance */
 extern int die_get_call_lineno(Dwarf_Die *in_die);
 
@@ -82,6 +85,10 @@ extern Dwarf_Die *die_find_child(Dwarf_Die *rt_die,
 extern Dwarf_Die *die_find_realfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
                                    Dwarf_Die *die_mem);
 
+/* Search a non-inlined function with tail call at given address */
+Dwarf_Die *die_find_tailfunc(Dwarf_Die *cu_die, Dwarf_Addr addr,
+                                   Dwarf_Die *die_mem);
+
 /* Search the top inlined function including given address */
 extern Dwarf_Die *die_find_top_inlinefunc(Dwarf_Die *sp_die, Dwarf_Addr addr,
                                          Dwarf_Die *die_mem);
@@ -114,8 +121,10 @@ extern Dwarf_Die *die_find_member(Dwarf_Die *st_die, const char *name,
                                  Dwarf_Die *die_mem);
 
 /* Get the name of given variable DIE */
-extern int die_get_typename(Dwarf_Die *vr_die, char *buf, int len);
+extern int die_get_typename(Dwarf_Die *vr_die, struct strbuf *buf);
 
 /* Get the name and type of given variable DIE, stored as "type\tname" */
-extern int die_get_varname(Dwarf_Die *vr_die, char *buf, int len);
+extern int die_get_varname(Dwarf_Die *vr_die, struct strbuf *buf);
+extern int die_get_var_range(Dwarf_Die *sp_die, Dwarf_Die *vr_die,
+                       struct strbuf *buf);
 #endif
index 275b0ee345f5eab4b1dcaebc18d5068698d9e437..7405123692f14919bc1928c67f4819ef3503e1ba 100644 (file)
@@ -5,5 +5,4 @@
  */
 #include "cache.h"
 
-const char *pager_program;
 int pager_use_color = 1;
index ff866c4d2e2f09ea4abc1650d7c413776b3bb93f..d7d986d8f23e5f890cc295b3b830470343f34063 100644 (file)
@@ -23,12 +23,18 @@ static const char *perf_event__names[] = {
        [PERF_RECORD_FORK]                      = "FORK",
        [PERF_RECORD_READ]                      = "READ",
        [PERF_RECORD_SAMPLE]                    = "SAMPLE",
+       [PERF_RECORD_AUX]                       = "AUX",
+       [PERF_RECORD_ITRACE_START]              = "ITRACE_START",
+       [PERF_RECORD_LOST_SAMPLES]              = "LOST_SAMPLES",
        [PERF_RECORD_HEADER_ATTR]               = "ATTR",
        [PERF_RECORD_HEADER_EVENT_TYPE]         = "EVENT_TYPE",
        [PERF_RECORD_HEADER_TRACING_DATA]       = "TRACING_DATA",
        [PERF_RECORD_HEADER_BUILD_ID]           = "BUILD_ID",
        [PERF_RECORD_FINISHED_ROUND]            = "FINISHED_ROUND",
        [PERF_RECORD_ID_INDEX]                  = "ID_INDEX",
+       [PERF_RECORD_AUXTRACE_INFO]             = "AUXTRACE_INFO",
+       [PERF_RECORD_AUXTRACE]                  = "AUXTRACE",
+       [PERF_RECORD_AUXTRACE_ERROR]            = "AUXTRACE_ERROR",
 };
 
 const char *perf_event__name(unsigned int id)
@@ -212,10 +218,14 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
-                                      bool mmap_data)
+                                      bool mmap_data,
+                                      unsigned int proc_map_timeout)
 {
        char filename[PATH_MAX];
        FILE *fp;
+       unsigned long long t;
+       bool truncation = false;
+       unsigned long long timeout = proc_map_timeout * 1000000ULL;
        int rc = 0;
 
        if (machine__is_default_guest(machine))
@@ -234,6 +244,7 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
        }
 
        event->header.type = PERF_RECORD_MMAP2;
+       t = rdclock();
 
        while (1) {
                char bf[BUFSIZ];
@@ -247,6 +258,15 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                if (fgets(bf, sizeof(bf), fp) == NULL)
                        break;
 
+               if ((rdclock() - t) > timeout) {
+                       pr_warning("Reading %s time out. "
+                                  "You may want to increase "
+                                  "the time limit by --proc-map-timeout\n",
+                                  filename);
+                       truncation = true;
+                       goto out;
+               }
+
                /* ensure null termination since stack will be reused. */
                strcpy(execname, "");
 
@@ -295,6 +315,10 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                        event->header.misc |= PERF_RECORD_MISC_MMAP_DATA;
                }
 
+out:
+               if (truncation)
+                       event->header.misc |= PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT;
+
                if (!strcmp(execname, ""))
                        strcpy(execname, anonstr);
 
@@ -313,6 +337,9 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                        rc = -1;
                        break;
                }
+
+               if (truncation)
+                       break;
        }
 
        fclose(fp);
@@ -324,8 +351,9 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
                                   struct machine *machine)
 {
        int rc = 0;
-       struct rb_node *nd;
+       struct map *pos;
        struct map_groups *kmaps = &machine->kmaps;
+       struct maps *maps = &kmaps->maps[MAP__FUNCTION];
        union perf_event *event = zalloc((sizeof(event->mmap) +
                                          machine->id_hdr_size));
        if (event == NULL) {
@@ -345,10 +373,8 @@ int perf_event__synthesize_modules(struct perf_tool *tool,
        else
                event->header.misc = PERF_RECORD_MISC_GUEST_KERNEL;
 
-       for (nd = rb_first(&kmaps->maps[MAP__FUNCTION]);
-            nd; nd = rb_next(nd)) {
+       for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                size_t size;
-               struct map *pos = rb_entry(nd, struct map, rb_node);
 
                if (pos->dso->kernel)
                        continue;
@@ -381,7 +407,9 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                                      pid_t pid, int full,
                                          perf_event__handler_t process,
                                      struct perf_tool *tool,
-                                     struct machine *machine, bool mmap_data)
+                                     struct machine *machine,
+                                     bool mmap_data,
+                                     unsigned int proc_map_timeout)
 {
        char filename[PATH_MAX];
        DIR *tasks;
@@ -398,7 +426,8 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                        return -1;
 
                return perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-                                                         process, machine, mmap_data);
+                                                         process, machine, mmap_data,
+                                                         proc_map_timeout);
        }
 
        if (machine__is_default_guest(machine))
@@ -439,7 +468,7 @@ static int __event__synthesize_thread(union perf_event *comm_event,
                if (_pid == pid) {
                        /* process the parent's maps too */
                        rc = perf_event__synthesize_mmap_events(tool, mmap_event, pid, tgid,
-                                               process, machine, mmap_data);
+                                               process, machine, mmap_data, proc_map_timeout);
                        if (rc)
                                break;
                }
@@ -453,7 +482,8 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
                                      struct machine *machine,
-                                     bool mmap_data)
+                                     bool mmap_data,
+                                     unsigned int proc_map_timeout)
 {
        union perf_event *comm_event, *mmap_event, *fork_event;
        int err = -1, thread, j;
@@ -476,7 +506,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                               fork_event,
                                               threads->map[thread], 0,
                                               process, tool, machine,
-                                              mmap_data)) {
+                                              mmap_data, proc_map_timeout)) {
                        err = -1;
                        break;
                }
@@ -502,7 +532,7 @@ int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                                       fork_event,
                                                       comm_event->comm.pid, 0,
                                                       process, tool, machine,
-                                                      mmap_data)) {
+                                                      mmap_data, proc_map_timeout)) {
                                err = -1;
                                break;
                        }
@@ -519,7 +549,9 @@ out:
 
 int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
-                                  struct machine *machine, bool mmap_data)
+                                  struct machine *machine,
+                                  bool mmap_data,
+                                  unsigned int proc_map_timeout)
 {
        DIR *proc;
        char proc_path[PATH_MAX];
@@ -559,7 +591,8 @@ int perf_event__synthesize_threads(struct perf_tool *tool,
                 * one thread couldn't be synthesized.
                 */
                __event__synthesize_thread(comm_event, mmap_event, fork_event, pid,
-                                          1, process, tool, machine, mmap_data);
+                                          1, process, tool, machine, mmap_data,
+                                          proc_map_timeout);
        }
 
        err = 0;
@@ -692,6 +725,30 @@ int perf_event__process_lost(struct perf_tool *tool __maybe_unused,
        return machine__process_lost_event(machine, event, sample);
 }
 
+int perf_event__process_aux(struct perf_tool *tool __maybe_unused,
+                           union perf_event *event,
+                           struct perf_sample *sample __maybe_unused,
+                           struct machine *machine)
+{
+       return machine__process_aux_event(machine, event);
+}
+
+int perf_event__process_itrace_start(struct perf_tool *tool __maybe_unused,
+                                    union perf_event *event,
+                                    struct perf_sample *sample __maybe_unused,
+                                    struct machine *machine)
+{
+       return machine__process_itrace_start_event(machine, event);
+}
+
+int perf_event__process_lost_samples(struct perf_tool *tool __maybe_unused,
+                                    union perf_event *event,
+                                    struct perf_sample *sample,
+                                    struct machine *machine)
+{
+       return machine__process_lost_samples_event(machine, event, sample);
+}
+
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp)
 {
        return fprintf(fp, " %d/%d: [%#" PRIx64 "(%#" PRIx64 ") @ %#" PRIx64 "]: %c %s\n",
@@ -755,6 +812,21 @@ int perf_event__process_exit(struct perf_tool *tool __maybe_unused,
        return machine__process_exit_event(machine, event, sample);
 }
 
+size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp)
+{
+       return fprintf(fp, " offset: %#"PRIx64" size: %#"PRIx64" flags: %#"PRIx64" [%s%s]\n",
+                      event->aux.aux_offset, event->aux.aux_size,
+                      event->aux.flags,
+                      event->aux.flags & PERF_AUX_FLAG_TRUNCATED ? "T" : "",
+                      event->aux.flags & PERF_AUX_FLAG_OVERWRITE ? "O" : "");
+}
+
+size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp)
+{
+       return fprintf(fp, " pid: %u tid: %u\n",
+                      event->itrace_start.pid, event->itrace_start.tid);
+}
+
 size_t perf_event__fprintf(union perf_event *event, FILE *fp)
 {
        size_t ret = fprintf(fp, "PERF_RECORD_%s",
@@ -774,6 +846,12 @@ size_t perf_event__fprintf(union perf_event *event, FILE *fp)
        case PERF_RECORD_MMAP2:
                ret += perf_event__fprintf_mmap2(event, fp);
                break;
+       case PERF_RECORD_AUX:
+               ret += perf_event__fprintf_aux(event, fp);
+               break;
+       case PERF_RECORD_ITRACE_START:
+               ret += perf_event__fprintf_itrace_start(event, fp);
+               break;
        default:
                ret += fprintf(fp, "\n");
        }
@@ -877,6 +955,10 @@ void thread__find_addr_location(struct thread *thread,
                al->sym = NULL;
 }
 
+/*
+ * Callers need to drop the reference to al->thread, obtained in
+ * machine__findnew_thread()
+ */
 int perf_event__preprocess_sample(const union perf_event *event,
                                  struct machine *machine,
                                  struct addr_location *al,
@@ -937,6 +1019,17 @@ int perf_event__preprocess_sample(const union perf_event *event,
        return 0;
 }
 
+/*
+ * The preprocess_sample method returns with references held on the entries
+ * in the addr_location it fills in; when done using them (and perhaps after
+ * grabbing extra references, if a pointer to one of those entries needs to
+ * be kept), callers must pair it with addr_location__put(), so that the
+ * refcounts can be decremented.
+ */
+void addr_location__put(struct addr_location *al)
+{
+       thread__zput(al->thread);
+}
+
 bool is_bts_event(struct perf_event_attr *attr)
 {
        return attr->type == PERF_TYPE_HARDWARE &&
index 09b9e8d3fcf7fae705afcc0d3ea688f2aca77665..c53f36384b64532abec852c9e6d7a496a0d982a6 100644 (file)
@@ -52,6 +52,11 @@ struct lost_event {
        u64 lost;
 };
 
+struct lost_samples_event {
+       struct perf_event_header header;
+       u64 lost;
+};
+
 /*
  * PERF_FORMAT_ENABLED | PERF_FORMAT_RUNNING | PERF_FORMAT_ID
  */
@@ -157,6 +162,8 @@ enum {
        PERF_IP_FLAG_IN_TX              = 1ULL << 10,
 };
 
+#define PERF_IP_FLAG_CHARS "bcrosyiABEx"
+
 #define PERF_BRANCH_MASK               (\
        PERF_IP_FLAG_BRANCH             |\
        PERF_IP_FLAG_CALL               |\
@@ -215,9 +222,17 @@ enum perf_user_event_type { /* above any possible kernel type */
        PERF_RECORD_HEADER_BUILD_ID             = 67,
        PERF_RECORD_FINISHED_ROUND              = 68,
        PERF_RECORD_ID_INDEX                    = 69,
+       PERF_RECORD_AUXTRACE_INFO               = 70,
+       PERF_RECORD_AUXTRACE                    = 71,
+       PERF_RECORD_AUXTRACE_ERROR              = 72,
        PERF_RECORD_HEADER_MAX
 };
 
+enum auxtrace_error_type {
+       PERF_AUXTRACE_ERROR_ITRACE  = 1,
+       PERF_AUXTRACE_ERROR_MAX
+};
+
 /*
  * The kernel collects the number of events it couldn't send in a stretch and
  * when possible sends this number in a PERF_RECORD_LOST event. The number of
@@ -225,6 +240,12 @@ enum perf_user_event_type { /* above any possible kernel type */
  * total_lost tells exactly how many events the kernel in fact lost, i.e. it is
  * the sum of all struct lost_event.lost fields reported.
  *
+ * The kernel discards mixed up samples and sends the number in a
+ * PERF_RECORD_LOST_SAMPLES event. The number of lost-samples events is stored
+ * in .nr_events[PERF_RECORD_LOST_SAMPLES] while total_lost_samples tells
+ * exactly how many samples the kernel in fact dropped, i.e. it is the sum of
+ * all struct lost_samples_event.lost fields reported.
+ *
  * The total_period is needed because by default auto-freq is used, so
  * multiplying nr_events[PERF_EVENT_SAMPLE] by a frequency isn't possible to get
  * the total number of low level events; it is necessary to sum all struct
@@ -234,6 +255,7 @@ struct events_stats {
        u64 total_period;
        u64 total_non_filtered_period;
        u64 total_lost;
+       u64 total_lost_samples;
        u64 total_invalid_chains;
        u32 nr_events[PERF_RECORD_HEADER_MAX];
        u32 nr_non_filtered_samples;
@@ -242,6 +264,8 @@ struct events_stats {
        u32 nr_invalid_chains;
        u32 nr_unknown_id;
        u32 nr_unprocessable_samples;
+       u32 nr_auxtrace_errors[PERF_AUXTRACE_ERROR_MAX];
+       u32 nr_proc_map_timeout;
 };
 
 struct attr_event {
@@ -280,6 +304,50 @@ struct id_index_event {
        struct id_index_entry entries[0];
 };
 
+struct auxtrace_info_event {
+       struct perf_event_header header;
+       u32 type;
+       u32 reserved__; /* For alignment */
+       u64 priv[];
+};
+
+struct auxtrace_event {
+       struct perf_event_header header;
+       u64 size;
+       u64 offset;
+       u64 reference;
+       u32 idx;
+       u32 tid;
+       u32 cpu;
+       u32 reserved__; /* For alignment */
+};
+
+#define MAX_AUXTRACE_ERROR_MSG 64
+
+struct auxtrace_error_event {
+       struct perf_event_header header;
+       u32 type;
+       u32 code;
+       u32 cpu;
+       u32 pid;
+       u32 tid;
+       u32 reserved__; /* For alignment */
+       u64 ip;
+       char msg[MAX_AUXTRACE_ERROR_MSG];
+};
+
+struct aux_event {
+       struct perf_event_header header;
+       u64     aux_offset;
+       u64     aux_size;
+       u64     flags;
+};
+
+struct itrace_start_event {
+       struct perf_event_header header;
+       u32 pid, tid;
+};
+
 union perf_event {
        struct perf_event_header        header;
        struct mmap_event               mmap;
@@ -287,6 +355,7 @@ union perf_event {
        struct comm_event               comm;
        struct fork_event               fork;
        struct lost_event               lost;
+       struct lost_samples_event       lost_samples;
        struct read_event               read;
        struct throttle_event           throttle;
        struct sample_event             sample;
@@ -295,6 +364,11 @@ union perf_event {
        struct tracing_data_event       tracing_data;
        struct build_id_event           build_id;
        struct id_index_event           id_index;
+       struct auxtrace_info_event      auxtrace_info;
+       struct auxtrace_event           auxtrace;
+       struct auxtrace_error_event     auxtrace_error;
+       struct aux_event                aux;
+       struct itrace_start_event       itrace_start;
 };
 
 void perf_event__print_totals(void);
@@ -310,10 +384,12 @@ typedef int (*perf_event__handler_t)(struct perf_tool *tool,
 int perf_event__synthesize_thread_map(struct perf_tool *tool,
                                      struct thread_map *threads,
                                      perf_event__handler_t process,
-                                     struct machine *machine, bool mmap_data);
+                                     struct machine *machine, bool mmap_data,
+                                     unsigned int proc_map_timeout);
 int perf_event__synthesize_threads(struct perf_tool *tool,
                                   perf_event__handler_t process,
-                                  struct machine *machine, bool mmap_data);
+                                  struct machine *machine, bool mmap_data,
+                                  unsigned int proc_map_timeout);
 int perf_event__synthesize_kernel_mmap(struct perf_tool *tool,
                                       perf_event__handler_t process,
                                       struct machine *machine);
@@ -330,6 +406,18 @@ int perf_event__process_lost(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
                             struct machine *machine);
+int perf_event__process_lost_samples(struct perf_tool *tool,
+                                    union perf_event *event,
+                                    struct perf_sample *sample,
+                                    struct machine *machine);
+int perf_event__process_aux(struct perf_tool *tool,
+                           union perf_event *event,
+                           struct perf_sample *sample,
+                           struct machine *machine);
+int perf_event__process_itrace_start(struct perf_tool *tool,
+                                    union perf_event *event,
+                                    struct perf_sample *sample,
+                                    struct machine *machine);
 int perf_event__process_mmap(struct perf_tool *tool,
                             union perf_event *event,
                             struct perf_sample *sample,
@@ -358,6 +446,8 @@ int perf_event__preprocess_sample(const union perf_event *event,
                                  struct addr_location *al,
                                  struct perf_sample *sample);
 
+void addr_location__put(struct addr_location *al);
+
 struct thread;
 
 bool is_bts_event(struct perf_event_attr *attr);
@@ -381,12 +471,15 @@ int perf_event__synthesize_mmap_events(struct perf_tool *tool,
                                       pid_t pid, pid_t tgid,
                                       perf_event__handler_t process,
                                       struct machine *machine,
-                                      bool mmap_data);
+                                      bool mmap_data,
+                                      unsigned int proc_map_timeout);
 
 size_t perf_event__fprintf_comm(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_mmap(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_mmap2(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf_task(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_aux(union perf_event *event, FILE *fp);
+size_t perf_event__fprintf_itrace_start(union perf_event *event, FILE *fp);
 size_t perf_event__fprintf(union perf_event *event, FILE *fp);
 
 u64 kallsyms__get_function_start(const char *kallsyms_filename,
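
The structs added above slot into union perf_event, so a consumer can dispatch on header.type straight off the ring buffer. A minimal sketch, assuming it is built inside the perf tree (util/event.h provides the union and the PERF_RECORD_* types); handle_event() itself is a hypothetical helper, not part of this patch:

    #include <inttypes.h>
    #include <stdio.h>
    #include "util/event.h"

    /* Sketch: report the new AUX-area record types as they arrive. */
    static void handle_event(union perf_event *event)
    {
            switch (event->header.type) {
            case PERF_RECORD_AUX:
                    printf("AUX: offset %#" PRIx64 " size %#" PRIx64 " flags %#" PRIx64 "\n",
                           event->aux.aux_offset, event->aux.aux_size,
                           event->aux.flags);
                    break;
            case PERF_RECORD_ITRACE_START:
                    printf("itrace start: pid %u tid %u\n",
                           event->itrace_start.pid, event->itrace_start.tid);
                    break;
            case PERF_RECORD_LOST_SAMPLES:
                    printf("lost %" PRIu64 " samples\n",
                           event->lost_samples.lost);
                    break;
            default:
                    break;
            }
    }
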
index 080be93eea969f9893bd789cde845bf4a94a37bd..8366511b45f8327a65dc44e80544f6b08e0df24a 100644 (file)
@@ -297,6 +297,8 @@ void perf_evlist__disable(struct perf_evlist *evlist)
                                      PERF_EVENT_IOC_DISABLE, 0);
                }
        }
+
+       evlist->enabled = false;
 }
 
 void perf_evlist__enable(struct perf_evlist *evlist)
@@ -316,6 +318,13 @@ void perf_evlist__enable(struct perf_evlist *evlist)
                                      PERF_EVENT_IOC_ENABLE, 0);
                }
        }
+
+       evlist->enabled = true;
+}
+
+void perf_evlist__toggle_enable(struct perf_evlist *evlist)
+{
+       (evlist->enabled ? perf_evlist__disable : perf_evlist__enable)(evlist);
 }
 
 int perf_evlist__disable_event(struct perf_evlist *evlist,
@@ -634,11 +643,18 @@ static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
 union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 {
        struct perf_mmap *md = &evlist->mmap[idx];
-       u64 head = perf_mmap__read_head(md);
+       u64 head;
        u64 old = md->prev;
        unsigned char *data = md->base + page_size;
        union perf_event *event = NULL;
 
+       /*
+        * Check if event was unmapped due to a POLLHUP/POLLERR.
+        */
+       if (!atomic_read(&md->refcnt))
+               return NULL;
+
+       head = perf_mmap__read_head(md);
        if (evlist->overwrite) {
                /*
                 * If we're further behind than half the buffer, there's a chance
@@ -695,19 +711,19 @@ union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
 
 static bool perf_mmap__empty(struct perf_mmap *md)
 {
-       return perf_mmap__read_head(md) == md->prev;
+       return perf_mmap__read_head(md) == md->prev && !md->auxtrace_mmap.base;
 }
 
 static void perf_evlist__mmap_get(struct perf_evlist *evlist, int idx)
 {
-       ++evlist->mmap[idx].refcnt;
+       atomic_inc(&evlist->mmap[idx].refcnt);
 }
 
 static void perf_evlist__mmap_put(struct perf_evlist *evlist, int idx)
 {
-       BUG_ON(evlist->mmap[idx].refcnt == 0);
+       BUG_ON(atomic_read(&evlist->mmap[idx].refcnt) == 0);
 
-       if (--evlist->mmap[idx].refcnt == 0)
+       if (atomic_dec_and_test(&evlist->mmap[idx].refcnt))
                __perf_evlist__munmap(evlist, idx);
 }
 
@@ -721,17 +737,46 @@ void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
                perf_mmap__write_tail(md, old);
        }
 
-       if (md->refcnt == 1 && perf_mmap__empty(md))
+       if (atomic_read(&md->refcnt) == 1 && perf_mmap__empty(md))
                perf_evlist__mmap_put(evlist, idx);
 }
 
+int __weak auxtrace_mmap__mmap(struct auxtrace_mmap *mm __maybe_unused,
+                              struct auxtrace_mmap_params *mp __maybe_unused,
+                              void *userpg __maybe_unused,
+                              int fd __maybe_unused)
+{
+       return 0;
+}
+
+void __weak auxtrace_mmap__munmap(struct auxtrace_mmap *mm __maybe_unused)
+{
+}
+
+void __weak auxtrace_mmap_params__init(
+                       struct auxtrace_mmap_params *mp __maybe_unused,
+                       off_t auxtrace_offset __maybe_unused,
+                       unsigned int auxtrace_pages __maybe_unused,
+                       bool auxtrace_overwrite __maybe_unused)
+{
+}
+
+void __weak auxtrace_mmap_params__set_idx(
+                       struct auxtrace_mmap_params *mp __maybe_unused,
+                       struct perf_evlist *evlist __maybe_unused,
+                       int idx __maybe_unused,
+                       bool per_cpu __maybe_unused)
+{
+}
+
 static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
 {
        if (evlist->mmap[idx].base != NULL) {
                munmap(evlist->mmap[idx].base, evlist->mmap_len);
                evlist->mmap[idx].base = NULL;
-               evlist->mmap[idx].refcnt = 0;
+               atomic_set(&evlist->mmap[idx].refcnt, 0);
        }
+       auxtrace_mmap__munmap(&evlist->mmap[idx].auxtrace_mmap);
 }
 
 void perf_evlist__munmap(struct perf_evlist *evlist)
@@ -759,6 +804,7 @@ static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
 struct mmap_params {
        int prot;
        int mask;
+       struct auxtrace_mmap_params auxtrace_mp;
 };
 
 static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
@@ -777,7 +823,7 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
         * evlist layer can't just drop it when filtering events in
         * perf_evlist__filter_pollfd().
         */
-       evlist->mmap[idx].refcnt = 2;
+       atomic_set(&evlist->mmap[idx].refcnt, 2);
        evlist->mmap[idx].prev = 0;
        evlist->mmap[idx].mask = mp->mask;
        evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
@@ -789,6 +835,10 @@ static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
                return -1;
        }
 
+       if (auxtrace_mmap__mmap(&evlist->mmap[idx].auxtrace_mmap,
+                               &mp->auxtrace_mp, evlist->mmap[idx].base, fd))
+               return -1;
+
        return 0;
 }
 
@@ -853,6 +903,9 @@ static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
        for (cpu = 0; cpu < nr_cpus; cpu++) {
                int output = -1;
 
+               auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
+                                             true);
+
                for (thread = 0; thread < nr_threads; thread++) {
                        if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
                                                        thread, &output))
@@ -878,6 +931,9 @@ static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
        for (thread = 0; thread < nr_threads; thread++) {
                int output = -1;
 
+               auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
+                                             false);
+
                if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
                                                &output))
                        goto out_unmap;
@@ -960,10 +1016,8 @@ static long parse_pages_arg(const char *str, unsigned long min,
        return pages;
 }
 
-int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
-                                 int unset __maybe_unused)
+int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
 {
-       unsigned int *mmap_pages = opt->value;
        unsigned long max = UINT_MAX;
        long pages;
 
@@ -980,20 +1034,32 @@ int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
        return 0;
 }
 
+int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
+                                 int unset __maybe_unused)
+{
+       return __perf_evlist__parse_mmap_pages(opt->value, str);
+}
+
 /**
- * perf_evlist__mmap - Create mmaps to receive events.
+ * perf_evlist__mmap_ex - Create mmaps to receive events.
  * @evlist: list of events
  * @pages: map length in pages
  * @overwrite: overwrite older events?
+ * @auxtrace_pages: auxtrace map length in pages
+ * @auxtrace_overwrite: overwrite older auxtrace data?
  *
  * If @overwrite is %false the user needs to signal event consumption using
  * perf_mmap__write_tail().  Using perf_evlist__mmap_read() does this
  * automatically.
  *
+ * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
+ * consumption using auxtrace_mmap__write_tail().
+ *
  * Return: %0 on success, negative error code otherwise.
  */
-int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
-                     bool overwrite)
+int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
+                        bool overwrite, unsigned int auxtrace_pages,
+                        bool auxtrace_overwrite)
 {
        struct perf_evsel *evsel;
        const struct cpu_map *cpus = evlist->cpus;
@@ -1013,6 +1079,9 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        pr_debug("mmap size %zuB\n", evlist->mmap_len);
        mp.mask = evlist->mmap_len - page_size - 1;
 
+       auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->mmap_len,
+                                  auxtrace_pages, auxtrace_overwrite);
+
        evlist__for_each(evlist, evsel) {
                if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
                    evsel->sample_id == NULL &&
@@ -1026,6 +1095,12 @@ int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
        return perf_evlist__mmap_per_cpu(evlist, &mp);
 }
 
+int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
+                     bool overwrite)
+{
+       return perf_evlist__mmap_ex(evlist, pages, overwrite, 0, false);
+}
+
 int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
 {
        evlist->threads = thread_map__new_str(target->pid, target->tid,
index b5cce95d644e0c3af3c04d5edd91cdaf3aabb20d..a8489b9d2812baecf1ba4709ff3b6da9787adda0 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __PERF_EVLIST_H
 #define __PERF_EVLIST_H 1
 
+#include <linux/atomic.h>
 #include <linux/list.h>
 #include <api/fd/array.h>
 #include <stdio.h>
@@ -8,6 +9,7 @@
 #include "event.h"
 #include "evsel.h"
 #include "util.h"
+#include "auxtrace.h"
 #include <unistd.h>
 
 struct pollfd;
@@ -26,8 +28,9 @@ struct record_opts;
 struct perf_mmap {
        void             *base;
        int              mask;
-       int              refcnt;
+       atomic_t         refcnt;
        u64              prev;
+       struct auxtrace_mmap auxtrace_mmap;
        char             event_copy[PERF_SAMPLE_MAX_SIZE] __attribute__((aligned(8)));
 };
 
@@ -37,6 +40,8 @@ struct perf_evlist {
        int              nr_entries;
        int              nr_groups;
        int              nr_mmaps;
+       bool             overwrite;
+       bool             enabled;
        size_t           mmap_len;
        int              id_pos;
        int              is_pos;
@@ -45,7 +50,6 @@ struct perf_evlist {
                int     cork_fd;
                pid_t   pid;
        } workload;
-       bool             overwrite;
        struct fdarray   pollfd;
        struct perf_mmap *mmap;
        struct thread_map *threads;
@@ -122,16 +126,21 @@ int perf_evlist__start_workload(struct perf_evlist *evlist);
 
 struct option;
 
+int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str);
 int perf_evlist__parse_mmap_pages(const struct option *opt,
                                  const char *str,
                                  int unset);
 
+int perf_evlist__mmap_ex(struct perf_evlist *evlist, unsigned int pages,
+                        bool overwrite, unsigned int auxtrace_pages,
+                        bool auxtrace_overwrite);
 int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
                      bool overwrite);
 void perf_evlist__munmap(struct perf_evlist *evlist);
 
 void perf_evlist__disable(struct perf_evlist *evlist);
 void perf_evlist__enable(struct perf_evlist *evlist);
+void perf_evlist__toggle_enable(struct perf_evlist *evlist);
 
 int perf_evlist__disable_event(struct perf_evlist *evlist,
                               struct perf_evsel *evsel);
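
The pieces added to this header compose: __perf_evlist__parse_mmap_pages() turns a user-supplied size string into pages, perf_evlist__mmap_ex() maps the regular and AUX-area buffers together, and perf_evlist__toggle_enable() flips the new evlist->enabled state. A hedged usage sketch (setup_mmaps() is hypothetical; the "1M"/"64K" strings assume the usual B/K/M/G suffix handling of parse_pages_arg()):

    #include <limits.h>
    #include "util/evlist.h"

    static int setup_mmaps(struct perf_evlist *evlist)
    {
            unsigned int pages = UINT_MAX, auxtrace_pages = 0;

            /* Parse size strings without going through the option framework. */
            if (__perf_evlist__parse_mmap_pages(&pages, "1M") ||
                __perf_evlist__parse_mmap_pages(&auxtrace_pages, "64K"))
                    return -1;

            /* Regular buffer and AUX area, both in non-overwrite mode. */
            if (perf_evlist__mmap_ex(evlist, pages, false, auxtrace_pages, false) < 0)
                    return -1;

            perf_evlist__enable(evlist);
            /* ... run and consume the workload ... */
            perf_evlist__toggle_enable(evlist);     /* enabled is now false */
            return 0;
    }
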
index 33e3fd8c2e682d19c8c8dbda12b9815199ef47bf..33449decf7bd2c24d981fdf30c10042063a503ee 100644 (file)
@@ -26,6 +26,7 @@
 #include "perf_regs.h"
 #include "debug.h"
 #include "trace-event.h"
+#include "stat.h"
 
 static struct {
        bool sample_id_all;
@@ -851,19 +852,6 @@ int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
        return 0;
 }
 
-void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
-{
-       memset(evsel->counts, 0, (sizeof(*evsel->counts) +
-                                (ncpus * sizeof(struct perf_counts_values))));
-}
-
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
-{
-       evsel->counts = zalloc((sizeof(*evsel->counts) +
-                               (ncpus * sizeof(struct perf_counts_values))));
-       return evsel->counts != NULL ? 0 : -ENOMEM;
-}
-
 static void perf_evsel__free_fd(struct perf_evsel *evsel)
 {
        xyarray__delete(evsel->fd);
@@ -891,11 +879,6 @@ void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
                }
 }
 
-void perf_evsel__free_counts(struct perf_evsel *evsel)
-{
-       zfree(&evsel->counts);
-}
-
 void perf_evsel__exit(struct perf_evsel *evsel)
 {
        assert(list_empty(&evsel->node));
@@ -1058,7 +1041,7 @@ static void __p_read_format(char *buf, size_t size, u64 value)
 
 #define BUF_SIZE               1024
 
-#define p_hex(val)             snprintf(buf, BUF_SIZE, "%"PRIx64, (uint64_t)(val))
+#define p_hex(val)             snprintf(buf, BUF_SIZE, "%#"PRIx64, (uint64_t)(val))
 #define p_unsigned(val)                snprintf(buf, BUF_SIZE, "%"PRIu64, (uint64_t)(val))
 #define p_signed(val)          snprintf(buf, BUF_SIZE, "%"PRId64, (int64_t)(val))
 #define p_sample_type(val)     __p_sample_type(buf, BUF_SIZE, val)
@@ -1121,6 +1104,7 @@ int perf_event_attr__fprintf(FILE *fp, struct perf_event_attr *attr,
        PRINT_ATTRf(sample_stack_user, p_unsigned);
        PRINT_ATTRf(clockid, p_signed);
        PRINT_ATTRf(sample_regs_intr, p_hex);
+       PRINT_ATTRf(aux_watermark, p_unsigned);
 
        return ret;
 }
@@ -2148,7 +2132,9 @@ int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
        case EMFILE:
                return scnprintf(msg, size, "%s",
                         "Too many events are opened.\n"
-                        "Try again after reducing the number of events.");
+                        "Probably the maximum number of open file descriptors has been reached.\n"
+                        "Hint: Try again after reducing the number of events.\n"
+                        "Hint: Try increasing the limit with 'ulimit -n <limit>'");
        case ENODEV:
                if (target->cpu_list)
                        return scnprintf(msg, size, "%s",
index e486151b03089720eed68436bbae7a454defb3ec..bb0579e8a10a4556119c5aa313cd22ddacdbc220 100644 (file)
@@ -73,7 +73,6 @@ struct perf_evsel {
        char                    *name;
        double                  scale;
        const char              *unit;
-       bool                    snapshot;
        struct event_format     *tp_format;
        union {
                void            *priv;
@@ -86,6 +85,7 @@ struct perf_evsel {
        unsigned int            sample_size;
        int                     id_pos;
        int                     is_pos;
+       bool                    snapshot;
        bool                    supported;
        bool                    needs_swap;
        bool                    no_aux_samples;
@@ -93,11 +93,11 @@ struct perf_evsel {
        bool                    system_wide;
        bool                    tracking;
        bool                    per_pkg;
-       unsigned long           *per_pkg_mask;
        /* parse modifier helper */
        int                     exclude_GH;
        int                     nr_members;
        int                     sample_read;
+       unsigned long           *per_pkg_mask;
        struct perf_evsel       *leader;
        char                    *group_name;
 };
@@ -170,9 +170,6 @@ const char *perf_evsel__group_name(struct perf_evsel *evsel);
 int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size);
 
 int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads);
-int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
-void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
-void perf_evsel__free_counts(struct perf_evsel *evsel);
 void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads);
 
 void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
index 918fd8ae2d80bca007a8b6f7e2f8488fed0771a3..21a77e7a171e8aa0664d5caf0737f141bc96e62f 100644 (file)
@@ -869,6 +869,20 @@ static int write_branch_stack(int fd __maybe_unused,
        return 0;
 }
 
+static int write_auxtrace(int fd, struct perf_header *h,
+                         struct perf_evlist *evlist __maybe_unused)
+{
+       struct perf_session *session;
+       int err;
+
+       session = container_of(h, struct perf_session, header);
+
+       err = auxtrace_index__write(fd, &session->auxtrace_index);
+       if (err < 0)
+               pr_err("Failed to write auxtrace index\n");
+       return err;
+}
+
 static void print_hostname(struct perf_header *ph, int fd __maybe_unused,
                           FILE *fp)
 {
@@ -1151,6 +1165,12 @@ static void print_branch_stack(struct perf_header *ph __maybe_unused,
        fprintf(fp, "# contains samples with branch stack\n");
 }
 
+static void print_auxtrace(struct perf_header *ph __maybe_unused,
+                          int fd __maybe_unused, FILE *fp)
+{
+       fprintf(fp, "# contains AUX area data (e.g. instruction trace)\n");
+}
+
 static void print_pmu_mappings(struct perf_header *ph, int fd __maybe_unused,
                               FILE *fp)
 {
@@ -1218,9 +1238,8 @@ static int __event_process_build_id(struct build_id_event *bev,
                                    struct perf_session *session)
 {
        int err = -1;
-       struct dsos *dsos;
        struct machine *machine;
-       u16 misc;
+       u16 cpumode;
        struct dso *dso;
        enum dso_kernel_type dso_type;
 
@@ -1228,39 +1247,37 @@ static int __event_process_build_id(struct build_id_event *bev,
        if (!machine)
                goto out;
 
-       misc = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
+       cpumode = bev->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
 
-       switch (misc) {
+       switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                dso_type = DSO_TYPE_KERNEL;
-               dsos = &machine->kernel_dsos;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                dso_type = DSO_TYPE_GUEST_KERNEL;
-               dsos = &machine->kernel_dsos;
                break;
        case PERF_RECORD_MISC_USER:
        case PERF_RECORD_MISC_GUEST_USER:
                dso_type = DSO_TYPE_USER;
-               dsos = &machine->user_dsos;
                break;
        default:
                goto out;
        }
 
-       dso = __dsos__findnew(dsos, filename);
+       dso = machine__findnew_dso(machine, filename);
        if (dso != NULL) {
                char sbuild_id[BUILD_ID_SIZE * 2 + 1];
 
                dso__set_build_id(dso, &bev->build_id);
 
-               if (!is_kernel_module(filename))
+               if (!is_kernel_module(filename, cpumode))
                        dso->kernel = dso_type;
 
                build_id__sprintf(dso->build_id, sizeof(dso->build_id),
                                  sbuild_id);
                pr_debug("build id event received for %s: %s\n",
                         dso->long_name, sbuild_id);
+               dso__put(dso);
        }
 
        err = 0;
@@ -1821,6 +1838,22 @@ out_free:
        return ret;
 }
 
+static int process_auxtrace(struct perf_file_section *section,
+                           struct perf_header *ph, int fd,
+                           void *data __maybe_unused)
+{
+       struct perf_session *session;
+       int err;
+
+       session = container_of(ph, struct perf_session, header);
+
+       err = auxtrace_index__process(fd, section->size, session,
+                                     ph->needs_swap);
+       if (err < 0)
+               pr_err("Failed to process auxtrace index\n");
+       return err;
+}
+
 struct feature_ops {
        int (*write)(int fd, struct perf_header *h, struct perf_evlist *evlist);
        void (*print)(struct perf_header *h, int fd, FILE *fp);
@@ -1861,6 +1894,7 @@ static const struct feature_ops feat_ops[HEADER_LAST_FEATURE] = {
        FEAT_OPA(HEADER_BRANCH_STACK,   branch_stack),
        FEAT_OPP(HEADER_PMU_MAPPINGS,   pmu_mappings),
        FEAT_OPP(HEADER_GROUP_DESC,     group_desc),
+       FEAT_OPP(HEADER_AUXTRACE,       auxtrace),
 };
 
 struct header_print_data {
index 3bb90ac172a1ba0c8429494f4ebf501909a55274..d4d57962c59129d6121b61921b1c1bf58fe2d12a 100644 (file)
@@ -30,6 +30,7 @@ enum {
        HEADER_BRANCH_STACK,
        HEADER_PMU_MAPPINGS,
        HEADER_GROUP_DESC,
+       HEADER_AUXTRACE,
        HEADER_LAST_FEATURE,
        HEADER_FEAT_BITS        = 256,
 };
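
With HEADER_AUXTRACE in place, write_auxtrace() stores the auxtrace index in the perf.data header and process_auxtrace() reads it back. A small sketch of testing the feature bit on the read side, assuming the existing perf_header__has_feat() accessor from this header:

    #include <stdbool.h>
    #include "util/header.h"
    #include "util/session.h"

    /* Sketch: does this perf.data file carry AUX area data? */
    static bool session_has_auxtrace(struct perf_session *session)
    {
            return perf_header__has_feat(&session->header, HEADER_AUXTRACE);
    }
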
index cc22b9158b93c41fd0d44cd451ba189c56bc19dd..6f28d53d4e46093293e71363d9aa5e7c1e0b23f5 100644 (file)
@@ -313,8 +313,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                memset(&he->stat, 0, sizeof(he->stat));
                }
 
-               if (he->ms.map)
-                       he->ms.map->referenced = true;
+               map__get(he->ms.map);
 
                if (he->branch_info) {
                        /*
@@ -324,6 +323,7 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
+                               map__zput(he->ms.map);
                                free(he->stat_acc);
                                free(he);
                                return NULL;
@@ -332,17 +332,13 @@ static struct hist_entry *hist_entry__new(struct hist_entry *template,
                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));
 
-                       if (he->branch_info->from.map)
-                               he->branch_info->from.map->referenced = true;
-                       if (he->branch_info->to.map)
-                               he->branch_info->to.map->referenced = true;
+                       map__get(he->branch_info->from.map);
+                       map__get(he->branch_info->to.map);
                }
 
                if (he->mem_info) {
-                       if (he->mem_info->iaddr.map)
-                               he->mem_info->iaddr.map->referenced = true;
-                       if (he->mem_info->daddr.map)
-                               he->mem_info->daddr.map->referenced = true;
+                       map__get(he->mem_info->iaddr.map);
+                       map__get(he->mem_info->daddr.map);
                }
 
                if (symbol_conf.use_callchain)
@@ -362,10 +358,10 @@ static u8 symbol__parent_filter(const struct symbol *parent)
        return 0;
 }
 
-static struct hist_entry *add_hist_entry(struct hists *hists,
-                                        struct hist_entry *entry,
-                                        struct addr_location *al,
-                                        bool sample_self)
+static struct hist_entry *hists__findnew_entry(struct hists *hists,
+                                              struct hist_entry *entry,
+                                              struct addr_location *al,
+                                              bool sample_self)
 {
        struct rb_node **p;
        struct rb_node *parent = NULL;
@@ -407,9 +403,8 @@ static struct hist_entry *add_hist_entry(struct hists *hists,
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
-                               he->ms.map = entry->ms.map;
-                               if (he->ms.map)
-                                       he->ms.map->referenced = true;
+                               map__put(he->ms.map);
+                               he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }
@@ -468,7 +463,7 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                .transaction = transaction,
        };
 
-       return add_hist_entry(hists, &entry, al, sample_self);
+       return hists__findnew_entry(hists, &entry, al, sample_self);
 }
 
 static int
@@ -548,9 +543,9 @@ iter_finish_mem_entry(struct hist_entry_iter *iter,
 
 out:
        /*
-        * We don't need to free iter->priv (mem_info) here since
-        * the mem info was either already freed in add_hist_entry() or
-        * passed to a new hist entry by hist_entry__new().
+        * We don't need to free iter->priv (mem_info) here since the mem info
+        * was either already freed in hists__findnew_entry() or passed to a
+        * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;
 
@@ -851,19 +846,15 @@ const struct hist_iter_ops hist_iter_cumulative = {
 };
 
 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
-                        struct perf_evsel *evsel, struct perf_sample *sample,
                         int max_stack_depth, void *arg)
 {
        int err, err2;
 
-       err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
-                                       max_stack_depth);
+       err = sample__resolve_callchain(iter->sample, &iter->parent,
+                                       iter->evsel, al, max_stack_depth);
        if (err)
                return err;
 
-       iter->evsel = evsel;
-       iter->sample = sample;
-
        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;
@@ -937,8 +928,20 @@ hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
 void hist_entry__delete(struct hist_entry *he)
 {
        thread__zput(he->thread);
-       zfree(&he->branch_info);
-       zfree(&he->mem_info);
+       map__zput(he->ms.map);
+
+       if (he->branch_info) {
+               map__zput(he->branch_info->from.map);
+               map__zput(he->branch_info->to.map);
+               zfree(&he->branch_info);
+       }
+
+       if (he->mem_info) {
+               map__zput(he->mem_info->iaddr.map);
+               map__zput(he->mem_info->daddr.map);
+               zfree(&he->mem_info);
+       }
+
        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        free_callchain(he->callchain);
@@ -1163,7 +1166,7 @@ static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h
                return;
 
        /* force fold unfiltered entry for simplicity */
-       h->ms.unfolded = false;
+       h->unfolded = false;
        h->row_offset = 0;
        h->nr_rows = 0;
 
index 9f31b89a527a2e8f9d02da1993cdff13709b84c1..5ed8d9c229814d9c6942ce3528898bbd9de1cb79 100644 (file)
@@ -111,7 +111,6 @@ struct hist_entry *__hists__add_entry(struct hists *hists,
                                      u64 weight, u64 transaction,
                                      bool sample_self);
 int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
-                        struct perf_evsel *evsel, struct perf_sample *sample,
                         int max_stack_depth, void *arg);
 
 int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
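
The hists conversion above replaces the advisory map->referenced flag with real reference counts: every map pointer copied into a hist_entry takes a reference, and hist_entry__delete() drops it with map__zput(), which also clears the pointer. A sketch of the ownership rule, using a hypothetical zero-initialized holder struct (map__get()/map__put() pass NULL through, as the unguarded calls above rely on):

    #include "util/map.h"

    struct holder {
            struct map *map;
    };

    static void holder__set_map(struct holder *h, struct map *map)
    {
            map__put(h->map);               /* drop the old reference, if any */
            h->map = map__get(map);         /* take a new one (NULL-safe) */
    }

    static void holder__exit(struct holder *h)
    {
            map__zput(h->map);              /* put the reference, NULL the field */
    }
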
diff --git a/tools/perf/util/include/linux/kernel.h b/tools/perf/util/include/linux/kernel.h
deleted file mode 100644 (file)
index 09e8e7a..0000000
+++ /dev/null
@@ -1,107 +0,0 @@
-#ifndef PERF_LINUX_KERNEL_H_
-#define PERF_LINUX_KERNEL_H_
-
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <assert.h>
-
-#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d))
-
-#define PERF_ALIGN(x, a)       __PERF_ALIGN_MASK(x, (typeof(x))(a)-1)
-#define __PERF_ALIGN_MASK(x, mask)     (((x)+(mask))&~(mask))
-
-#ifndef offsetof
-#define offsetof(TYPE, MEMBER) ((size_t) &((TYPE *)0)->MEMBER)
-#endif
-
-#ifndef container_of
-/**
- * container_of - cast a member of a structure out to the containing structure
- * @ptr:       the pointer to the member.
- * @type:      the type of the container struct this is embedded in.
- * @member:    the name of the member within the struct.
- *
- */
-#define container_of(ptr, type, member) ({                     \
-       const typeof(((type *)0)->member) * __mptr = (ptr);     \
-       (type *)((char *)__mptr - offsetof(type, member)); })
-#endif
-
-#define BUILD_BUG_ON_ZERO(e) (sizeof(struct { int:-!!(e); }))
-
-#ifndef max
-#define max(x, y) ({                           \
-       typeof(x) _max1 = (x);                  \
-       typeof(y) _max2 = (y);                  \
-       (void) (&_max1 == &_max2);              \
-       _max1 > _max2 ? _max1 : _max2; })
-#endif
-
-#ifndef min
-#define min(x, y) ({                           \
-       typeof(x) _min1 = (x);                  \
-       typeof(y) _min2 = (y);                  \
-       (void) (&_min1 == &_min2);              \
-       _min1 < _min2 ? _min1 : _min2; })
-#endif
-
-#ifndef roundup
-#define roundup(x, y) (                                \
-{                                                      \
-       const typeof(y) __y = y;                       \
-       (((x) + (__y - 1)) / __y) * __y;               \
-}                                                      \
-)
-#endif
-
-#ifndef BUG_ON
-#ifdef NDEBUG
-#define BUG_ON(cond) do { if (cond) {} } while (0)
-#else
-#define BUG_ON(cond) assert(!(cond))
-#endif
-#endif
-
-/*
- * Both need more care to handle endianness
- * (Don't use bitmap_copy_le() for now)
- */
-#define cpu_to_le64(x) (x)
-#define cpu_to_le32(x) (x)
-
-static inline int
-vscnprintf(char *buf, size_t size, const char *fmt, va_list args)
-{
-       int i;
-       ssize_t ssize = size;
-
-       i = vsnprintf(buf, size, fmt, args);
-
-       return (i >= ssize) ? (ssize - 1) : i;
-}
-
-static inline int scnprintf(char * buf, size_t size, const char * fmt, ...)
-{
-       va_list args;
-       ssize_t ssize = size;
-       int i;
-
-       va_start(args, fmt);
-       i = vsnprintf(buf, size, fmt, args);
-       va_end(args);
-
-       return (i >= ssize) ? (ssize - 1) : i;
-}
-
-/*
- * This looks more complex than it should be. But we need to
- * get the type for the ~ right in round_down (it needs to be
- * as wide as the result!), and we want to evaluate the macro
- * arguments just once each.
- */
-#define __round_mask(x, y) ((__typeof__(x))((y)-1))
-#define round_up(x, y) ((((x)-1) | __round_mask(x, y))+1)
-#define round_down(x, y) ((x) & ~__round_mask(x, y))
-
-#endif
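
These wrappers are deleted because shared equivalents now live under tools/include. One behaviour worth remembering from the code above: scnprintf() returns what was actually written, not the would-be length that snprintf() reports, so accumulated offsets can never run past the buffer. A sketch, assuming the tools/include replacement keeps the semantics shown above:

    #include <linux/kernel.h>       /* scnprintf(), now from tools/include */

    static void demo(void)
    {
            char buf[16];
            int len = 0;

            /* Each call returns at most the space actually used, so the
             * running offset stays inside buf. */
            len += scnprintf(buf + len, sizeof(buf) - len, "%s", "hello ");
            len += scnprintf(buf + len, sizeof(buf) - len, "%s", "world, safely cut");
            /* len <= sizeof(buf) - 1 here; raw snprintf() could overshoot. */
    }
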
diff --git a/tools/perf/util/include/linux/list.h b/tools/perf/util/include/linux/list.h
deleted file mode 100644 (file)
index 76ddbc7..0000000
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <linux/kernel.h>
-#include <linux/types.h>
-
-#include "../../../../include/linux/list.h"
-
-#ifndef PERF_LIST_H
-#define PERF_LIST_H
-/**
- * list_del_range - deletes range of entries from list.
- * @begin: first element in the range to delete from the list.
- * @end: last element in the range to delete from the list.
- * Note: list_empty on the range of entries does not return true after this,
- * the entries is in an undefined state.
- */
-static inline void list_del_range(struct list_head *begin,
-                                 struct list_head *end)
-{
-       begin->prev->next = end->next;
-       end->next->prev = begin->prev;
-}
-
-/**
- * list_for_each_from  -       iterate over a list from one of its nodes
- * @pos:  the &struct list_head to use as a loop cursor, from where to start
- * @head: the head for your list.
- */
-#define list_for_each_from(pos, head) \
-       for (; pos != (head); pos = pos->next)
-#endif
diff --git a/tools/perf/util/include/linux/poison.h b/tools/perf/util/include/linux/poison.h
deleted file mode 100644 (file)
index fef6dbc..0000000
+++ /dev/null
@@ -1 +0,0 @@
-#include "../../../../include/linux/poison.h"
index 2a030c5af3aa2062082d87b1604b9980a1006844..f06d89f0b8678d8407c91db17e8c26250376c5a1 100644 (file)
@@ -1,2 +1,16 @@
+#ifndef __TOOLS_LINUX_PERF_RBTREE_H
+#define __TOOLS_LINUX_PERF_RBTREE_H
 #include <stdbool.h>
 #include "../../../../include/linux/rbtree.h"
+
+/*
+ * Handy for checking that we are not deleting an entry that is
+ * already in a list, found in block/{blk-throttle,cfq-iosched}.c,
+ * probably should be moved to lib/rbtree.c...
+ */
+static inline void rb_erase_init(struct rb_node *n, struct rb_root *root)
+{
+       rb_erase(n, root);
+       RB_CLEAR_NODE(n);
+}
+#endif /* __TOOLS_LINUX_PERF_RBTREE_H */
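
rb_erase_init() gives rbtree nodes the idiom list_del_init() gives list nodes: after removal the node is cleared, so membership can be re-tested with RB_EMPTY_NODE(). A small sketch (struct item is illustrative):

    #include <linux/rbtree.h>

    struct item {
            struct rb_node rb_node;
    };

    /* Safe to call even if the item was already unlinked. */
    static void item__unlink(struct item *item, struct rb_root *root)
    {
            if (!RB_EMPTY_NODE(&item->rb_node))
                    rb_erase_init(&item->rb_node, root);
    }
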
index 527e032e24f6e648e258b08b379e55fc6dcf8e5a..4744673aff1b287de3a091a40edade2a709a8e52 100644 (file)
 #include "unwind.h"
 #include "linux/hash.h"
 
+static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock);
+
 static void dsos__init(struct dsos *dsos)
 {
        INIT_LIST_HEAD(&dsos->head);
        dsos->root = RB_ROOT;
+       pthread_rwlock_init(&dsos->lock, NULL);
 }
 
 int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 {
        map_groups__init(&machine->kmaps, machine);
        RB_CLEAR_NODE(&machine->rb_node);
-       dsos__init(&machine->user_dsos);
-       dsos__init(&machine->kernel_dsos);
+       dsos__init(&machine->dsos);
 
        machine->threads = RB_ROOT;
+       pthread_rwlock_init(&machine->threads_lock, NULL);
        INIT_LIST_HEAD(&machine->dead_threads);
        machine->last_match = NULL;
 
@@ -54,6 +57,7 @@ int machine__init(struct machine *machine, const char *root_dir, pid_t pid)
 
                snprintf(comm, sizeof(comm), "[guest/%d]", pid);
                thread__set_comm(thread, comm, 0);
+               thread__put(thread);
        }
 
        machine->current_tid = NULL;
@@ -78,37 +82,50 @@ out_delete:
        return NULL;
 }
 
-static void dsos__delete(struct dsos *dsos)
+static void dsos__purge(struct dsos *dsos)
 {
        struct dso *pos, *n;
 
+       pthread_rwlock_wrlock(&dsos->lock);
+
        list_for_each_entry_safe(pos, n, &dsos->head, node) {
                RB_CLEAR_NODE(&pos->rb_node);
-               list_del(&pos->node);
-               dso__delete(pos);
+               list_del_init(&pos->node);
+               dso__put(pos);
        }
+
+       pthread_rwlock_unlock(&dsos->lock);
+}
+
+static void dsos__exit(struct dsos *dsos)
+{
+       dsos__purge(dsos);
+       pthread_rwlock_destroy(&dsos->lock);
 }
 
 void machine__delete_threads(struct machine *machine)
 {
-       struct rb_node *nd = rb_first(&machine->threads);
+       struct rb_node *nd;
 
+       pthread_rwlock_wrlock(&machine->threads_lock);
+       nd = rb_first(&machine->threads);
        while (nd) {
                struct thread *t = rb_entry(nd, struct thread, rb_node);
 
                nd = rb_next(nd);
-               machine__remove_thread(machine, t);
+               __machine__remove_thread(machine, t, false);
        }
+       pthread_rwlock_unlock(&machine->threads_lock);
 }
 
 void machine__exit(struct machine *machine)
 {
        map_groups__exit(&machine->kmaps);
-       dsos__delete(&machine->user_dsos);
-       dsos__delete(&machine->kernel_dsos);
-       vdso__exit(machine);
+       dsos__exit(&machine->dsos);
+       machine__exit_vdso(machine);
        zfree(&machine->root_dir);
        zfree(&machine->current_tid);
+       pthread_rwlock_destroy(&machine->threads_lock);
 }
 
 void machine__delete(struct machine *machine)
@@ -303,7 +320,7 @@ static void machine__update_thread_pid(struct machine *machine,
        if (th->pid_ == th->tid)
                return;
 
-       leader = machine__findnew_thread(machine, th->pid_, th->pid_);
+       leader = __machine__findnew_thread(machine, th->pid_, th->pid_);
        if (!leader)
                goto out_err;
 
@@ -325,7 +342,7 @@ static void machine__update_thread_pid(struct machine *machine,
                if (!map_groups__empty(th->mg))
                        pr_err("Discarding thread maps for %d:%d\n",
                               th->pid_, th->tid);
-               map_groups__delete(th->mg);
+               map_groups__put(th->mg);
        }
 
        th->mg = map_groups__get(leader->mg);
@@ -336,9 +353,9 @@ out_err:
        pr_err("Failed to join map groups for %d:%d\n", th->pid_, th->tid);
 }
 
-static struct thread *__machine__findnew_thread(struct machine *machine,
-                                               pid_t pid, pid_t tid,
-                                               bool create)
+static struct thread *____machine__findnew_thread(struct machine *machine,
+                                                 pid_t pid, pid_t tid,
+                                                 bool create)
 {
        struct rb_node **p = &machine->threads.rb_node;
        struct rb_node *parent = NULL;
@@ -356,7 +373,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                        return th;
                }
 
-               thread__zput(machine->last_match);
+               machine->last_match = NULL;
        }
 
        while (*p != NULL) {
@@ -364,7 +381,7 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                th = rb_entry(parent, struct thread, rb_node);
 
                if (th->tid == tid) {
-                       machine->last_match = thread__get(th);
+                       machine->last_match = th;
                        machine__update_thread_pid(machine, th, pid);
                        return th;
                }
@@ -392,7 +409,8 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                 * leader and that would screwed the rb tree.
                 */
                if (thread__init_map_groups(th, machine)) {
-                       rb_erase(&th->rb_node, &machine->threads);
+                       rb_erase_init(&th->rb_node, &machine->threads);
+                       RB_CLEAR_NODE(&th->rb_node);
                        thread__delete(th);
                        return NULL;
                }
@@ -400,22 +418,36 @@ static struct thread *__machine__findnew_thread(struct machine *machine,
                 * It is now in the rbtree, get a ref
                 */
                thread__get(th);
-               machine->last_match = thread__get(th);
+               machine->last_match = th;
        }
 
        return th;
 }
 
+struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid)
+{
+       return ____machine__findnew_thread(machine, pid, tid, true);
+}
+
 struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
                                       pid_t tid)
 {
-       return __machine__findnew_thread(machine, pid, tid, true);
+       struct thread *th;
+
+       pthread_rwlock_wrlock(&machine->threads_lock);
+       th = thread__get(__machine__findnew_thread(machine, pid, tid));
+       pthread_rwlock_unlock(&machine->threads_lock);
+       return th;
 }
 
 struct thread *machine__find_thread(struct machine *machine, pid_t pid,
                                    pid_t tid)
 {
-       return __machine__findnew_thread(machine, pid, tid, false);
+       struct thread *th;
+       pthread_rwlock_rdlock(&machine->threads_lock);
+       th = thread__get(____machine__findnew_thread(machine, pid, tid, false));
+       pthread_rwlock_unlock(&machine->threads_lock);
+       return th;
 }
 
 struct comm *machine__thread_exec_comm(struct machine *machine,
@@ -434,6 +466,7 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
                                                        event->comm.pid,
                                                        event->comm.tid);
        bool exec = event->header.misc & PERF_RECORD_MISC_COMM_EXEC;
+       int err = 0;
 
        if (exec)
                machine->comm_exec = true;
@@ -444,10 +477,12 @@ int machine__process_comm_event(struct machine *machine, union perf_event *event
        if (thread == NULL ||
            __thread__set_comm(thread, event->comm.comm, sample->time, exec)) {
                dump_printf("problem processing PERF_RECORD_COMM, skipping event.\n");
-               return -1;
+               err = -1;
        }
 
-       return 0;
+       thread__put(thread);
+
+       return err;
 }
 
 int machine__process_lost_event(struct machine *machine __maybe_unused,
@@ -458,17 +493,27 @@ int machine__process_lost_event(struct machine *machine __maybe_unused,
        return 0;
 }
 
-static struct dso*
-machine__module_dso(struct machine *machine, struct kmod_path *m,
-                   const char *filename)
+int machine__process_lost_samples_event(struct machine *machine __maybe_unused,
+                                       union perf_event *event, struct perf_sample *sample)
+{
+       dump_printf(": id:%" PRIu64 ": lost samples :%" PRIu64 "\n",
+                   sample->id, event->lost_samples.lost);
+       return 0;
+}
+
+static struct dso *machine__findnew_module_dso(struct machine *machine,
+                                              struct kmod_path *m,
+                                              const char *filename)
 {
        struct dso *dso;
 
-       dso = dsos__find(&machine->kernel_dsos, m->name, true);
+       pthread_rwlock_wrlock(&machine->dsos.lock);
+
+       dso = __dsos__find(&machine->dsos, m->name, true);
        if (!dso) {
-               dso = dsos__addnew(&machine->kernel_dsos, m->name);
+               dso = __dsos__addnew(&machine->dsos, m->name);
                if (dso == NULL)
-                       return NULL;
+                       goto out_unlock;
 
                if (machine__is_host(machine))
                        dso->symtab_type = DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE;
@@ -483,11 +528,30 @@ machine__module_dso(struct machine *machine, struct kmod_path *m,
                dso__set_long_name(dso, strdup(filename), true);
        }
 
+       dso__get(dso);
+out_unlock:
+       pthread_rwlock_unlock(&machine->dsos.lock);
        return dso;
 }
 
-struct map *machine__new_module(struct machine *machine, u64 start,
-                               const char *filename)
+int machine__process_aux_event(struct machine *machine __maybe_unused,
+                              union perf_event *event)
+{
+       if (dump_trace)
+               perf_event__fprintf_aux(event, stdout);
+       return 0;
+}
+
+int machine__process_itrace_start_event(struct machine *machine __maybe_unused,
+                                       union perf_event *event)
+{
+       if (dump_trace)
+               perf_event__fprintf_itrace_start(event, stdout);
+       return 0;
+}
+
+struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+                                       const char *filename)
 {
        struct map *map = NULL;
        struct dso *dso;
@@ -501,7 +565,7 @@ struct map *machine__new_module(struct machine *machine, u64 start,
        if (map)
                goto out;
 
-       dso = machine__module_dso(machine, &m, filename);
+       dso = machine__findnew_module_dso(machine, &m, filename);
        if (dso == NULL)
                goto out;
 
@@ -519,13 +583,11 @@ out:
 size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
 {
        struct rb_node *nd;
-       size_t ret = __dsos__fprintf(&machines->host.kernel_dsos.head, fp) +
-                    __dsos__fprintf(&machines->host.user_dsos.head, fp);
+       size_t ret = __dsos__fprintf(&machines->host.dsos.head, fp);
 
        for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
                struct machine *pos = rb_entry(nd, struct machine, rb_node);
-               ret += __dsos__fprintf(&pos->kernel_dsos.head, fp);
-               ret += __dsos__fprintf(&pos->user_dsos.head, fp);
+               ret += __dsos__fprintf(&pos->dsos.head, fp);
        }
 
        return ret;
@@ -534,8 +596,7 @@ size_t machines__fprintf_dsos(struct machines *machines, FILE *fp)
 size_t machine__fprintf_dsos_buildid(struct machine *m, FILE *fp,
                                     bool (skip)(struct dso *dso, int parm), int parm)
 {
-       return __dsos__fprintf_buildid(&m->kernel_dsos.head, fp, skip, parm) +
-              __dsos__fprintf_buildid(&m->user_dsos.head, fp, skip, parm);
+       return __dsos__fprintf_buildid(&m->dsos.head, fp, skip, parm);
 }
 
 size_t machines__fprintf_dsos_buildid(struct machines *machines, FILE *fp,
@@ -575,12 +636,16 @@ size_t machine__fprintf(struct machine *machine, FILE *fp)
        size_t ret = 0;
        struct rb_node *nd;
 
+       pthread_rwlock_rdlock(&machine->threads_lock);
+
        for (nd = rb_first(&machine->threads); nd; nd = rb_next(nd)) {
                struct thread *pos = rb_entry(nd, struct thread, rb_node);
 
                ret += thread__fprintf(pos, fp);
        }
 
+       pthread_rwlock_unlock(&machine->threads_lock);
+
        return ret;
 }
 
@@ -594,9 +659,8 @@ static struct dso *machine__get_kernel(struct machine *machine)
                if (!vmlinux_name)
                        vmlinux_name = "[kernel.kallsyms]";
 
-               kernel = dso__kernel_findnew(machine, vmlinux_name,
-                                            "[kernel]",
-                                            DSO_TYPE_KERNEL);
+               kernel = machine__findnew_kernel(machine, vmlinux_name,
+                                                "[kernel]", DSO_TYPE_KERNEL);
        } else {
                char bf[PATH_MAX];
 
@@ -606,9 +670,9 @@ static struct dso *machine__get_kernel(struct machine *machine)
                        vmlinux_name = machine__mmap_name(machine, bf,
                                                          sizeof(bf));
 
-               kernel = dso__kernel_findnew(machine, vmlinux_name,
-                                            "[guest.kernel]",
-                                            DSO_TYPE_GUEST_KERNEL);
+               kernel = machine__findnew_kernel(machine, vmlinux_name,
+                                                "[guest.kernel]",
+                                                DSO_TYPE_GUEST_KERNEL);
        }
 
        if (kernel != NULL && (!kernel->has_build_id))
@@ -713,7 +777,6 @@ void machine__destroy_kernel_maps(struct machine *machine)
                                kmap->ref_reloc_sym = NULL;
                }
 
-               map__delete(machine->vmlinux_maps[type]);
                machine->vmlinux_maps[type] = NULL;
        }
 }
@@ -970,7 +1033,7 @@ static int machine__create_module(void *arg, const char *name, u64 start)
        struct machine *machine = arg;
        struct map *map;
 
-       map = machine__new_module(machine, start, name);
+       map = machine__findnew_module_map(machine, start, name);
        if (map == NULL)
                return -1;
 
@@ -1062,7 +1125,7 @@ static bool machine__uses_kcore(struct machine *machine)
 {
        struct dso *dso;
 
-       list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
+       list_for_each_entry(dso, &machine->dsos.head, node) {
                if (dso__is_kcore(dso))
                        return true;
        }
@@ -1093,8 +1156,8 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                                strlen(kmmap_prefix) - 1) == 0;
        if (event->mmap.filename[0] == '/' ||
            (!is_kernel_mmap && event->mmap.filename[0] == '[')) {
-               map = machine__new_module(machine, event->mmap.start,
-                                         event->mmap.filename);
+               map = machine__findnew_module_map(machine, event->mmap.start,
+                                                 event->mmap.filename);
                if (map == NULL)
                        goto out_problem;
 
@@ -1109,23 +1172,48 @@ static int machine__process_kernel_mmap_event(struct machine *machine,
                struct dso *kernel = NULL;
                struct dso *dso;
 
-               list_for_each_entry(dso, &machine->kernel_dsos.head, node) {
-                       if (is_kernel_module(dso->long_name))
+               pthread_rwlock_rdlock(&machine->dsos.lock);
+
+               list_for_each_entry(dso, &machine->dsos.head, node) {
+
+                       /*
+                        * The cpumode passed to is_kernel_module is not the
+                        * cpumode of *this* event. If we insist on passing
+                        * the correct cpumode to is_kernel_module(), we would
+                        * have to record the cpumode when adding this dso to
+                        * the linked list.
+                        *
+                        * However, we don't really need to pass the correct
+                        * cpumode.  We know the correct cpumode must be kernel
+                        * mode (if not, we should not have linked it onto the
+                        * kernel_dsos list).
+                        *
+                        * Therefore, we pass PERF_RECORD_MISC_CPUMODE_UNKNOWN.
+                        * is_kernel_module() treats it as a kernel cpumode.
+                        */
+
+                       if (!dso->kernel ||
+                           is_kernel_module(dso->long_name,
+                                            PERF_RECORD_MISC_CPUMODE_UNKNOWN))
                                continue;
 
                        kernel = dso;
                        break;
                }
 
+               pthread_rwlock_unlock(&machine->dsos.lock);
+
                if (kernel == NULL)
-                       kernel = __dsos__findnew(&machine->kernel_dsos,
-                                                kmmap_prefix);
+                       kernel = machine__findnew_dso(machine, kmmap_prefix);
                if (kernel == NULL)
                        goto out_problem;
 
                kernel->kernel = kernel_type;
-               if (__machine__create_kernel_maps(machine, kernel) < 0)
+               if (__machine__create_kernel_maps(machine, kernel) < 0) {
+                       dso__put(kernel);
                        goto out_problem;
+               }
 
                if (strstr(kernel->long_name, "vmlinux"))
                        dso__set_short_name(kernel, "[kernel.vmlinux]", false);
@@ -1197,11 +1285,15 @@ int machine__process_mmap2_event(struct machine *machine,
                        event->mmap2.filename, type, thread);
 
        if (map == NULL)
-               goto out_problem;
+               goto out_problem_map;
 
        thread__insert_map(thread, map);
+       thread__put(thread);
+       map__put(map);
        return 0;
 
+out_problem_map:
+       thread__put(thread);
 out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP2, skipping event.\n");
        return 0;
@@ -1244,31 +1336,46 @@ int machine__process_mmap_event(struct machine *machine, union perf_event *event
                        type, thread);
 
        if (map == NULL)
-               goto out_problem;
+               goto out_problem_map;
 
        thread__insert_map(thread, map);
+       thread__put(thread);
+       map__put(map);
        return 0;
 
+out_problem_map:
+       thread__put(thread);
 out_problem:
        dump_printf("problem processing PERF_RECORD_MMAP, skipping event.\n");
        return 0;
 }
 
-void machine__remove_thread(struct machine *machine, struct thread *th)
+static void __machine__remove_thread(struct machine *machine, struct thread *th, bool lock)
 {
        if (machine->last_match == th)
-               thread__zput(machine->last_match);
+               machine->last_match = NULL;
 
-       rb_erase(&th->rb_node, &machine->threads);
+       BUG_ON(atomic_read(&th->refcnt) == 0);
+       if (lock)
+               pthread_rwlock_wrlock(&machine->threads_lock);
+       rb_erase_init(&th->rb_node, &machine->threads);
+       RB_CLEAR_NODE(&th->rb_node);
        /*
         * Move it first to the dead_threads list, then drop the reference,
         * if this is the last reference, then the thread__delete destructor
         * will be called and we will remove it from the dead_threads list.
         */
        list_add_tail(&th->node, &machine->dead_threads);
+       if (lock)
+               pthread_rwlock_unlock(&machine->threads_lock);
        thread__put(th);
 }
 
+void machine__remove_thread(struct machine *machine, struct thread *th)
+{
+       return __machine__remove_thread(machine, th, true);
+}
+
 int machine__process_fork_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample)
 {
@@ -1278,10 +1385,13 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
        struct thread *parent = machine__findnew_thread(machine,
                                                        event->fork.ppid,
                                                        event->fork.ptid);
+       int err = 0;
 
        /* if a thread currently exists for the thread id remove it */
-       if (thread != NULL)
+       if (thread != NULL) {
                machine__remove_thread(machine, thread);
+               thread__put(thread);
+       }
 
        thread = machine__findnew_thread(machine, event->fork.pid,
                                         event->fork.tid);
@@ -1291,10 +1401,12 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
        if (thread == NULL || parent == NULL ||
            thread__fork(thread, parent, sample->time) < 0) {
                dump_printf("problem processing PERF_RECORD_FORK, skipping event.\n");
-               return -1;
+               err = -1;
        }
+       thread__put(thread);
+       thread__put(parent);
 
-       return 0;
+       return err;
 }
 
 int machine__process_exit_event(struct machine *machine, union perf_event *event,
@@ -1307,8 +1419,10 @@ int machine__process_exit_event(struct machine *machine, union perf_event *event
        if (dump_trace)
                perf_event__fprintf_task(event, stdout);
 
-       if (thread != NULL)
+       if (thread != NULL) {
                thread__exited(thread);
+               thread__put(thread);
+       }
 
        return 0;
 }
@@ -1331,6 +1445,13 @@ int machine__process_event(struct machine *machine, union perf_event *event,
                ret = machine__process_exit_event(machine, event, sample); break;
        case PERF_RECORD_LOST:
                ret = machine__process_lost_event(machine, event, sample); break;
+       case PERF_RECORD_AUX:
+               ret = machine__process_aux_event(machine, event); break;
+       case PERF_RECORD_ITRACE_START:
+               ret = machine__process_itrace_start_event(machine, event); break;
+       case PERF_RECORD_LOST_SAMPLES:
+               ret = machine__process_lost_samples_event(machine, event, sample); break;
        default:
                ret = -1;
                break;
@@ -1769,14 +1890,36 @@ int machine__for_each_thread(struct machine *machine,
        return rc;
 }
 
+int machines__for_each_thread(struct machines *machines,
+                             int (*fn)(struct thread *thread, void *p),
+                             void *priv)
+{
+       struct rb_node *nd;
+       int rc = 0;
+
+       rc = machine__for_each_thread(&machines->host, fn, priv);
+       if (rc != 0)
+               return rc;
+
+       for (nd = rb_first(&machines->guests); nd; nd = rb_next(nd)) {
+               struct machine *machine = rb_entry(nd, struct machine, rb_node);
+
+               rc = machine__for_each_thread(machine, fn, priv);
+               if (rc != 0)
+                       return rc;
+       }
+       return rc;
+}
+
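machines__for_each_thread() walks the host first, then every guest machine, aborting on the first non-zero return just like machine__for_each_thread(). An illustrative callback (count_thread and the nr counter are hypothetical):

	static int count_thread(struct thread *thread __maybe_unused, void *priv)
	{
		int *nr = priv;

		(*nr)++;
		return 0;	/* non-zero would stop the walk early */
	}

	int nr = 0;

	machines__for_each_thread(machines, count_thread, &nr);
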
 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
                                  struct target *target, struct thread_map *threads,
-                                 perf_event__handler_t process, bool data_mmap)
+                                 perf_event__handler_t process, bool data_mmap,
+                                 unsigned int proc_map_timeout)
 {
        if (target__has_task(target))
-               return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap);
+               return perf_event__synthesize_thread_map(tool, threads, process, machine, data_mmap, proc_map_timeout);
        else if (target__has_cpu(target))
-               return perf_event__synthesize_threads(tool, process, machine, data_mmap);
+               return perf_event__synthesize_threads(tool, process, machine, data_mmap, proc_map_timeout);
        /* command specified */
        return 0;
 }
@@ -1820,6 +1963,7 @@ int machine__set_current_tid(struct machine *machine, int cpu, pid_t pid,
                return -ENOMEM;
 
        thread->cpu = cpu;
+       thread__put(thread);
 
        return 0;
 }
@@ -1845,3 +1989,8 @@ int machine__get_kernel_start(struct machine *machine)
        }
        return err;
 }
+
+struct dso *machine__findnew_dso(struct machine *machine, const char *filename)
+{
+       return dsos__findnew(&machine->dsos, filename);
+}
index 6d64cedb9d1e8f6fb255068b27761f6c52735b5e..887798e511e9f3dd382109927860269b03744246 100644 (file)
@@ -30,11 +30,11 @@ struct machine {
        bool              comm_exec;
        char              *root_dir;
        struct rb_root    threads;
+       pthread_rwlock_t  threads_lock;
        struct list_head  dead_threads;
        struct thread     *last_match;
        struct vdso_info  *vdso_info;
-       struct dsos       user_dsos;
-       struct dsos       kernel_dsos;
+       struct dsos       dsos;
        struct map_groups kmaps;
        struct map        *vmlinux_maps[MAP__NR_TYPES];
        u64               kernel_start;
@@ -81,6 +81,12 @@ int machine__process_fork_event(struct machine *machine, union perf_event *event
                                struct perf_sample *sample);
 int machine__process_lost_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample);
+int machine__process_lost_samples_event(struct machine *machine, union perf_event *event,
+                                       struct perf_sample *sample);
+int machine__process_aux_event(struct machine *machine,
+                              union perf_event *event);
+int machine__process_itrace_start_event(struct machine *machine,
+                                       union perf_event *event);
 int machine__process_mmap_event(struct machine *machine, union perf_event *event,
                                struct perf_sample *sample);
 int machine__process_mmap2_event(struct machine *machine, union perf_event *event,
@@ -147,8 +153,10 @@ static inline bool machine__is_host(struct machine *machine)
        return machine ? machine->pid == HOST_KERNEL_ID : false;
 }
 
-struct thread *machine__findnew_thread(struct machine *machine, pid_t pid,
-                                      pid_t tid);
+struct thread *__machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+struct thread *machine__findnew_thread(struct machine *machine, pid_t pid, pid_t tid);
+
+struct dso *machine__findnew_dso(struct machine *machine, const char *filename);
 
 size_t machine__fprintf(struct machine *machine, FILE *fp);
 
@@ -181,8 +189,8 @@ struct symbol *machine__find_kernel_function_by_name(struct machine *machine,
                                                 filter);
 }
 
-struct map *machine__new_module(struct machine *machine, u64 start,
-                               const char *filename);
+struct map *machine__findnew_module_map(struct machine *machine, u64 start,
+                                       const char *filename);
 
 int machine__load_kallsyms(struct machine *machine, const char *filename,
                           enum map_type type, symbol_filter_t filter);
@@ -208,16 +216,22 @@ size_t machine__fprintf_vmlinux_path(struct machine *machine, FILE *fp);
 int machine__for_each_thread(struct machine *machine,
                             int (*fn)(struct thread *thread, void *p),
                             void *priv);
+int machines__for_each_thread(struct machines *machines,
+                             int (*fn)(struct thread *thread, void *p),
+                             void *priv);
 
 int __machine__synthesize_threads(struct machine *machine, struct perf_tool *tool,
                                  struct target *target, struct thread_map *threads,
-                                 perf_event__handler_t process, bool data_mmap);
+                                 perf_event__handler_t process, bool data_mmap,
+                                 unsigned int proc_map_timeout);
 static inline
 int machine__synthesize_threads(struct machine *machine, struct target *target,
-                               struct thread_map *threads, bool data_mmap)
+                               struct thread_map *threads, bool data_mmap,
+                               unsigned int proc_map_timeout)
 {
        return __machine__synthesize_threads(machine, NULL, target, threads,
-                                            perf_event__process, data_mmap);
+                                            perf_event__process, data_mmap,
+                                            proc_map_timeout);
 }
 
 pid_t machine__get_current_tid(struct machine *machine, int cpu);
index a14f08f416863944527412b82a6b3cfaeda3c603..b5a5e9c024379652fccd722d7da2739169e0ab5d 100644 (file)
@@ -16,6 +16,8 @@
 #include "machine.h"
 #include <linux/string.h>
 
+static void __maps__insert(struct maps *maps, struct map *map);
+
 const char *map_type__name[MAP__NR_TYPES] = {
        [MAP__FUNCTION] = "Functions",
        [MAP__VARIABLE] = "Variables",
@@ -130,13 +132,13 @@ void map__init(struct map *map, enum map_type type,
        map->end      = end;
        map->pgoff    = pgoff;
        map->reloc    = 0;
-       map->dso      = dso;
+       map->dso      = dso__get(dso);
        map->map_ip   = map__map_ip;
        map->unmap_ip = map__unmap_ip;
        RB_CLEAR_NODE(&map->rb_node);
        map->groups   = NULL;
-       map->referenced = false;
        map->erange_warned = false;
+       atomic_set(&map->refcnt, 1);
 }
 
 struct map *map__new(struct machine *machine, u64 start, u64 len,
@@ -175,9 +177,9 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
 
                if (vdso) {
                        pgoff = 0;
-                       dso = vdso__dso_findnew(machine, thread);
+                       dso = machine__findnew_vdso(machine, thread);
                } else
-                       dso = __dsos__findnew(&machine->user_dsos, filename);
+                       dso = machine__findnew_dso(machine, filename);
 
                if (dso == NULL)
                        goto out_delete;
@@ -195,6 +197,7 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
                        if (type != MAP__FUNCTION)
                                dso__set_loaded(dso, map->type);
                }
+               dso__put(dso);
        }
        return map;
 out_delete:
@@ -221,11 +224,24 @@ struct map *map__new2(u64 start, struct dso *dso, enum map_type type)
        return map;
 }
 
+static void map__exit(struct map *map)
+{
+       BUG_ON(!RB_EMPTY_NODE(&map->rb_node));
+       dso__zput(map->dso);
+}
+
 void map__delete(struct map *map)
 {
+       map__exit(map);
        free(map);
 }
 
+void map__put(struct map *map)
+{
+       if (map && atomic_dec_and_test(&map->refcnt))
+               map__delete(map);
+}
+
 void map__fixup_start(struct map *map)
 {
        struct rb_root *symbols = &map->dso->symbols[map->type];
@@ -292,6 +308,11 @@ int map__load(struct map *map, symbol_filter_t filter)
        return 0;
 }
 
+int __weak arch__compare_symbol_names(const char *namea, const char *nameb)
+{
+       return strcmp(namea, nameb);
+}
+
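Marking the default comparison __weak lets an architecture override symbol-name matching without touching generic code. A hedged sketch of such an override, in the style of ppc64 function descriptors where "foo" and ".foo" name the same function (the dot-stripping rule here is illustrative):

	int arch__compare_symbol_names(const char *namea, const char *nameb)
	{
		/* skip a leading '.' so "foo" and ".foo" compare equal */
		if (*namea == '.')
			namea++;
		if (*nameb == '.')
			nameb++;
		return strcmp(namea, nameb);
	}
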
 struct symbol *map__find_symbol(struct map *map, u64 addr,
                                symbol_filter_t filter)
 {
@@ -413,48 +434,49 @@ u64 map__objdump_2mem(struct map *map, u64 ip)
        return ip + map->reloc;
 }
 
+static void maps__init(struct maps *maps)
+{
+       maps->entries = RB_ROOT;
+       pthread_rwlock_init(&maps->lock, NULL);
+}
+
 void map_groups__init(struct map_groups *mg, struct machine *machine)
 {
        int i;
        for (i = 0; i < MAP__NR_TYPES; ++i) {
-               mg->maps[i] = RB_ROOT;
-               INIT_LIST_HEAD(&mg->removed_maps[i]);
+               maps__init(&mg->maps[i]);
        }
        mg->machine = machine;
-       mg->refcnt = 1;
+       atomic_set(&mg->refcnt, 1);
 }
 
-static void maps__delete(struct rb_root *maps)
+static void __maps__purge(struct maps *maps)
 {
-       struct rb_node *next = rb_first(maps);
+       struct rb_root *root = &maps->entries;
+       struct rb_node *next = rb_first(root);
 
        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
 
                next = rb_next(&pos->rb_node);
-               rb_erase(&pos->rb_node, maps);
-               map__delete(pos);
+               rb_erase_init(&pos->rb_node, root);
+               map__put(pos);
        }
 }
 
-static void maps__delete_removed(struct list_head *maps)
+static void maps__exit(struct maps *maps)
 {
-       struct map *pos, *n;
-
-       list_for_each_entry_safe(pos, n, maps, node) {
-               list_del(&pos->node);
-               map__delete(pos);
-       }
+       pthread_rwlock_wrlock(&maps->lock);
+       __maps__purge(maps);
+       pthread_rwlock_unlock(&maps->lock);
 }
 
 void map_groups__exit(struct map_groups *mg)
 {
        int i;
 
-       for (i = 0; i < MAP__NR_TYPES; ++i) {
-               maps__delete(&mg->maps[i]);
-               maps__delete_removed(&mg->removed_maps[i]);
-       }
+       for (i = 0; i < MAP__NR_TYPES; ++i)
+               maps__exit(&mg->maps[i]);
 }
 
 bool map_groups__empty(struct map_groups *mg)
@@ -464,8 +486,6 @@ bool map_groups__empty(struct map_groups *mg)
        for (i = 0; i < MAP__NR_TYPES; ++i) {
                if (maps__first(&mg->maps[i]))
                        return false;
-               if (!list_empty(&mg->removed_maps[i]))
-                       return false;
        }
 
        return true;
@@ -489,32 +509,10 @@ void map_groups__delete(struct map_groups *mg)
 
 void map_groups__put(struct map_groups *mg)
 {
-       if (--mg->refcnt == 0)
+       if (mg && atomic_dec_and_test(&mg->refcnt))
                map_groups__delete(mg);
 }
 
-void map_groups__flush(struct map_groups *mg)
-{
-       int type;
-
-       for (type = 0; type < MAP__NR_TYPES; type++) {
-               struct rb_root *root = &mg->maps[type];
-               struct rb_node *next = rb_first(root);
-
-               while (next) {
-                       struct map *pos = rb_entry(next, struct map, rb_node);
-                       next = rb_next(&pos->rb_node);
-                       rb_erase(&pos->rb_node, root);
-                       /*
-                        * We may have references to this map, for
-                        * instance in some hist_entry instances, so
-                        * just move them to a separate list.
-                        */
-                       list_add_tail(&pos->node, &mg->removed_maps[pos->type]);
-               }
-       }
-}
-
 struct symbol *map_groups__find_symbol(struct map_groups *mg,
                                       enum map_type type, u64 addr,
                                       struct map **mapp,
@@ -538,20 +536,28 @@ struct symbol *map_groups__find_symbol_by_name(struct map_groups *mg,
                                               struct map **mapp,
                                               symbol_filter_t filter)
 {
+       struct maps *maps = &mg->maps[type];
+       struct symbol *sym;
        struct rb_node *nd;
 
-       for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+       pthread_rwlock_rdlock(&maps->lock);
+
+       for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
-               struct symbol *sym = map__find_symbol_by_name(pos, name, filter);
+
+               sym = map__find_symbol_by_name(pos, name, filter);
 
                if (sym == NULL)
                        continue;
                if (mapp != NULL)
                        *mapp = pos;
-               return sym;
+               goto out;
        }
 
-       return NULL;
+       sym = NULL;
+out:
+       pthread_rwlock_unlock(&maps->lock);
+       return sym;
 }
 
 int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
@@ -571,73 +577,54 @@ int map_groups__find_ams(struct addr_map_symbol *ams, symbol_filter_t filter)
        return ams->sym ? 0 : -1;
 }
 
-size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
-                                 FILE *fp)
+static size_t maps__fprintf(struct maps *maps, FILE *fp)
 {
-       size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+       size_t printed = 0;
        struct rb_node *nd;
 
-       for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
+       pthread_rwlock_rdlock(&maps->lock);
+
+       for (nd = rb_first(&maps->entries); nd; nd = rb_next(nd)) {
                struct map *pos = rb_entry(nd, struct map, rb_node);
                printed += fprintf(fp, "Map:");
                printed += map__fprintf(pos, fp);
                if (verbose > 2) {
-                       printed += dso__fprintf(pos->dso, type, fp);
+                       printed += dso__fprintf(pos->dso, pos->type, fp);
                        printed += fprintf(fp, "--\n");
                }
        }
 
-       return printed;
-}
+       pthread_rwlock_unlock(&maps->lock);
 
-static size_t map_groups__fprintf_maps(struct map_groups *mg, FILE *fp)
-{
-       size_t printed = 0, i;
-       for (i = 0; i < MAP__NR_TYPES; ++i)
-               printed += __map_groups__fprintf_maps(mg, i, fp);
        return printed;
 }
 
-static size_t __map_groups__fprintf_removed_maps(struct map_groups *mg,
-                                                enum map_type type, FILE *fp)
+size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
+                                 FILE *fp)
 {
-       struct map *pos;
-       size_t printed = 0;
-
-       list_for_each_entry(pos, &mg->removed_maps[type], node) {
-               printed += fprintf(fp, "Map:");
-               printed += map__fprintf(pos, fp);
-               if (verbose > 1) {
-                       printed += dso__fprintf(pos->dso, type, fp);
-                       printed += fprintf(fp, "--\n");
-               }
-       }
-       return printed;
+       size_t printed = fprintf(fp, "%s:\n", map_type__name[type]);
+       return printed += maps__fprintf(&mg->maps[type], fp);
 }
 
-static size_t map_groups__fprintf_removed_maps(struct map_groups *mg,
-                                              FILE *fp)
+size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
 {
        size_t printed = 0, i;
        for (i = 0; i < MAP__NR_TYPES; ++i)
-               printed += __map_groups__fprintf_removed_maps(mg, i, fp);
+               printed += __map_groups__fprintf_maps(mg, i, fp);
        return printed;
 }
 
-size_t map_groups__fprintf(struct map_groups *mg, FILE *fp)
+static int maps__fixup_overlappings(struct maps *maps, struct map *map, FILE *fp)
 {
-       size_t printed = map_groups__fprintf_maps(mg, fp);
-       printed += fprintf(fp, "Removed maps:\n");
-       return printed + map_groups__fprintf_removed_maps(mg, fp);
-}
-
-int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
-                                  FILE *fp)
-{
-       struct rb_root *root = &mg->maps[map->type];
-       struct rb_node *next = rb_first(root);
+       struct rb_root *root;
+       struct rb_node *next;
        int err = 0;
 
+       pthread_rwlock_wrlock(&maps->lock);
+
+       root = &maps->entries;
+       next = rb_first(root);
+
        while (next) {
                struct map *pos = rb_entry(next, struct map, rb_node);
                next = rb_next(&pos->rb_node);
@@ -651,7 +638,7 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
                        map__fprintf(pos, fp);
                }
 
-               rb_erase(&pos->rb_node, root);
+               rb_erase_init(&pos->rb_node, root);
                /*
                 * Now check if we need to create new maps for areas not
                 * overlapped by the new map:
@@ -661,11 +648,11 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 
                        if (before == NULL) {
                                err = -ENOMEM;
-                               goto move_map;
+                               goto put_map;
                        }
 
                        before->end = map->start;
-                       map_groups__insert(mg, before);
+                       __maps__insert(maps, before);
                        if (verbose >= 2)
                                map__fprintf(before, fp);
                }
@@ -675,28 +662,31 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 
                        if (after == NULL) {
                                err = -ENOMEM;
-                               goto move_map;
+                               goto put_map;
                        }
 
                        after->start = map->end;
-                       map_groups__insert(mg, after);
+                       __maps__insert(maps, after);
                        if (verbose >= 2)
                                map__fprintf(after, fp);
                }
-move_map:
-               /*
-                * If we have references, just move them to a separate list.
-                */
-               if (pos->referenced)
-                       list_add_tail(&pos->node, &mg->removed_maps[map->type]);
-               else
-                       map__delete(pos);
+put_map:
+               map__put(pos);
 
                if (err)
-                       return err;
+                       goto out;
        }
 
-       return 0;
+       err = 0;
+out:
+       pthread_rwlock_unlock(&maps->lock);
+       return err;
+}
+
+int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
+                                  FILE *fp)
+{
+       return maps__fixup_overlappings(&mg->maps[map->type], map, fp);
 }
 
 /*
@@ -705,20 +695,28 @@ move_map:
 int map_groups__clone(struct map_groups *mg,
                      struct map_groups *parent, enum map_type type)
 {
-       struct rb_node *nd;
-       for (nd = rb_first(&parent->maps[type]); nd; nd = rb_next(nd)) {
-               struct map *map = rb_entry(nd, struct map, rb_node);
+       int err = -ENOMEM;
+       struct map *map;
+       struct maps *maps = &parent->maps[type];
+
+       pthread_rwlock_rdlock(&maps->lock);
+
+       for (map = maps__first(maps); map; map = map__next(map)) {
                struct map *new = map__clone(map);
                if (new == NULL)
-                       return -ENOMEM;
+                       goto out_unlock;
                map_groups__insert(mg, new);
        }
-       return 0;
+
+       err = 0;
+out_unlock:
+       pthread_rwlock_unlock(&maps->lock);
+       return err;
 }
 
-void maps__insert(struct rb_root *maps, struct map *map)
+static void __maps__insert(struct maps *maps, struct map *map)
 {
-       struct rb_node **p = &maps->rb_node;
+       struct rb_node **p = &maps->entries.rb_node;
        struct rb_node *parent = NULL;
        const u64 ip = map->start;
        struct map *m;
@@ -733,20 +731,38 @@ void maps__insert(struct rb_root *maps, struct map *map)
        }
 
        rb_link_node(&map->rb_node, parent, p);
-       rb_insert_color(&map->rb_node, maps);
+       rb_insert_color(&map->rb_node, &maps->entries);
+       map__get(map);
 }
 
-void maps__remove(struct rb_root *maps, struct map *map)
+void maps__insert(struct maps *maps, struct map *map)
 {
-       rb_erase(&map->rb_node, maps);
+       pthread_rwlock_wrlock(&maps->lock);
+       __maps__insert(maps, map);
+       pthread_rwlock_unlock(&maps->lock);
 }
 
-struct map *maps__find(struct rb_root *maps, u64 ip)
+static void __maps__remove(struct maps *maps, struct map *map)
 {
-       struct rb_node **p = &maps->rb_node;
-       struct rb_node *parent = NULL;
+       rb_erase_init(&map->rb_node, &maps->entries);
+       map__put(map);
+}
+
+void maps__remove(struct maps *maps, struct map *map)
+{
+       pthread_rwlock_wrlock(&maps->lock);
+       __maps__remove(maps, map);
+       pthread_rwlock_unlock(&maps->lock);
+}
+
+struct map *maps__find(struct maps *maps, u64 ip)
+{
+       struct rb_node **p, *parent = NULL;
        struct map *m;
 
+       pthread_rwlock_rdlock(&maps->lock);
+
+       p = &maps->entries.rb_node;
        while (*p != NULL) {
                parent = *p;
                m = rb_entry(parent, struct map, rb_node);
@@ -755,22 +771,25 @@ struct map *maps__find(struct rb_root *maps, u64 ip)
                else if (ip >= m->end)
                        p = &(*p)->rb_right;
                else
-                       return m;
+                       goto out;
        }
 
-       return NULL;
+       m = NULL;
+out:
+       pthread_rwlock_unlock(&maps->lock);
+       return m;
 }
 
-struct map *maps__first(struct rb_root *maps)
+struct map *maps__first(struct maps *maps)
 {
-       struct rb_node *first = rb_first(maps);
+       struct rb_node *first = rb_first(&maps->entries);
 
        if (first)
                return rb_entry(first, struct map, rb_node);
        return NULL;
 }
 
-struct map *maps__next(struct map *map)
+struct map *map__next(struct map *map)
 {
        struct rb_node *next = rb_next(&map->rb_node);
 
index ec19c59ca38e07deba4a8c2c254ec3a4c71c6c91..d73e687b224e4e0d3b427f1244695ea4e19c4fcb 100644 (file)
@@ -1,9 +1,11 @@
 #ifndef __PERF_MAP_H
 #define __PERF_MAP_H
 
+#include <linux/atomic.h>
 #include <linux/compiler.h>
 #include <linux/list.h>
 #include <linux/rbtree.h>
+#include <pthread.h>
 #include <stdio.h>
 #include <stdbool.h>
 #include <linux/types.h>
@@ -32,7 +34,6 @@ struct map {
        u64                     start;
        u64                     end;
        u8 /* enum map_type */  type;
-       bool                    referenced;
        bool                    erange_warned;
        u32                     priv;
        u32                     prot;
@@ -50,6 +51,7 @@ struct map {
 
        struct dso              *dso;
        struct map_groups       *groups;
+       atomic_t                refcnt;
 };
 
 struct kmap {
@@ -57,11 +59,15 @@ struct kmap {
        struct map_groups       *kmaps;
 };
 
+struct maps {
+       struct rb_root   entries;
+       pthread_rwlock_t lock;
+};
+
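struct maps pairs the rbtree with a pthread_rwlock_t so multiple readers can walk the entries while insert/remove/fixup writers take the lock exclusively. The read-side pattern used by map_groups__clone() and the fprintf helpers, sketched:

	struct map *pos;

	pthread_rwlock_rdlock(&maps->lock);
	for (pos = maps__first(maps); pos; pos = map__next(pos)) {
		/* read-only use; map__get(pos) before keeping it past unlock */
	}
	pthread_rwlock_unlock(&maps->lock);
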
 struct map_groups {
-       struct rb_root   maps[MAP__NR_TYPES];
-       struct list_head removed_maps[MAP__NR_TYPES];
+       struct maps      maps[MAP__NR_TYPES];
        struct machine   *machine;
-       int              refcnt;
+       atomic_t         refcnt;
 };
 
 struct map_groups *map_groups__new(struct machine *machine);
@@ -70,7 +76,8 @@ bool map_groups__empty(struct map_groups *mg);
 
 static inline struct map_groups *map_groups__get(struct map_groups *mg)
 {
-       ++mg->refcnt;
+       if (mg)
+               atomic_inc(&mg->refcnt);
        return mg;
 }
 
@@ -124,7 +131,7 @@ struct thread;
  */
 #define __map__for_each_symbol_by_name(map, sym_name, pos, filter)     \
        for (pos = map__find_symbol_by_name(map, sym_name, filter);     \
-            pos && strcmp(pos->name, sym_name) == 0;           \
+            pos && arch__compare_symbol_names(pos->name, sym_name) == 0;       \
             pos = symbol__next_by_name(pos))
 
 #define map__for_each_symbol_by_name(map, sym_name, pos)               \
@@ -132,6 +139,7 @@ struct thread;
 
 typedef int (*symbol_filter_t)(struct map *map, struct symbol *sym);
 
+int arch__compare_symbol_names(const char *namea, const char *nameb);
 void map__init(struct map *map, enum map_type type,
               u64 start, u64 end, u64 pgoff, struct dso *dso);
 struct map *map__new(struct machine *machine, u64 start, u64 len,
@@ -141,6 +149,24 @@ struct map *map__new(struct machine *machine, u64 start, u64 len,
 struct map *map__new2(u64 start, struct dso *dso, enum map_type type);
 void map__delete(struct map *map);
 struct map *map__clone(struct map *map);
+
+static inline struct map *map__get(struct map *map)
+{
+       if (map)
+               atomic_inc(&map->refcnt);
+       return map;
+}
+
+void map__put(struct map *map);
+
+static inline void __map__zput(struct map **map)
+{
+       map__put(*map);
+       *map = NULL;
+}
+
+#define map__zput(map) __map__zput(&map)
+
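map__zput() mirrors the dso__zput() already used in map__exit(): it drops the reference and NULLs the caller's pointer in one step, so a stale pointer cannot be dereferenced or double-put. Sketch (other_map is hypothetical):

	struct map *map = map__get(other_map);

	/* ... use map ... */
	map__zput(map);		/* puts the ref and leaves map == NULL */
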
 int map__overlap(struct map *l, struct map *r);
 size_t map__fprintf(struct map *map, FILE *fp);
 size_t map__fprintf_dsoname(struct map *map, FILE *fp);
@@ -159,11 +185,11 @@ void map__reloc_vmlinux(struct map *map);
 
 size_t __map_groups__fprintf_maps(struct map_groups *mg, enum map_type type,
                                  FILE *fp);
-void maps__insert(struct rb_root *maps, struct map *map);
-void maps__remove(struct rb_root *maps, struct map *map);
-struct map *maps__find(struct rb_root *maps, u64 addr);
-struct map *maps__first(struct rb_root *maps);
-struct map *maps__next(struct map *map);
+void maps__insert(struct maps *maps, struct map *map);
+void maps__remove(struct maps *maps, struct map *map);
+struct map *maps__find(struct maps *maps, u64 addr);
+struct map *maps__first(struct maps *maps);
+struct map *map__next(struct map *map);
 void map_groups__init(struct map_groups *mg, struct machine *machine);
 void map_groups__exit(struct map_groups *mg);
 int map_groups__clone(struct map_groups *mg,
@@ -198,7 +224,7 @@ static inline struct map *map_groups__first(struct map_groups *mg,
 
 static inline struct map *map_groups__next(struct map *map)
 {
-       return maps__next(map);
+       return map__next(map);
 }
 
 struct symbol *map_groups__find_symbol(struct map_groups *mg,
@@ -230,6 +256,4 @@ int map_groups__fixup_overlappings(struct map_groups *mg, struct map *map,
 struct map *map_groups__find_by_name(struct map_groups *mg,
                                     enum map_type type, const char *name);
 
-void map_groups__flush(struct map_groups *mg);
-
 #endif /* __PERF_MAP_H */
index 31ee02d4e988a7ea4b0101c0ef4e1ae74f349023..53ef006a951c3f3c90ce8c62e9a5cd73b7750a74 100644 (file)
@@ -50,11 +50,6 @@ void setup_pager(void)
 
        if (!isatty(1))
                return;
-       if (!pager) {
-               if (!pager_program)
-                       perf_config(perf_default_config, NULL);
-               pager = pager_program;
-       }
        if (!pager)
                pager = getenv("PAGER");
        if (!(pager || access("/usr/bin/pager", X_OK)))
diff --git a/tools/perf/util/parse-branch-options.c b/tools/perf/util/parse-branch-options.c
new file mode 100644 (file)
index 0000000..a3b1e13
--- /dev/null
@@ -0,0 +1,94 @@
+#include "perf.h"
+#include "util/util.h"
+#include "util/debug.h"
+#include "util/parse-options.h"
+#include "util/parse-branch-options.h"
+
+#define BRANCH_OPT(n, m) \
+       { .name = n, .mode = (m) }
+
+#define BRANCH_END { .name = NULL }
+
+struct branch_mode {
+       const char *name;
+       int mode;
+};
+
+static const struct branch_mode branch_modes[] = {
+       BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
+       BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
+       BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
+       BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
+       BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
+       BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
+       BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
+       BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
+       BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
+       BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
+       BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
+       BRANCH_OPT("ind_jmp", PERF_SAMPLE_BRANCH_IND_JUMP),
+       BRANCH_END
+};
+
+int
+parse_branch_stack(const struct option *opt, const char *str, int unset)
+{
+#define ONLY_PLM \
+       (PERF_SAMPLE_BRANCH_USER        |\
+        PERF_SAMPLE_BRANCH_KERNEL      |\
+        PERF_SAMPLE_BRANCH_HV)
+
+       uint64_t *mode = (uint64_t *)opt->value;
+       const struct branch_mode *br;
+       char *s, *os = NULL, *p;
+       int ret = -1;
+
+       if (unset)
+               return 0;
+
+       /*
+        * cannot set it twice, -b + --branch-filter for instance
+        */
+       if (*mode)
+               return -1;
+
+       /* str may be NULL in case no arg is passed to -b */
+       if (str) {
+               /* because str is read-only */
+               s = os = strdup(str);
+               if (!s)
+                       return -1;
+
+               for (;;) {
+                       p = strchr(s, ',');
+                       if (p)
+                               *p = '\0';
+
+                       for (br = branch_modes; br->name; br++) {
+                               if (!strcasecmp(s, br->name))
+                                       break;
+                       }
+                       if (!br->name) {
+                               ui__warning("unknown branch filter %s,"
+                                           " check man page\n", s);
+                               goto error;
+                       }
+
+                       *mode |= br->mode;
+
+                       if (!p)
+                               break;
+
+                       s = p + 1;
+               }
+       }
+       ret = 0;
+
+       /* default to any branch */
+       if ((*mode & ~ONLY_PLM) == 0) {
+               *mode = PERF_SAMPLE_BRANCH_ANY;
+       }
+error:
+       free(os);
+       return ret;
+}
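parse_branch_stack() is written to sit behind an OPT_CALLBACK-style option whose value is the u64 branch-sample mask. Roughly how a builtin would wire it up (the option letter, names and help strings here are illustrative):

	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		     "branch filter mask",
		     "branch stack filter modes",
		     parse_branch_stack),
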
diff --git a/tools/perf/util/parse-branch-options.h b/tools/perf/util/parse-branch-options.h
new file mode 100644 (file)
index 0000000..b9d9470
--- /dev/null
@@ -0,0 +1,5 @@
+#ifndef _PERF_PARSE_BRANCH_OPTIONS_H
+#define _PERF_PARSE_BRANCH_OPTIONS_H 1
+struct option;
+int parse_branch_stack(const struct option *opt, const char *str, int unset);
+#endif /* _PERF_PARSE_BRANCH_OPTIONS_H */
index be0655388b38e4238d69782ee0e9c8357276a574..2a4d1ec028464757d6723bbd08c2d0ce0a010a14 100644 (file)
@@ -17,6 +17,7 @@
 #include "parse-events-flex.h"
 #include "pmu.h"
 #include "thread_map.h"
+#include "asm/bug.h"
 
 #define MAX_NAME_LEN 100
 
@@ -538,16 +539,40 @@ int parse_events_add_breakpoint(struct list_head *list, int *idx,
        return add_event(list, idx, &attr, NULL);
 }
 
+static int check_type_val(struct parse_events_term *term,
+                         struct parse_events_error *err,
+                         int type)
+{
+       if (type == term->type_val)
+               return 0;
+
+       if (err) {
+               err->idx = term->err_val;
+               if (type == PARSE_EVENTS__TERM_TYPE_NUM)
+                       err->str = strdup("expected numeric value");
+               else
+                       err->str = strdup("expected string value");
+       }
+       return -EINVAL;
+}
+
 static int config_term(struct perf_event_attr *attr,
-                      struct parse_events_term *term)
+                      struct parse_events_term *term,
+                      struct parse_events_error *err)
 {
-#define CHECK_TYPE_VAL(type)                                   \
-do {                                                           \
-       if (PARSE_EVENTS__TERM_TYPE_ ## type != term->type_val) \
-               return -EINVAL;                                 \
+#define CHECK_TYPE_VAL(type)                                              \
+do {                                                                      \
+       if (check_type_val(term, err, PARSE_EVENTS__TERM_TYPE_ ## type)) \
+               return -EINVAL;                                            \
 } while (0)
 
        switch (term->type_term) {
+       case PARSE_EVENTS__TERM_TYPE_USER:
+               /*
+                * Always succeed for sysfs terms, as we don't know
+                * at this point what type they need to have.
+                */
+               return 0;
        case PARSE_EVENTS__TERM_TYPE_CONFIG:
                CHECK_TYPE_VAL(NUM);
                attr->config = term->val.num;
@@ -582,18 +607,20 @@ do {                                                              \
 }
 
 static int config_attr(struct perf_event_attr *attr,
-                      struct list_head *head, int fail)
+                      struct list_head *head,
+                      struct parse_events_error *err)
 {
        struct parse_events_term *term;
 
        list_for_each_entry(term, head, list)
-               if (config_term(attr, term) && fail)
+               if (config_term(attr, term, err))
                        return -EINVAL;
 
        return 0;
 }
 
-int parse_events_add_numeric(struct list_head *list, int *idx,
+int parse_events_add_numeric(struct parse_events_evlist *data,
+                            struct list_head *list,
                             u32 type, u64 config,
                             struct list_head *head_config)
 {
@@ -604,10 +631,10 @@ int parse_events_add_numeric(struct list_head *list, int *idx,
        attr.config = config;
 
        if (head_config &&
-           config_attr(&attr, head_config, 1))
+           config_attr(&attr, head_config, data->error))
                return -EINVAL;
 
-       return add_event(list, idx, &attr, NULL);
+       return add_event(list, &data->idx, &attr, NULL);
 }
 
 static int parse_events__is_name_term(struct parse_events_term *term)
@@ -626,8 +653,9 @@ static char *pmu_event_name(struct list_head *head_terms)
        return NULL;
 }
 
-int parse_events_add_pmu(struct list_head *list, int *idx,
-                        char *name, struct list_head *head_config)
+int parse_events_add_pmu(struct parse_events_evlist *data,
+                        struct list_head *list, char *name,
+                        struct list_head *head_config)
 {
        struct perf_event_attr attr;
        struct perf_pmu_info info;
@@ -647,7 +675,7 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
 
        if (!head_config) {
                attr.type = pmu->type;
-               evsel = __add_event(list, idx, &attr, NULL, pmu->cpus);
+               evsel = __add_event(list, &data->idx, &attr, NULL, pmu->cpus);
                return evsel ? 0 : -ENOMEM;
        }
 
@@ -658,13 +686,14 @@ int parse_events_add_pmu(struct list_head *list, int *idx,
         * Configure hardcoded terms first, no need to check
         * return value when called with fail == 0 ;)
         */
-       config_attr(&attr, head_config, 0);
+       if (config_attr(&attr, head_config, data->error))
+               return -EINVAL;
 
-       if (perf_pmu__config(pmu, &attr, head_config))
+       if (perf_pmu__config(pmu, &attr, head_config, data->error))
                return -EINVAL;
 
-       evsel = __add_event(list, idx, &attr, pmu_event_name(head_config),
-                           pmu->cpus);
+       evsel = __add_event(list, &data->idx, &attr,
+                           pmu_event_name(head_config), pmu->cpus);
        if (evsel) {
                evsel->unit = info.unit;
                evsel->scale = info.scale;
@@ -1019,11 +1048,13 @@ int parse_events_terms(struct list_head *terms, const char *str)
        return ret;
 }
 
-int parse_events(struct perf_evlist *evlist, const char *str)
+int parse_events(struct perf_evlist *evlist, const char *str,
+                struct parse_events_error *err)
 {
        struct parse_events_evlist data = {
-               .list = LIST_HEAD_INIT(data.list),
-               .idx  = evlist->nr_entries,
+               .list  = LIST_HEAD_INIT(data.list),
+               .idx   = evlist->nr_entries,
+               .error = err,
        };
        int ret;
 
@@ -1044,16 +1075,87 @@ int parse_events(struct perf_evlist *evlist, const char *str)
        return ret;
 }
 
+#define MAX_WIDTH 1000
+static int get_term_width(void)
+{
+       struct winsize ws;
+
+       get_term_dimensions(&ws);
+       return ws.ws_col > MAX_WIDTH ? MAX_WIDTH : ws.ws_col;
+}
+
+static void parse_events_print_error(struct parse_events_error *err,
+                                    const char *event)
+{
+       const char *str = "invalid or unsupported event: ";
+       char _buf[MAX_WIDTH];
+       char *buf = (char *) event;
+       int idx = 0;
+
+       if (err->str) {
+               /* -2 for extra '' in the final fprintf */
+               int width       = get_term_width() - 2;
+               int len_event   = strlen(event);
+               int len_str, max_len, cut = 0;
+
+               /*
+                * Maximum error index indent, we will cut
+                * the event string if it's bigger.
+                */
+               int max_err_idx = 10;
+
+               /*
+                * Let's be specific with the message when
+                * we have the precise error.
+                */
+               str     = "event syntax error: ";
+               len_str = strlen(str);
+               max_len = width - len_str;
+
+               buf = _buf;
+
+               /* We're cutting from the beginning. */
+               if (err->idx > max_err_idx)
+                       cut = err->idx - max_err_idx;
+
+               strncpy(buf, event + cut, max_len);
+
+               /* Mark cut parts with '..' on both sides. */
+               if (cut)
+                       buf[0] = buf[1] = '.';
+
+               if ((len_event - cut) > max_len) {
+                       buf[max_len - 1] = buf[max_len - 2] = '.';
+                       buf[max_len] = 0;
+               }
+
+               idx = len_str + err->idx - cut;
+       }
+
+       fprintf(stderr, "%s'%s'\n", str, buf);
+       if (idx) {
+               fprintf(stderr, "%*s\\___ %s\n", idx + 1, "", err->str);
+               if (err->help)
+                       fprintf(stderr, "\n%s\n", err->help);
+               free(err->str);
+               free(err->help);
+       }
+
+       fprintf(stderr, "Run 'perf list' for a list of valid events\n");
+}
+
+#undef MAX_WIDTH
+
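For an input such as 'cycles:u,bogus' with err->idx pointing at the bad term, the routine above renders roughly this (spacing illustrative):

	event syntax error: 'cycles:u,bogus'
	                              \___ parser error
	Run 'perf list' for a list of valid events
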
 int parse_events_option(const struct option *opt, const char *str,
                        int unset __maybe_unused)
 {
        struct perf_evlist *evlist = *(struct perf_evlist **)opt->value;
-       int ret = parse_events(evlist, str);
+       struct parse_events_error err = { .idx = 0, };
+       int ret = parse_events(evlist, str, &err);
+
+       if (ret)
+               parse_events_print_error(&err, str);
 
-       if (ret) {
-               fprintf(stderr, "invalid or unsupported event: '%s'\n", str);
-               fprintf(stderr, "Run 'perf list' for a list of valid events\n");
-       }
        return ret;
 }
 
@@ -1460,7 +1562,7 @@ int parse_events__is_hardcoded_term(struct parse_events_term *term)
 
 static int new_term(struct parse_events_term **_term, int type_val,
                    int type_term, char *config,
-                   char *str, u64 num)
+                   char *str, u64 num, int err_term, int err_val)
 {
        struct parse_events_term *term;
 
@@ -1472,6 +1574,8 @@ static int new_term(struct parse_events_term **_term, int type_val,
        term->type_val  = type_val;
        term->type_term = type_term;
        term->config = config;
+       term->err_term = err_term;
+       term->err_val  = err_val;
 
        switch (type_val) {
        case PARSE_EVENTS__TERM_TYPE_NUM:
@@ -1490,17 +1594,29 @@ static int new_term(struct parse_events_term **_term, int type_val,
 }
 
 int parse_events_term__num(struct parse_events_term **term,
-                          int type_term, char *config, u64 num)
+                          int type_term, char *config, u64 num,
+                          void *loc_term_, void *loc_val_)
 {
+       YYLTYPE *loc_term = loc_term_;
+       YYLTYPE *loc_val = loc_val_;
+
        return new_term(term, PARSE_EVENTS__TERM_TYPE_NUM, type_term,
-                       config, NULL, num);
+                       config, NULL, num,
+                       loc_term ? loc_term->first_column : 0,
+                       loc_val ? loc_val->first_column : 0);
 }
 
 int parse_events_term__str(struct parse_events_term **term,
-                          int type_term, char *config, char *str)
+                          int type_term, char *config, char *str,
+                          void *loc_term_, void *loc_val_)
 {
+       YYLTYPE *loc_term = loc_term_;
+       YYLTYPE *loc_val = loc_val_;
+
        return new_term(term, PARSE_EVENTS__TERM_TYPE_STR, type_term,
-                       config, str, 0);
+                       config, str, 0,
+                       loc_term ? loc_term->first_column : 0,
+                       loc_val ? loc_val->first_column : 0);
 }
 
 int parse_events_term__sym_hw(struct parse_events_term **term,
@@ -1514,18 +1630,20 @@ int parse_events_term__sym_hw(struct parse_events_term **term,
        if (config)
                return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
                                PARSE_EVENTS__TERM_TYPE_USER, config,
-                               (char *) sym->symbol, 0);
+                               (char *) sym->symbol, 0, 0, 0);
        else
                return new_term(term, PARSE_EVENTS__TERM_TYPE_STR,
                                PARSE_EVENTS__TERM_TYPE_USER,
-                               (char *) "event", (char *) sym->symbol, 0);
+                               (char *) "event", (char *) sym->symbol,
+                               0, 0, 0);
 }
 
 int parse_events_term__clone(struct parse_events_term **new,
                             struct parse_events_term *term)
 {
        return new_term(new, term->type_val, term->type_term, term->config,
-                       term->val.str, term->val.num);
+                       term->val.str, term->val.num,
+                       term->err_term, term->err_val);
 }
 
 void parse_events__free_terms(struct list_head *terms)
@@ -1535,3 +1653,15 @@ void parse_events__free_terms(struct list_head *terms)
        list_for_each_entry_safe(term, h, terms, list)
                free(term);
 }
+
+void parse_events_evlist_error(struct parse_events_evlist *data,
+                              int idx, const char *str)
+{
+       struct parse_events_error *err = data->error;
+
+       if (!err)
+               return;
+       err->idx = idx;
+       err->str = strdup(str);
+       WARN_ONCE(!err->str, "WARNING: failed to allocate error string");
+}
index 52a2dda4f954a7682d43376bd3ac2ee91d127447..131f29b2f13258d647276820026207526a08ef02 100644 (file)
@@ -12,6 +12,7 @@
 struct list_head;
 struct perf_evsel;
 struct perf_evlist;
+struct parse_events_error;
 
 struct option;
 
@@ -29,7 +30,8 @@ const char *event_type(int type);
 
 extern int parse_events_option(const struct option *opt, const char *str,
                               int unset);
-extern int parse_events(struct perf_evlist *evlist, const char *str);
+extern int parse_events(struct perf_evlist *evlist, const char *str,
+                       struct parse_events_error *error);
 extern int parse_events_terms(struct list_head *terms, const char *str);
 extern int parse_filter(const struct option *opt, const char *str, int unset);
 
@@ -72,12 +74,23 @@ struct parse_events_term {
        int type_term;
        struct list_head list;
        bool used;
+
+       /* error string indexes for within parsed string */
+       int err_term;
+       int err_val;
+};
+
+struct parse_events_error {
+       int   idx;      /* index in the parsed string */
+       char *str;      /* string to display at the index */
+       char *help;     /* optional help string */
 };
 
 struct parse_events_evlist {
-       struct list_head list;
-       int idx;
-       int nr_groups;
+       struct list_head           list;
+       int                        idx;
+       int                        nr_groups;
+       struct parse_events_error *error;
 };
 
 struct parse_events_terms {
@@ -85,10 +98,12 @@ struct parse_events_terms {
 };
 
 int parse_events__is_hardcoded_term(struct parse_events_term *term);
-int parse_events_term__num(struct parse_events_term **_term,
-                          int type_term, char *config, u64 num);
-int parse_events_term__str(struct parse_events_term **_term,
-                          int type_term, char *config, char *str);
+int parse_events_term__num(struct parse_events_term **term,
+                          int type_term, char *config, u64 num,
+                          void *loc_term, void *loc_val);
+int parse_events_term__str(struct parse_events_term **term,
+                          int type_term, char *config, char *str,
+                          void *loc_term, void *loc_val);
 int parse_events_term__sym_hw(struct parse_events_term **term,
                              char *config, unsigned idx);
 int parse_events_term__clone(struct parse_events_term **new,
@@ -99,21 +114,24 @@ int parse_events__modifier_group(struct list_head *list, char *event_mod);
 int parse_events_name(struct list_head *list, char *name);
 int parse_events_add_tracepoint(struct list_head *list, int *idx,
                                char *sys, char *event);
-int parse_events_add_numeric(struct list_head *list, int *idx,
+int parse_events_add_numeric(struct parse_events_evlist *data,
+                            struct list_head *list,
                             u32 type, u64 config,
                             struct list_head *head_config);
 int parse_events_add_cache(struct list_head *list, int *idx,
                           char *type, char *op_result1, char *op_result2);
 int parse_events_add_breakpoint(struct list_head *list, int *idx,
                                void *ptr, char *type, u64 len);
-int parse_events_add_pmu(struct list_head *list, int *idx,
-                        char *pmu , struct list_head *head_config);
+int parse_events_add_pmu(struct parse_events_evlist *data,
+                        struct list_head *list, char *name,
+                        struct list_head *head_config);
 enum perf_pmu_event_symbol_type
 perf_pmu__parse_check(const char *name);
 void parse_events__set_leader(char *name, struct list_head *list);
 void parse_events_update_lists(struct list_head *list_event,
                               struct list_head *list_all);
-void parse_events_error(void *data, void *scanner, char const *msg);
+void parse_events_evlist_error(struct parse_events_evlist *data,
+                              int idx, const char *str);
 
 void print_events(const char *event_glob, bool name_only);
 
index 8895cf3132ab242c078c70c6f7713f52030c9f6a..09e738fe9ea2790a1c304f2015cdb20c03c20614 100644 (file)
@@ -3,6 +3,8 @@
 %option bison-bridge
 %option prefix="parse_events_"
 %option stack
+%option bison-locations
+%option yylineno
 
 %{
 #include <errno.h>
@@ -51,6 +53,18 @@ static int str(yyscan_t scanner, int token)
        return token;
 }
 
+#define REWIND(__alloc)                                \
+do {                                                           \
+       YYSTYPE *__yylval = parse_events_get_lval(yyscanner);   \
+       char *text = parse_events_get_text(yyscanner);          \
+                                                               \
+       if (__alloc)                                            \
+               __yylval->str = strdup(text);                   \
+                                                               \
+       yycolumn -= strlen(text);                               \
+       yyless(0);                                              \
+} while (0)
+
 static int pmu_str_check(yyscan_t scanner)
 {
        YYSTYPE *yylval = parse_events_get_lval(scanner);
@@ -85,6 +99,13 @@ static int term(yyscan_t scanner, int type)
        return PE_TERM;
 }
 
+#define YY_USER_ACTION                                 \
+do {                                                   \
+       yylloc->last_column  = yylloc->first_column;    \
+       yylloc->first_column = yycolumn;                \
+       yycolumn += yyleng;                             \
+} while (0);
+
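YY_USER_ACTION runs before each rule's action and advances yycolumn by yyleng, which is precisely what REWIND (above) undoes before yyless(0) pushes the text back; without the subtraction, a re-lexed token would report a column past its real position. A worked trace, assuming the input 'cycles:u':

	/* match "cycles"  : first_column = 0, yycolumn advances to 6
	 * REWIND(1)       : yycolumn back to 0, yyless(0) re-queues "cycles"
	 * after BEGIN(INITIAL) the text is lexed again from column 0, so
	 * the token locations match what the user actually typed
	 */
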
 %}
 
 %x mem
@@ -119,6 +140,12 @@ modifier_bp        [rwx]{1,3}
 
                if (start_token) {
                        parse_events_set_extra(NULL, yyscanner);
+                       /*
+                        * The flex parser does not init the locations
+                        * variable via the scan_string interface, so we
+                        * need to do the init here.
+                        */
+                       yycolumn = 0;
                        return start_token;
                }
          }
@@ -127,24 +154,30 @@ modifier_bp       [rwx]{1,3}
 <event>{
 
 {group}                {
-                       BEGIN(INITIAL); yyless(0);
+                       BEGIN(INITIAL);
+                       REWIND(0);
                }
 
 {event_pmu}    |
 {event}                {
-                       str(yyscanner, PE_EVENT_NAME);
-                       BEGIN(INITIAL); yyless(0);
+                       BEGIN(INITIAL);
+                       REWIND(1);
                        return PE_EVENT_NAME;
                }
 
 .              |
 <<EOF>>                {
-                       BEGIN(INITIAL); yyless(0);
+                       BEGIN(INITIAL);
+                       REWIND(0);
                }
 
 }
 
 <config>{
+       /*
+        * Please update formats_error_string any time
+        * a new static term is added.
+        */
 config                 { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG); }
 config1                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG1); }
 config2                        { return term(yyscanner, PARSE_EVENTS__TERM_TYPE_CONFIG2); }
index 72def077dbbfda149dfe893135dd3940ca2ed648..591905a02b926b6029447a372f2e5f7b7d34864a 100644 (file)
@@ -2,6 +2,7 @@
 %parse-param {void *_data}
 %parse-param {void *scanner}
 %lex-param {void* scanner}
+%locations
 
 %{
 
@@ -14,8 +15,6 @@
 #include "parse-events.h"
 #include "parse-events-bison.h"
 
-extern int parse_events_lex (YYSTYPE* lvalp, void* scanner);
-
 #define ABORT_ON(val) \
 do { \
        if (val) \
@@ -208,7 +207,7 @@ PE_NAME '/' event_config '/'
        struct list_head *list;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, $3));
+       ABORT_ON(parse_events_add_pmu(data, list, $1, $3));
        parse_events__free_terms($3);
        $$ = list;
 }
@@ -219,7 +218,7 @@ PE_NAME '/' '/'
        struct list_head *list;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_pmu(list, &data->idx, $1, NULL));
+       ABORT_ON(parse_events_add_pmu(data, list, $1, NULL));
        $$ = list;
 }
 |
@@ -232,11 +231,11 @@ PE_KERNEL_PMU_EVENT sep_dc
 
        ALLOC_LIST(head);
        ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
-                                       $1, 1));
+                                       $1, 1, &@1, NULL));
        list_add_tail(&term->list, head);
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_pmu(list, &data->idx, "cpu", head));
+       ABORT_ON(parse_events_add_pmu(data, list, "cpu", head));
        parse_events__free_terms(head);
        $$ = list;
 }
@@ -252,7 +251,7 @@ PE_PMU_EVENT_PRE '-' PE_PMU_EVENT_SUF sep_dc
 
        ALLOC_LIST(head);
        ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
-                                       &pmu_name, 1));
+                                       &pmu_name, 1, &@1, NULL));
        list_add_tail(&term->list, head);
 
        ALLOC_LIST(list);
@@ -275,8 +274,7 @@ value_sym '/' event_config '/'
        int config = $1 & 255;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_numeric(list, &data->idx,
-                                         type, config, $3));
+       ABORT_ON(parse_events_add_numeric(data, list, type, config, $3));
        parse_events__free_terms($3);
        $$ = list;
 }
@@ -289,8 +287,7 @@ value_sym sep_slash_dc
        int config = $1 & 255;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_numeric(list, &data->idx,
-                                         type, config, NULL));
+       ABORT_ON(parse_events_add_numeric(data, list, type, config, NULL));
        $$ = list;
 }
 
@@ -389,7 +386,15 @@ PE_NAME ':' PE_NAME
        struct list_head *list;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_tracepoint(list, &data->idx, $1, $3));
+       if (parse_events_add_tracepoint(list, &data->idx, $1, $3)) {
+               struct parse_events_error *error = data->error;
+
+               if (error) {
+                       error->idx = @1.first_column;
+                       error->str = strdup("unknown tracepoint");
+               }
+               return -1;
+       }
        $$ = list;
 }
 
@@ -400,7 +405,7 @@ PE_VALUE ':' PE_VALUE
        struct list_head *list;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_numeric(list, &data->idx, (u32)$1, $3, NULL));
+       ABORT_ON(parse_events_add_numeric(data, list, (u32)$1, $3, NULL));
        $$ = list;
 }
 
@@ -411,8 +416,7 @@ PE_RAW
        struct list_head *list;
 
        ALLOC_LIST(list);
-       ABORT_ON(parse_events_add_numeric(list, &data->idx,
-                                         PERF_TYPE_RAW, $1, NULL));
+       ABORT_ON(parse_events_add_numeric(data, list, PERF_TYPE_RAW, $1, NULL));
        $$ = list;
 }
 
@@ -450,7 +454,7 @@ PE_NAME '=' PE_NAME
        struct parse_events_term *term;
 
        ABORT_ON(parse_events_term__str(&term, PARSE_EVENTS__TERM_TYPE_USER,
-                                       $1, $3));
+                                       $1, $3, &@1, &@3));
        $$ = term;
 }
 |
@@ -459,7 +463,7 @@ PE_NAME '=' PE_VALUE
        struct parse_events_term *term;
 
        ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
-                                       $1, $3));
+                                       $1, $3, &@1, &@3));
        $$ = term;
 }
 |
@@ -477,7 +481,7 @@ PE_NAME
        struct parse_events_term *term;
 
        ABORT_ON(parse_events_term__num(&term, PARSE_EVENTS__TERM_TYPE_USER,
-                                       $1, 1));
+                                       $1, 1, &@1, NULL));
        $$ = term;
 }
 |
@@ -494,7 +498,7 @@ PE_TERM '=' PE_NAME
 {
        struct parse_events_term *term;
 
-       ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3));
+       ABORT_ON(parse_events_term__str(&term, (int)$1, NULL, $3, &@1, &@3));
        $$ = term;
 }
 |
@@ -502,7 +506,7 @@ PE_TERM '=' PE_VALUE
 {
        struct parse_events_term *term;
 
-       ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3));
+       ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, $3, &@1, &@3));
        $$ = term;
 }
 |
@@ -510,7 +514,7 @@ PE_TERM
 {
        struct parse_events_term *term;
 
-       ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1));
+       ABORT_ON(parse_events_term__num(&term, (int)$1, NULL, 1, &@1, NULL));
        $$ = term;
 }
 
@@ -520,7 +524,9 @@ sep_slash_dc: '/' | ':' |
 
 %%
 
-void parse_events_error(void *data __maybe_unused, void *scanner __maybe_unused,
+void parse_events_error(YYLTYPE *loc, void *data,
+                       void *scanner __maybe_unused,
                        char const *msg __maybe_unused)
 {
+       parse_events_evlist_error(data, loc->last_column, "parser error");
 }
index 59561fd86278276040fcde6c075b334008e13d67..367d8b816cc7e7ae0a99bb8200e0bd81b7999546 100644 (file)
@@ -123,6 +123,10 @@ struct option {
 #define OPT_LONG(s, l, v, h)        { .type = OPTION_LONG, .short_name = (s), .long_name = (l), .value = check_vtype(v, long *), .help = (h) }
 #define OPT_U64(s, l, v, h)         { .type = OPTION_U64, .short_name = (s), .long_name = (l), .value = check_vtype(v, u64 *), .help = (h) }
 #define OPT_STRING(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h) }
+#define OPT_STRING_OPTARG(s, l, v, a, h, d) \
+       { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), \
+         .value = check_vtype(v, const char **), (a), .help = (h), \
+         .flags = PARSE_OPT_OPTARG, .defval = (intptr_t)(d) }
 #define OPT_STRING_NOEMPTY(s, l, v, a, h)   { .type = OPTION_STRING,  .short_name = (s), .long_name = (l), .value = check_vtype(v, const char **), (a), .help = (h), .flags = PARSE_OPT_NOEMPTY}
 #define OPT_DATE(s, l, v, h) \
        { .type = OPTION_CALLBACK, .short_name = (s), .long_name = (l), .value = (v), .argh = "time", .help = (h), .callback = parse_opt_approxidate_cb }
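OPT_STRING_OPTARG is OPT_STRING plus PARSE_OPT_OPTARG and a default value: the option may be given with or without an argument, and `d` is used when the argument is omitted. A hypothetical usage sketch (option name and default are made up for illustration):

    const char *log_file = NULL;
    const struct option options[] = {
            /* "--log" alone yields "perf.log"; "--log=FILE" yields FILE */
            OPT_STRING_OPTARG('l', "log", &log_file, "file",
                              "write a log to a file", "perf.log"),
            OPT_END()
    };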
index 48411674da0f9cef6c87ba74a08bec513b112d6c..0fcc624eb76767b1c3fe211f678a981a71b744ce 100644 (file)
@@ -112,7 +112,11 @@ static int perf_pmu__parse_scale(struct perf_pmu_alias *alias, char *dir, char *
        if (sret < 0)
                goto error;
 
-       scale[sret] = '\0';
+       if (scale[sret - 1] == '\n')
+               scale[sret - 1] = '\0';
+       else
+               scale[sret] = '\0';
+
        /*
         * save current locale
         */
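Both sysfs readers now strip one trailing newline before NUL-terminating, so a value of "0.25\n" parses like "0.25". The idiom in isolation (a sketch; note that indexing buf[sret - 1] assumes sret >= 1, so a fully defensive variant also guards against an empty read):

    /* sret is the number of bytes read into buf (buf holds sret + 1) */
    if (sret > 0 && buf[sret - 1] == '\n')
            buf[sret - 1] = '\0';
    else
            buf[sret] = '\0';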
@@ -154,7 +158,10 @@ static int perf_pmu__parse_unit(struct perf_pmu_alias *alias, char *dir, char *n
 
        close(fd);
 
-       alias->unit[sret] = '\0';
+       if (alias->unit[sret - 1] == '\n')
+               alias->unit[sret - 1] = '\0';
+       else
+               alias->unit[sret] = '\0';
 
        return 0;
 error:
@@ -442,6 +449,10 @@ static struct perf_pmu *pmu_lookup(const char *name)
        LIST_HEAD(aliases);
        __u32 type;
 
+       /* No support for intel_bts or intel_pt so disallow them */
+       if (!strcmp(name, "intel_bts") || !strcmp(name, "intel_pt"))
+               return NULL;
+
        /*
         * The pmu data we store & need consists of the pmu
         * type value and format definitions. Load both right
@@ -579,6 +590,38 @@ static int pmu_resolve_param_term(struct parse_events_term *term,
        return -1;
 }
 
+static char *formats_error_string(struct list_head *formats)
+{
+       struct perf_pmu_format *format;
+       char *err, *str;
+       static const char *static_terms = "config,config1,config2,name,period,branch_type\n";
+       unsigned i = 0;
+
+       if (asprintf(&str, "valid terms:") < 0)
+               return NULL;
+
+       /* sysfs exported terms */
+       list_for_each_entry(format, formats, list) {
+               char c = i++ ? ',' : ' ';
+
+               err = str;
+               if (asprintf(&str, "%s%c%s", err, c, format->name) < 0)
+                       goto fail;
+               free(err);
+       }
+
+       /* static terms */
+       err = str;
+       if (asprintf(&str, "%s,%s", err, static_terms) < 0)
+               goto fail;
+
+       free(err);
+       return str;
+fail:
+       free(err);
+       return NULL;
+}
+
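formats_error_string() grows the list by re-asprintf-ing the accumulated string and freeing the previous buffer each round. For a PMU whose sysfs format directory exposes, say, "event" and "umask", the helper would produce roughly (illustrative only):

    /*
     *   valid terms: event,umask,config,config1,config2,name,period,branch_type
     */

The result is handed to err->help below, so ownership passes to whoever consumes the error.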
 /*
  * Setup one of config[12] attr members based on the
  * user input data - term parameter.
@@ -587,7 +630,7 @@ static int pmu_config_term(struct list_head *formats,
                           struct perf_event_attr *attr,
                           struct parse_events_term *term,
                           struct list_head *head_terms,
-                          bool zero)
+                          bool zero, struct parse_events_error *err)
 {
        struct perf_pmu_format *format;
        __u64 *vp;
@@ -611,6 +654,11 @@ static int pmu_config_term(struct list_head *formats,
        if (!format) {
                if (verbose)
                        printf("Invalid event/parameter '%s'\n", term->config);
+               if (err) {
+                       err->idx  = term->err_term;
+                       err->str  = strdup("unknown term");
+                       err->help = formats_error_string(formats);
+               }
                return -EINVAL;
        }
 
@@ -636,9 +684,14 @@ static int pmu_config_term(struct list_head *formats,
                val = term->val.num;
        else if (term->type_val == PARSE_EVENTS__TERM_TYPE_STR) {
                if (strcmp(term->val.str, "?")) {
-                       if (verbose)
+                       if (verbose) {
                                pr_info("Invalid sysfs entry %s=%s\n",
                                                term->config, term->val.str);
+                       }
+                       if (err) {
+                               err->idx = term->err_val;
+                               err->str = strdup("expected numeric value");
+                       }
                        return -EINVAL;
                }
 
@@ -654,12 +707,13 @@ static int pmu_config_term(struct list_head *formats,
 int perf_pmu__config_terms(struct list_head *formats,
                           struct perf_event_attr *attr,
                           struct list_head *head_terms,
-                          bool zero)
+                          bool zero, struct parse_events_error *err)
 {
        struct parse_events_term *term;
 
        list_for_each_entry(term, head_terms, list) {
-               if (pmu_config_term(formats, attr, term, head_terms, zero))
+               if (pmu_config_term(formats, attr, term, head_terms,
+                                   zero, err))
                        return -EINVAL;
        }
 
@@ -672,12 +726,14 @@ int perf_pmu__config_terms(struct list_head *formats,
  * 2) pmu format definitions - specified by pmu parameter
  */
 int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
-                    struct list_head *head_terms)
+                    struct list_head *head_terms,
+                    struct parse_events_error *err)
 {
        bool zero = !!pmu->default_config;
 
        attr->type = pmu->type;
-       return perf_pmu__config_terms(&pmu->format, attr, head_terms, zero);
+       return perf_pmu__config_terms(&pmu->format, attr, head_terms,
+                                     zero, err);
 }
 
 static struct perf_pmu_alias *pmu_find_alias(struct perf_pmu *pmu,
index 6b1249fbdb5f3a736c7c55a4005bf2c679addfa7..7b9c8cf8ae3e590578abb0f71a41739cb8fc968a 100644 (file)
@@ -4,6 +4,7 @@
 #include <linux/bitmap.h>
 #include <linux/perf_event.h>
 #include <stdbool.h>
+#include "parse-events.h"
 
 enum {
        PERF_PMU_FORMAT_VALUE_CONFIG,
@@ -47,11 +48,12 @@ struct perf_pmu_alias {
 
 struct perf_pmu *perf_pmu__find(const char *name);
 int perf_pmu__config(struct perf_pmu *pmu, struct perf_event_attr *attr,
-                    struct list_head *head_terms);
+                    struct list_head *head_terms,
+                    struct parse_events_error *error);
 int perf_pmu__config_terms(struct list_head *formats,
                           struct perf_event_attr *attr,
                           struct list_head *head_terms,
-                          bool zero);
+                          bool zero, struct parse_events_error *error);
 int perf_pmu__check_alias(struct perf_pmu *pmu, struct list_head *head_terms,
                          struct perf_pmu_info *info);
 struct list_head *perf_pmu__alias(struct perf_pmu *pmu,
index d05b77cf35f77051354b9d08acc035cf4575dd5b..076527b639bdbcab38b4e196f1d388352f7e22c1 100644 (file)
@@ -51,6 +51,7 @@
 #define PERFPROBE_GROUP "probe"
 
 bool probe_event_dry_run;      /* Dry run flag */
+struct probe_conf probe_conf;
 
 #define semantic_error(msg ...) pr_err("Semantic error :" msg)
 
@@ -161,18 +162,18 @@ static u64 kernel_get_symbol_address_by_name(const char *name, bool reloc)
 
 static struct map *kernel_get_module_map(const char *module)
 {
-       struct rb_node *nd;
        struct map_groups *grp = &host_machine->kmaps;
+       struct maps *maps = &grp->maps[MAP__FUNCTION];
+       struct map *pos;
 
        /* A file path -- this is an offline module */
        if (module && strchr(module, '/'))
-               return machine__new_module(host_machine, 0, module);
+               return machine__findnew_module_map(host_machine, 0, module);
 
        if (!module)
                module = "kernel";
 
-       for (nd = rb_first(&grp->maps[MAP__FUNCTION]); nd; nd = rb_next(nd)) {
-               struct map *pos = rb_entry(nd, struct map, rb_node);
+       for (pos = maps__first(maps); pos; pos = map__next(pos)) {
                if (strncmp(pos->dso->short_name + 1, module,
                            pos->dso->short_name_len - 2) == 0) {
                        return pos;
@@ -194,52 +195,11 @@ static void put_target_map(struct map *map, bool user)
 {
        if (map && user) {
                /* Only the user map needs to be released */
-               dso__delete(map->dso);
-               map__delete(map);
+               map__put(map);
        }
 }
 
 
-static struct dso *kernel_get_module_dso(const char *module)
-{
-       struct dso *dso;
-       struct map *map;
-       const char *vmlinux_name;
-
-       if (module) {
-               list_for_each_entry(dso, &host_machine->kernel_dsos.head,
-                                   node) {
-                       if (strncmp(dso->short_name + 1, module,
-                                   dso->short_name_len - 2) == 0)
-                               goto found;
-               }
-               pr_debug("Failed to find module %s.\n", module);
-               return NULL;
-       }
-
-       map = host_machine->vmlinux_maps[MAP__FUNCTION];
-       dso = map->dso;
-
-       vmlinux_name = symbol_conf.vmlinux_name;
-       if (vmlinux_name) {
-               if (dso__load_vmlinux(dso, map, vmlinux_name, false, NULL) <= 0)
-                       return NULL;
-       } else {
-               if (dso__load_vmlinux_path(dso, map, NULL) <= 0) {
-                       pr_debug("Failed to load kernel map.\n");
-                       return NULL;
-               }
-       }
-found:
-       return dso;
-}
-
-const char *kernel_get_module_path(const char *module)
-{
-       struct dso *dso = kernel_get_module_dso(module);
-       return (dso) ? dso->long_name : NULL;
-}
-
 static int convert_exec_to_group(const char *exec, char **result)
 {
        char *ptr1, *ptr2, *exec_copy;
@@ -286,7 +246,55 @@ static void clear_probe_trace_events(struct probe_trace_event *tevs, int ntevs)
                clear_probe_trace_event(tevs + i);
 }
 
+static bool kprobe_blacklist__listed(unsigned long address);
+static bool kprobe_warn_out_range(const char *symbol, unsigned long address)
+{
+       /* Get the address of _etext for checking non-probable text symbol */
+       if (kernel_get_symbol_address_by_name("_etext", false) < address)
+               pr_warning("%s is out of .text, skip it.\n", symbol);
+       else if (kprobe_blacklist__listed(address))
+               pr_warning("%s is blacklisted function, skip it.\n", symbol);
+       else
+               return false;
+
+       return true;
+}
+
 #ifdef HAVE_DWARF_SUPPORT
+
+static int kernel_get_module_dso(const char *module, struct dso **pdso)
+{
+       struct dso *dso;
+       struct map *map;
+       const char *vmlinux_name;
+       int ret = 0;
+
+       if (module) {
+               list_for_each_entry(dso, &host_machine->dsos.head, node) {
+                       if (!dso->kernel)
+                               continue;
+                       if (strncmp(dso->short_name + 1, module,
+                                   dso->short_name_len - 2) == 0)
+                               goto found;
+               }
+               pr_debug("Failed to find module %s.\n", module);
+               return -ENOENT;
+       }
+
+       map = host_machine->vmlinux_maps[MAP__FUNCTION];
+       dso = map->dso;
+
+       vmlinux_name = symbol_conf.vmlinux_name;
+       dso->load_errno = 0;
+       if (vmlinux_name)
+               ret = dso__load_vmlinux(dso, map, vmlinux_name, false, NULL);
+       else
+               ret = dso__load_vmlinux_path(dso, map, NULL);
+found:
+       *pdso = dso;
+       return ret;
+}
+
 /*
  * Some binaries like glibc have special symbols which are on the symbol
  * table, but not in the debuginfo. If we can find the address of the
@@ -344,15 +352,14 @@ out:
 
 static int get_alternative_probe_event(struct debuginfo *dinfo,
                                       struct perf_probe_event *pev,
-                                      struct perf_probe_point *tmp,
-                                      const char *target)
+                                      struct perf_probe_point *tmp)
 {
        int ret;
 
        memcpy(tmp, &pev->point, sizeof(*tmp));
        memset(&pev->point, 0, sizeof(pev->point));
        ret = find_alternative_probe_point(dinfo, tmp, &pev->point,
-                                          target, pev->uprobes);
+                                          pev->target, pev->uprobes);
        if (ret < 0)
                memcpy(&pev->point, tmp, sizeof(*tmp));
 
@@ -390,16 +397,25 @@ static int get_alternative_line_range(struct debuginfo *dinfo,
 static struct debuginfo *open_debuginfo(const char *module, bool silent)
 {
        const char *path = module;
-       struct debuginfo *ret;
+       char reason[STRERR_BUFSIZE];
+       struct debuginfo *ret = NULL;
+       struct dso *dso = NULL;
+       int err;
 
        if (!module || !strchr(module, '/')) {
-               path = kernel_get_module_path(module);
-               if (!path) {
+               err = kernel_get_module_dso(module, &dso);
+               if (err < 0) {
+                       if (!dso || dso->load_errno == 0) {
+                               if (!strerror_r(-err, reason, STRERR_BUFSIZE))
+                                       strcpy(reason, "(unknown)");
+                       } else
+                               dso__strerror_load(dso, reason, STRERR_BUFSIZE);
                        if (!silent)
-                               pr_err("Failed to find path of %s module.\n",
-                                      module ?: "kernel");
+                               pr_err("Failed to find the path for %s: %s\n",
+                                       module ?: "kernel", reason);
                        return NULL;
                }
+               path = dso->long_name;
        }
        ret = debuginfo__new(path);
        if (!ret && !silent) {
@@ -413,6 +429,41 @@ static struct debuginfo *open_debuginfo(const char *module, bool silent)
        return ret;
 }
 
+/* For caching the last debuginfo */
+static struct debuginfo *debuginfo_cache;
+static char *debuginfo_cache_path;
+
+static struct debuginfo *debuginfo_cache__open(const char *module, bool silent)
+{
+       if ((debuginfo_cache_path && !strcmp(debuginfo_cache_path, module)) ||
+           (!debuginfo_cache_path && !module && debuginfo_cache))
+               goto out;
+
+       /* Copy module path */
+       free(debuginfo_cache_path);
+       if (module) {
+               debuginfo_cache_path = strdup(module);
+               if (!debuginfo_cache_path) {
+                       debuginfo__delete(debuginfo_cache);
+                       debuginfo_cache = NULL;
+                       goto out;
+               }
+       }
+
+       debuginfo_cache = open_debuginfo(module, silent);
+       if (!debuginfo_cache)
+               zfree(&debuginfo_cache_path);
+out:
+       return debuginfo_cache;
+}
+
+static void debuginfo_cache__exit(void)
+{
+       debuginfo__delete(debuginfo_cache);
+       debuginfo_cache = NULL;
+       zfree(&debuginfo_cache_path);
+}
+
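The cache holds at most one open debuginfo handle, keyed by the module path (NULL meaning the kernel), so the per-address lookups below stop reopening the same file for every probe point. A usage sketch, assuming the first open succeeds:

    struct debuginfo *a = debuginfo_cache__open("ext4", true);
    struct debuginfo *b = debuginfo_cache__open("ext4", true);
    /* a == b: the second call is a cache hit and does not reopen */
    debuginfo_cache__exit();    /* drop the cached handle when done */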
 
 static int get_text_start_address(const char *exec, unsigned long *address)
 {
@@ -474,12 +525,11 @@ static int find_perf_probe_point_from_dwarf(struct probe_trace_point *tp,
        pr_debug("try to find information at %" PRIx64 " in %s\n", addr,
                 tp->module ? : "kernel");
 
-       dinfo = open_debuginfo(tp->module, verbose == 0);
-       if (dinfo) {
+       dinfo = debuginfo_cache__open(tp->module, verbose == 0);
+       if (dinfo)
                ret = debuginfo__find_probe_point(dinfo,
                                                 (unsigned long)addr, pp);
-               debuginfo__delete(dinfo);
-       } else
+       else
                ret = -ENOENT;
 
        if (ret > 0) {
@@ -558,7 +608,7 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
 {
        struct ref_reloc_sym *reloc_sym;
        char *tmp;
-       int i;
+       int i, skipped = 0;
 
        if (uprobe)
                return add_exec_to_probe_trace_events(tevs, ntevs, module);
@@ -574,31 +624,40 @@ static int post_process_probe_trace_events(struct probe_trace_event *tevs,
        }
 
        for (i = 0; i < ntevs; i++) {
-               if (tevs[i].point.address && !tevs[i].point.retprobe) {
+               if (!tevs[i].point.address || tevs[i].point.retprobe)
+                       continue;
+               /* If we found a wrong one, mark it with a NULL symbol */
+               if (kprobe_warn_out_range(tevs[i].point.symbol,
+                                         tevs[i].point.address)) {
+                       tmp = NULL;
+                       skipped++;
+               } else {
                        tmp = strdup(reloc_sym->name);
                        if (!tmp)
                                return -ENOMEM;
-                       free(tevs[i].point.symbol);
-                       tevs[i].point.symbol = tmp;
-                       tevs[i].point.offset = tevs[i].point.address -
-                                              reloc_sym->unrelocated_addr;
                }
+               /* If we have no realname, use symbol for it */
+               if (!tevs[i].point.realname)
+                       tevs[i].point.realname = tevs[i].point.symbol;
+               else
+                       free(tevs[i].point.symbol);
+               tevs[i].point.symbol = tmp;
+               tevs[i].point.offset = tevs[i].point.address -
+                                      reloc_sym->unrelocated_addr;
        }
-       return 0;
+       return skipped;
 }
 
 /* Try to find perf_probe_event with debuginfo */
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
-                                         struct probe_trace_event **tevs,
-                                         int max_tevs, const char *target)
+                                         struct probe_trace_event **tevs)
 {
        bool need_dwarf = perf_probe_event_need_dwarf(pev);
        struct perf_probe_point tmp;
        struct debuginfo *dinfo;
        int ntevs, ret = 0;
 
-       dinfo = open_debuginfo(target, !need_dwarf);
-
+       dinfo = open_debuginfo(pev->target, !need_dwarf);
        if (!dinfo) {
                if (need_dwarf)
                        return -ENOENT;
@@ -608,13 +667,12 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
 
        pr_debug("Try to find probe point from debuginfo.\n");
        /* Searching trace events corresponding to a probe event */
-       ntevs = debuginfo__find_trace_events(dinfo, pev, tevs, max_tevs);
+       ntevs = debuginfo__find_trace_events(dinfo, pev, tevs);
 
        if (ntevs == 0) {  /* Not found, retry with an alternative */
-               ret = get_alternative_probe_event(dinfo, pev, &tmp, target);
+               ret = get_alternative_probe_event(dinfo, pev, &tmp);
                if (!ret) {
-                       ntevs = debuginfo__find_trace_events(dinfo, pev,
-                                                            tevs, max_tevs);
+                       ntevs = debuginfo__find_trace_events(dinfo, pev, tevs);
                        /*
                         * Write back to the original probe_event for
                         * setting appropriate (user given) event name
@@ -629,12 +687,15 @@ static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
        if (ntevs > 0) {        /* Succeeded to find trace events */
                pr_debug("Found %d probe_trace_events.\n", ntevs);
                ret = post_process_probe_trace_events(*tevs, ntevs,
-                                                       target, pev->uprobes);
-               if (ret < 0) {
+                                               pev->target, pev->uprobes);
+               if (ret < 0 || ret == ntevs) {
                        clear_probe_trace_events(*tevs, ntevs);
                        zfree(tevs);
                }
-               return ret < 0 ? ret : ntevs;
+               if (ret != ntevs)
+                       return ret < 0 ? ret : ntevs;
+               ntevs = 0;
+               /* Fall through */
        }
 
        if (ntevs == 0) {       /* No error but failed to find probe point. */
@@ -809,8 +870,7 @@ int show_line_range(struct line_range *lr, const char *module, bool user)
 
 static int show_available_vars_at(struct debuginfo *dinfo,
                                  struct perf_probe_event *pev,
-                                 int max_vls, struct strfilter *_filter,
-                                 bool externs, const char *target)
+                                 struct strfilter *_filter)
 {
        char *buf;
        int ret, i, nvars;
@@ -824,13 +884,12 @@ static int show_available_vars_at(struct debuginfo *dinfo,
                return -EINVAL;
        pr_debug("Searching variables at %s\n", buf);
 
-       ret = debuginfo__find_available_vars_at(dinfo, pev, &vls,
-                                               max_vls, externs);
+       ret = debuginfo__find_available_vars_at(dinfo, pev, &vls);
        if (!ret) {  /* Not found, retry with an alternative */
-               ret = get_alternative_probe_event(dinfo, pev, &tmp, target);
+               ret = get_alternative_probe_event(dinfo, pev, &tmp);
                if (!ret) {
                        ret = debuginfo__find_available_vars_at(dinfo, pev,
-                                               &vls, max_vls, externs);
+                                                               &vls);
                        /* Release the old probe_point */
                        clear_perf_probe_point(&tmp);
                }
@@ -877,8 +936,7 @@ end:
 
 /* Show available variables on given probe point */
 int show_available_vars(struct perf_probe_event *pevs, int npevs,
-                       int max_vls, const char *module,
-                       struct strfilter *_filter, bool externs)
+                       struct strfilter *_filter)
 {
        int i, ret = 0;
        struct debuginfo *dinfo;
@@ -887,7 +945,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
        if (ret < 0)
                return ret;
 
-       dinfo = open_debuginfo(module, false);
+       dinfo = open_debuginfo(pevs->target, false);
        if (!dinfo) {
                ret = -ENOENT;
                goto out;
@@ -896,8 +954,7 @@ int show_available_vars(struct perf_probe_event *pevs, int npevs,
        setup_pager();
 
        for (i = 0; i < npevs && ret >= 0; i++)
-               ret = show_available_vars_at(dinfo, &pevs[i], max_vls, _filter,
-                                            externs, module);
+               ret = show_available_vars_at(dinfo, &pevs[i], _filter);
 
        debuginfo__delete(dinfo);
 out:
@@ -907,6 +964,10 @@ out:
 
 #else  /* !HAVE_DWARF_SUPPORT */
 
+static void debuginfo_cache__exit(void)
+{
+}
+
 static int
 find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
                                 struct perf_probe_point *pp __maybe_unused,
@@ -916,9 +977,7 @@ find_perf_probe_point_from_dwarf(struct probe_trace_point *tp __maybe_unused,
 }
 
 static int try_to_find_probe_trace_events(struct perf_probe_event *pev,
-                               struct probe_trace_event **tevs __maybe_unused,
-                               int max_tevs __maybe_unused,
-                               const char *target __maybe_unused)
+                               struct probe_trace_event **tevs __maybe_unused)
 {
        if (perf_probe_event_need_dwarf(pev)) {
                pr_warning("Debuginfo-analysis is not supported.\n");
@@ -937,10 +996,8 @@ int show_line_range(struct line_range *lr __maybe_unused,
 }
 
 int show_available_vars(struct perf_probe_event *pevs __maybe_unused,
-                       int npevs __maybe_unused, int max_vls __maybe_unused,
-                       const char *module __maybe_unused,
-                       struct strfilter *filter __maybe_unused,
-                       bool externs __maybe_unused)
+                       int npevs __maybe_unused,
+                       struct strfilter *filter __maybe_unused)
 {
        pr_warning("Debuginfo-analysis is not supported.\n");
        return -ENOSYS;
@@ -980,6 +1037,18 @@ static int parse_line_num(char **ptr, int *val, const char *what)
        return 0;
 }
 
+/* Check the name is good for event, group or function */
+static bool is_c_func_name(const char *name)
+{
+       if (!isalpha(*name) && *name != '_')
+               return false;
+       while (*++name != '\0') {
+               if (!isalpha(*name) && !isdigit(*name) && *name != '_')
+                       return false;
+       }
+       return true;
+}
+
 /*
  * Stuff 'lr' according to the line range described by 'arg'.
  * The line range syntax is described by:
@@ -1048,10 +1117,15 @@ int parse_line_range_desc(const char *arg, struct line_range *lr)
                        goto err;
                }
                lr->function = name;
-       } else if (strchr(name, '.'))
+       } else if (strchr(name, '/') || strchr(name, '.'))
                lr->file = name;
-       else
+       else if (is_c_func_name(name)) /* We reuse it for checking funcname */
                lr->function = name;
+       else {  /* Invalid name */
+               semantic_error("'%s' is not a valid function name.\n", name);
+               err = -EINVAL;
+               goto err;
+       }
 
        return 0;
 err:
@@ -1059,24 +1133,13 @@ err:
        return err;
 }
 
-/* Check the name is good for event/group */
-static bool check_event_name(const char *name)
-{
-       if (!isalpha(*name) && *name != '_')
-               return false;
-       while (*++name != '\0') {
-               if (!isalpha(*name) && !isdigit(*name) && *name != '_')
-                       return false;
-       }
-       return true;
-}
-
 /* Parse probepoint definition. */
 static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
 {
        struct perf_probe_point *pp = &pev->point;
        char *ptr, *tmp;
        char c, nc = 0;
+       bool file_spec = false;
        /*
         * <Syntax>
         * perf probe [EVENT=]SRC[:LN|;PTN]
@@ -1095,7 +1158,7 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
                        semantic_error("Group name is not supported yet.\n");
                        return -ENOTSUP;
                }
-               if (!check_event_name(arg)) {
+               if (!is_c_func_name(arg)) {
                        semantic_error("%s is bad for event name -it must "
                                       "follow C symbol-naming rule.\n", arg);
                        return -EINVAL;
@@ -1107,6 +1170,23 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
                arg = tmp;
        }
 
+       /*
+        * Check whether arg is a function or a file name, and copy it.
+        *
+        * We consider arg to be a file spec if and only if it satisfies
+        * all of the following criteria:
+        * - it does not include any of "+@%",
+        * - it includes one of ":;", and
+        * - it has a period '.' in the name.
+        *
+        * Otherwise, we consider arg to be a function specification.
+        */
+       if (!strpbrk(arg, "+@%") && (ptr = strpbrk(arg, ";:")) != NULL) {
+               /* This is a file spec if it includes a '.' before ; or : */
+               if (memchr(arg, '.', ptr - arg))
+                       file_spec = true;
+       }
+
        ptr = strpbrk(arg, ";:+@%");
        if (ptr) {
                nc = *ptr;
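Put together, the heuristic classifies typical arguments like this (illustrative examples, not from the patch):

    /*
     *   "fs/super.c:10"  -> file spec  (no "+@%"; a '.' before the ':')
     *   "vfs_read:5"     -> function   (':' present, but no '.' before it)
     *   "vfs_read+0x10"  -> function   ('+' rules out a file spec)
     */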
@@ -1117,10 +1197,9 @@ static int parse_perf_probe_point(char *arg, struct perf_probe_event *pev)
        if (tmp == NULL)
                return -ENOMEM;
 
-       /* Check arg is function or file and copy it */
-       if (strchr(tmp, '.'))   /* File */
+       if (file_spec)
                pp->file = tmp;
-       else                    /* Function */
+       else
                pp->function = tmp;
 
        /* Parse other options */
@@ -1762,8 +1841,7 @@ static int find_perf_probe_point_from_map(struct probe_trace_point *tp,
 
 out:
        if (map && !is_kprobe) {
-               dso__delete(map->dso);
-               map__delete(map);
+               map__put(map);
        }
 
        return ret;
@@ -1877,6 +1955,7 @@ static void clear_probe_trace_event(struct probe_trace_event *tev)
        free(tev->event);
        free(tev->group);
        free(tev->point.symbol);
+       free(tev->point.realname);
        free(tev->point.module);
        for (i = 0; i < tev->nargs; i++) {
                free(tev->args[i].name);
@@ -1954,7 +2033,7 @@ static int open_probe_events(const char *trace_file, bool readwrite)
        if (ret >= 0) {
                pr_debug("Opening %s write=%d\n", buf, readwrite);
                if (readwrite && !probe_event_dry_run)
-                       ret = open(buf, O_RDWR, O_APPEND);
+                       ret = open(buf, O_RDWR | O_APPEND, 0);
                else
                        ret = open(buf, O_RDONLY, 0);
 
@@ -2095,9 +2174,31 @@ kprobe_blacklist__find_by_address(struct list_head *blacklist,
        return NULL;
 }
 
-/* Show an event */
-static int show_perf_probe_event(struct perf_probe_event *pev,
-                                const char *module)
+static LIST_HEAD(kprobe_blacklist);
+
+static void kprobe_blacklist__init(void)
+{
+       if (!list_empty(&kprobe_blacklist))
+               return;
+
+       if (kprobe_blacklist__load(&kprobe_blacklist) < 0)
+               pr_debug("No kprobe blacklist support, ignored\n");
+}
+
+static void kprobe_blacklist__release(void)
+{
+       kprobe_blacklist__delete(&kprobe_blacklist);
+}
+
+static bool kprobe_blacklist__listed(unsigned long address)
+{
+       return !!kprobe_blacklist__find_by_address(&kprobe_blacklist, address);
+}
+
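The blacklist is now loaded lazily and shared across events: __init() is idempotent (a no-op once the list is non-empty) and __release() empties it again, so callers can query per address and tear down once per run. A sketch of the intended pattern (probe_addr_usable is hypothetical):

    static bool probe_addr_usable(unsigned long address)
    {
            kprobe_blacklist__init();       /* idempotent: loads once */
            return !kprobe_blacklist__listed(address);
    }

add_perf_probe_events() below follows exactly this shape, releasing the list once after its conversion loop.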
+static int perf_probe_event__sprintf(const char *group, const char *event,
+                                    struct perf_probe_event *pev,
+                                    const char *module,
+                                    struct strbuf *result)
 {
        int i, ret;
        char buf[128];
@@ -2108,30 +2209,67 @@ static int show_perf_probe_event(struct perf_probe_event *pev,
        if (!place)
                return -EINVAL;
 
-       ret = e_snprintf(buf, 128, "%s:%s", pev->group, pev->event);
+       ret = e_snprintf(buf, 128, "%s:%s", group, event);
        if (ret < 0)
-               return ret;
+               goto out;
 
-       pr_info("  %-20s (on %s", buf, place);
+       strbuf_addf(result, "  %-20s (on %s", buf, place);
        if (module)
-               pr_info(" in %s", module);
+               strbuf_addf(result, " in %s", module);
 
        if (pev->nargs > 0) {
-               pr_info(" with");
+               strbuf_addstr(result, " with");
                for (i = 0; i < pev->nargs; i++) {
                        ret = synthesize_perf_probe_arg(&pev->args[i],
                                                        buf, 128);
                        if (ret < 0)
-                               break;
-                       pr_info(" %s", buf);
+                               goto out;
+                       strbuf_addf(result, " %s", buf);
                }
        }
-       pr_info(")\n");
+       strbuf_addch(result, ')');
+out:
        free(place);
        return ret;
 }
 
-static int __show_perf_probe_events(int fd, bool is_kprobe)
+/* Show an event */
+static int show_perf_probe_event(const char *group, const char *event,
+                                struct perf_probe_event *pev,
+                                const char *module, bool use_stdout)
+{
+       struct strbuf buf = STRBUF_INIT;
+       int ret;
+
+       ret = perf_probe_event__sprintf(group, event, pev, module, &buf);
+       if (ret >= 0) {
+               if (use_stdout)
+                       printf("%s\n", buf.buf);
+               else
+                       pr_info("%s\n", buf.buf);
+       }
+       strbuf_release(&buf);
+
+       return ret;
+}
+
+static bool filter_probe_trace_event(struct probe_trace_event *tev,
+                                    struct strfilter *filter)
+{
+       char tmp[128];
+
+       /* At first, check the event name itself */
+       if (strfilter__compare(filter, tev->event))
+               return true;
+
+       /* Next, check the combination of name and group */
+       if (e_snprintf(tmp, 128, "%s:%s", tev->group, tev->event) < 0)
+               return false;
+       return strfilter__compare(filter, tmp);
+}
+
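filter_probe_trace_event() lets one filter match either the bare event name or the group:event pair. With strfilter's glob semantics (assumed here), a filter of "probe:vfs*" behaves like:

    /*
     *   event "vfs_read", group "probe"   -> "probe:vfs_read" matches
     *   event "vfs_read", group "kprobes" -> neither form matches
     */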
+static int __show_perf_probe_events(int fd, bool is_kprobe,
+                                   struct strfilter *filter)
 {
        int ret = 0;
        struct probe_trace_event tev;
@@ -2149,24 +2287,31 @@ static int __show_perf_probe_events(int fd, bool is_kprobe)
        strlist__for_each(ent, rawlist) {
                ret = parse_probe_trace_command(ent->s, &tev);
                if (ret >= 0) {
+                       if (!filter_probe_trace_event(&tev, filter))
+                               goto next;
                        ret = convert_to_perf_probe_event(&tev, &pev,
                                                                is_kprobe);
-                       if (ret >= 0)
-                               ret = show_perf_probe_event(&pev,
-                                                           tev.point.module);
+                       if (ret < 0)
+                               goto next;
+                       ret = show_perf_probe_event(pev.group, pev.event,
+                                                   &pev, tev.point.module,
+                                                   true);
                }
+next:
                clear_perf_probe_event(&pev);
                clear_probe_trace_event(&tev);
                if (ret < 0)
                        break;
        }
        strlist__delete(rawlist);
+       /* Cleanup cached debuginfo if needed */
+       debuginfo_cache__exit();
 
        return ret;
 }
 
 /* List up current perf-probe events */
-int show_perf_probe_events(void)
+int show_perf_probe_events(struct strfilter *filter)
 {
        int kp_fd, up_fd, ret;
 
@@ -2178,7 +2323,7 @@ int show_perf_probe_events(void)
 
        kp_fd = open_kprobe_events(false);
        if (kp_fd >= 0) {
-               ret = __show_perf_probe_events(kp_fd, true);
+               ret = __show_perf_probe_events(kp_fd, true, filter);
                close(kp_fd);
                if (ret < 0)
                        goto out;
@@ -2192,7 +2337,7 @@ int show_perf_probe_events(void)
        }
 
        if (up_fd >= 0) {
-               ret = __show_perf_probe_events(up_fd, false);
+               ret = __show_perf_probe_events(up_fd, false, filter);
                close(up_fd);
        }
 out:
@@ -2266,6 +2411,10 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
                              struct strlist *namelist, bool allow_suffix)
 {
        int i, ret;
+       char *p;
+
+       if (*base == '.')
+               base++;
 
        /* Try no suffix */
        ret = e_snprintf(buf, len, "%s", base);
@@ -2273,6 +2422,10 @@ static int get_new_event_name(char *buf, size_t len, const char *base,
                pr_debug("snprintf() failed: %d\n", ret);
                return ret;
        }
+       /* Cut off the postfixes (e.g. .const, .isra) */
+       p = strchr(buf, '.');
+       if (p && p != buf)
+               *p = '\0';
        if (!strlist__has_entry(namelist, buf))
                return 0;
 
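GCC appends postfixes such as ".isra.N" or ".constprop.N" to optimized function clones, and those are not legal in event names, so everything from the first '.' onward is cut (after skipping a leading '.'). Illustrative effect:

    /*
     *   "do_fault.isra.3"  ->  event name "do_fault"
     *   ".hidden_helper"   ->  "hidden_helper" (leading '.' dropped)
     */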
@@ -2328,10 +2481,9 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        int i, fd, ret;
        struct probe_trace_event *tev = NULL;
        char buf[64];
-       const char *event, *group;
+       const char *event = NULL, *group = NULL;
        struct strlist *namelist;
-       LIST_HEAD(blacklist);
-       struct kprobe_blacklist_node *node;
+       bool safename;
 
        if (pev->uprobes)
                fd = open_uprobe_events(true);
@@ -2347,34 +2499,26 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
        namelist = get_probe_trace_event_names(fd, false);
        if (!namelist) {
                pr_debug("Failed to get current event list.\n");
-               return -EIO;
-       }
-       /* Get kprobe blacklist if exists */
-       if (!pev->uprobes) {
-               ret = kprobe_blacklist__load(&blacklist);
-               if (ret < 0)
-                       pr_debug("No kprobe blacklist support, ignored\n");
+               ret = -ENOMEM;
+               goto close_out;
        }
 
+       safename = (pev->point.function && !strisglob(pev->point.function));
        ret = 0;
        pr_info("Added new event%s\n", (ntevs > 1) ? "s:" : ":");
        for (i = 0; i < ntevs; i++) {
                tev = &tevs[i];
-               /* Ensure that the address is NOT blacklisted */
-               node = kprobe_blacklist__find_by_address(&blacklist,
-                                                        tev->point.address);
-               if (node) {
-                       pr_warning("Warning: Skipped probing on blacklisted function: %s\n", node->symbol);
+               /* Skip if the symbol is out of .text or blacklisted */
+               if (!tev->point.symbol)
                        continue;
-               }
 
                if (pev->event)
                        event = pev->event;
                else
-                       if (pev->point.function)
+                       if (safename)
                                event = pev->point.function;
                        else
-                               event = tev->point.symbol;
+                               event = tev->point.realname;
                if (pev->group)
                        group = pev->group;
                else
@@ -2399,15 +2543,12 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
                /* Add added event name to namelist */
                strlist__add(namelist, event);
 
-               /* Trick here - save current event/group */
-               event = pev->event;
-               group = pev->group;
-               pev->event = tev->event;
-               pev->group = tev->group;
-               show_perf_probe_event(pev, tev->point.module);
-               /* Trick here - restore current event/group */
-               pev->event = (char *)event;
-               pev->group = (char *)group;
+               /* We use tev's name for showing new events */
+               show_perf_probe_event(tev->group, tev->event, pev,
+                                     tev->point.module, false);
+               /* Save the last valid name */
+               event = tev->event;
+               group = tev->group;
 
                /*
                 * Probes after the first probe which comes from same
@@ -2421,26 +2562,34 @@ static int __add_probe_trace_events(struct perf_probe_event *pev,
                warn_uprobe_event_compat(tev);
 
        /* Note that it is possible to skip all events because of blacklist */
-       if (ret >= 0 && tev->event) {
+       if (ret >= 0 && event) {
                /* Show how to use the event. */
                pr_info("\nYou can now use it in all perf tools, such as:\n\n");
-               pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", tev->group,
-                        tev->event);
+               pr_info("\tperf record -e %s:%s -aR sleep 1\n\n", group, event);
        }
 
-       kprobe_blacklist__delete(&blacklist);
        strlist__delete(namelist);
+close_out:
        close(fd);
        return ret;
 }
 
-static int find_probe_functions(struct map *map, char *name)
+static int find_probe_functions(struct map *map, char *name,
+                               struct symbol **syms)
 {
        int found = 0;
        struct symbol *sym;
+       struct rb_node *tmp;
+
+       if (map__load(map, NULL) < 0)
+               return 0;
 
-       map__for_each_symbol_by_name(map, name, sym) {
-               found++;
+       map__for_each_symbol(map, sym, tmp) {
+               if (strglobmatch(sym->name, name)) {
+                       found++;
+                       if (syms && found < probe_conf.max_probes)
+                               syms[found - 1] = sym;
+               }
        }
 
        return found;
@@ -2449,42 +2598,52 @@ static int find_probe_functions(struct map *map, char *name)
 #define strdup_or_goto(str, label)     \
        ({ char *__p = strdup(str); if (!__p) goto label; __p; })
 
+void __weak arch__fix_tev_from_maps(struct perf_probe_event *pev __maybe_unused,
+                               struct probe_trace_event *tev __maybe_unused,
+                               struct map *map __maybe_unused) { }
+
 /*
  * Find probe function addresses from map.
  * Return an error or the number of found probe_trace_event
  */
 static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
-                                           struct probe_trace_event **tevs,
-                                           int max_tevs, const char *target)
+                                           struct probe_trace_event **tevs)
 {
        struct map *map = NULL;
        struct ref_reloc_sym *reloc_sym = NULL;
        struct symbol *sym;
+       struct symbol **syms = NULL;
        struct probe_trace_event *tev;
        struct perf_probe_point *pp = &pev->point;
        struct probe_trace_point *tp;
        int num_matched_functions;
-       int ret, i;
+       int ret, i, j, skipped = 0;
 
-       map = get_target_map(target, pev->uprobes);
+       map = get_target_map(pev->target, pev->uprobes);
        if (!map) {
                ret = -EINVAL;
                goto out;
        }
 
+       syms = malloc(sizeof(struct symbol *) * probe_conf.max_probes);
+       if (!syms) {
+               ret = -ENOMEM;
+               goto out;
+       }
+
        /*
         * Load matched symbols: Since the different local symbols may have
         * same name but different addresses, this lists all the symbols.
         */
-       num_matched_functions = find_probe_functions(map, pp->function);
+       num_matched_functions = find_probe_functions(map, pp->function, syms);
        if (num_matched_functions == 0) {
                pr_err("Failed to find symbol %s in %s\n", pp->function,
-                       target ? : "kernel");
+                       pev->target ? : "kernel");
                ret = -ENOENT;
                goto out;
-       } else if (num_matched_functions > max_tevs) {
+       } else if (num_matched_functions > probe_conf.max_probes) {
                pr_err("Too many functions matched in %s\n",
-                       target ? : "kernel");
+                       pev->target ? : "kernel");
                ret = -E2BIG;
                goto out;
        }
@@ -2507,7 +2666,9 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
 
        ret = 0;
 
-       map__for_each_symbol_by_name(map, pp->function, sym) {
+       for (j = 0; j < num_matched_functions; j++) {
+               sym = syms[j];
+
                tev = (*tevs) + ret;
                tp = &tev->point;
                if (ret == num_matched_functions) {
@@ -2524,16 +2685,24 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
                }
                /* Add one probe point */
                tp->address = map->unmap_ip(map, sym->start) + pp->offset;
-               if (reloc_sym) {
+               /* If we found a wrong one, mark it with a NULL symbol */
+               if (!pev->uprobes &&
+                   kprobe_warn_out_range(sym->name, tp->address)) {
+                       tp->symbol = NULL;      /* Skip it */
+                       skipped++;
+               } else if (reloc_sym) {
                        tp->symbol = strdup_or_goto(reloc_sym->name, nomem_out);
                        tp->offset = tp->address - reloc_sym->addr;
                } else {
                        tp->symbol = strdup_or_goto(sym->name, nomem_out);
                        tp->offset = pp->offset;
                }
+               tp->realname = strdup_or_goto(sym->name, nomem_out);
+
                tp->retprobe = pp->retprobe;
-               if (target)
-                       tev->point.module = strdup_or_goto(target, nomem_out);
+               if (pev->target)
+                       tev->point.module = strdup_or_goto(pev->target,
+                                                          nomem_out);
                tev->uprobes = pev->uprobes;
                tev->nargs = pev->nargs;
                if (tev->nargs) {
@@ -2555,10 +2724,16 @@ static int find_probe_trace_events_from_map(struct perf_probe_event *pev,
                                        strdup_or_goto(pev->args[i].type,
                                                        nomem_out);
                }
+               arch__fix_tev_from_maps(pev, tev, map);
+       }
+       if (ret == skipped) {
+               ret = -ENOENT;
+               goto err_out;
        }
 
 out:
        put_target_map(map, pev->uprobes);
+       free(syms);
        return ret;
 
 nomem_out:
@@ -2569,27 +2744,34 @@ err_out:
        goto out;
 }
 
+bool __weak arch__prefers_symtab(void) { return false; }
+
 static int convert_to_probe_trace_events(struct perf_probe_event *pev,
-                                         struct probe_trace_event **tevs,
-                                         int max_tevs, const char *target)
+                                        struct probe_trace_event **tevs)
 {
        int ret;
 
        if (pev->uprobes && !pev->group) {
                /* Replace group name if not given */
-               ret = convert_exec_to_group(target, &pev->group);
+               ret = convert_exec_to_group(pev->target, &pev->group);
                if (ret != 0) {
                        pr_warning("Failed to make a group name.\n");
                        return ret;
                }
        }
 
+       if (arch__prefers_symtab() && !perf_probe_event_need_dwarf(pev)) {
+               ret = find_probe_trace_events_from_map(pev, tevs);
+               if (ret > 0)
+                       return ret; /* Found in symbol table */
+       }
+
        /* Convert perf_probe_event with debuginfo */
-       ret = try_to_find_probe_trace_events(pev, tevs, max_tevs, target);
+       ret = try_to_find_probe_trace_events(pev, tevs);
        if (ret != 0)
                return ret;     /* Found in debuginfo or got an error */
 
-       return find_probe_trace_events_from_map(pev, tevs, max_tevs, target);
+       return find_probe_trace_events_from_map(pev, tevs);
 }
 
 struct __event_package {
@@ -2598,8 +2780,7 @@ struct __event_package {
        int                             ntevs;
 };
 
-int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-                         int max_tevs, bool force_add)
+int add_perf_probe_events(struct perf_probe_event *pevs, int npevs)
 {
        int i, j, ret;
        struct __event_package *pkgs;
@@ -2619,20 +2800,24 @@ int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
        /* Loop 1: convert all events */
        for (i = 0; i < npevs; i++) {
                pkgs[i].pev = &pevs[i];
+               /* Init kprobe blacklist if needed */
+               if (!pkgs[i].pev->uprobes)
+                       kprobe_blacklist__init();
                /* Convert with or without debuginfo */
                ret  = convert_to_probe_trace_events(pkgs[i].pev,
-                                                    &pkgs[i].tevs,
-                                                    max_tevs,
-                                                    pkgs[i].pev->target);
+                                                    &pkgs[i].tevs);
                if (ret < 0)
                        goto end;
                pkgs[i].ntevs = ret;
        }
+       /* This releases the blacklist only if it was allocated */
+       kprobe_blacklist__release();
 
        /* Loop 2: add all events */
        for (i = 0; i < npevs; i++) {
                ret = __add_probe_trace_events(pkgs[i].pev, pkgs[i].tevs,
-                                               pkgs[i].ntevs, force_add);
+                                              pkgs[i].ntevs,
+                                              probe_conf.force_add);
                if (ret < 0)
                        break;
        }
@@ -2684,40 +2869,39 @@ error:
        return ret;
 }
 
-static int del_trace_probe_event(int fd, const char *buf,
-                                                 struct strlist *namelist)
+static int del_trace_probe_events(int fd, struct strfilter *filter,
+                                 struct strlist *namelist)
 {
-       struct str_node *ent, *n;
-       int ret = -1;
+       struct str_node *ent;
+       const char *p;
+       int ret = -ENOENT;
 
-       if (strpbrk(buf, "*?")) { /* Glob-exp */
-               strlist__for_each_safe(ent, n, namelist)
-                       if (strglobmatch(ent->s, buf)) {
-                               ret = __del_trace_probe_event(fd, ent);
-                               if (ret < 0)
-                                       break;
-                               strlist__remove(namelist, ent);
-                       }
-       } else {
-               ent = strlist__find(namelist, buf);
-               if (ent) {
+       if (!namelist)
+               return -ENOENT;
+
+       strlist__for_each(ent, namelist) {
+               p = strchr(ent->s, ':');
+               if ((p && strfilter__compare(filter, p + 1)) ||
+                   strfilter__compare(filter, ent->s)) {
                        ret = __del_trace_probe_event(fd, ent);
-                       if (ret >= 0)
-                               strlist__remove(namelist, ent);
+                       if (ret < 0)
+                               break;
                }
        }
 
        return ret;
 }
 
-int del_perf_probe_events(struct strlist *dellist)
+int del_perf_probe_events(struct strfilter *filter)
 {
-       int ret = -1, ufd = -1, kfd = -1;
-       char buf[128];
-       const char *group, *event;
-       char *p, *str;
-       struct str_node *ent;
+       int ret, ret2, ufd = -1, kfd = -1;
        struct strlist *namelist = NULL, *unamelist = NULL;
+       char *str = strfilter__string(filter);
+
+       if (!str)
+               return -EINVAL;
+
+       pr_debug("Delete filter: \'%s\'\n", str);
 
        /* Get current event names */
        kfd = open_kprobe_events(true);
@@ -2730,49 +2914,23 @@ int del_perf_probe_events(struct strlist *dellist)
 
        if (kfd < 0 && ufd < 0) {
                print_both_open_warning(kfd, ufd);
+               ret = kfd;
                goto error;
        }
 
-       if (namelist == NULL && unamelist == NULL)
+       ret = del_trace_probe_events(kfd, filter, namelist);
+       if (ret < 0 && ret != -ENOENT)
                goto error;
 
-       strlist__for_each(ent, dellist) {
-               str = strdup(ent->s);
-               if (str == NULL) {
-                       ret = -ENOMEM;
-                       goto error;
-               }
-               pr_debug("Parsing: %s\n", str);
-               p = strchr(str, ':');
-               if (p) {
-                       group = str;
-                       *p = '\0';
-                       event = p + 1;
-               } else {
-                       group = "*";
-                       event = str;
-               }
-
-               ret = e_snprintf(buf, 128, "%s:%s", group, event);
-               if (ret < 0) {
-                       pr_err("Failed to copy event.");
-                       free(str);
-                       goto error;
-               }
-
-               pr_debug("Group: %s, Event: %s\n", group, event);
-
-               if (namelist)
-                       ret = del_trace_probe_event(kfd, buf, namelist);
-
-               if (unamelist && ret != 0)
-                       ret = del_trace_probe_event(ufd, buf, unamelist);
-
-               if (ret != 0)
-                       pr_info("Info: Event \"%s\" does not exist.\n", buf);
-
-               free(str);
+       ret2 = del_trace_probe_events(ufd, filter, unamelist);
+       if (ret2 < 0 && ret2 != -ENOENT) {
+               ret = ret2;
+               goto error;
        }
+       if (ret == -ENOENT && ret2 == -ENOENT)
+               pr_debug("\"%s\" does not hit any event.\n", str);
+       /* Note that this is silently ignored */
+       ret = 0;
 
 error:
        if (kfd >= 0) {
@@ -2784,6 +2942,7 @@ error:
                strlist__delete(unamelist);
                close(ufd);
        }
+       free(str);
 
        return ret;
 }
@@ -2837,8 +2996,7 @@ int show_available_funcs(const char *target, struct strfilter *_filter,
        dso__fprintf_symbols_by_name(map->dso, map->type, stdout);
 end:
        if (user) {
-               dso__delete(map->dso);
-               map__delete(map);
+               map__put(map);
        }
        exit_symbol_maps();
 
index d6b783447be95d6b8764187f05692e0559eed371..31db6ee7db5478139dabfc97537568c0ec104736 100644 (file)
@@ -6,10 +6,20 @@
 #include "strlist.h"
 #include "strfilter.h"
 
+/* Probe related configurations */
+struct probe_conf {
+       bool    show_ext_vars;
+       bool    show_location_range;
+       bool    force_add;
+       bool    no_inlines;
+       int     max_probes;
+};
+extern struct probe_conf probe_conf;
 extern bool probe_event_dry_run;
 
 /* kprobe-tracer and uprobe-tracer tracing point */
 struct probe_trace_point {
+       char            *realname;      /* function real name (if needed) */
        char            *symbol;        /* Base symbol */
        char            *module;        /* Module name */
        unsigned long   offset;         /* Offset from symbol */
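struct probe_conf collects what used to be threaded through every call as parameters (max probe points, force-add, extern variables and so on), so a caller configures the global once. A hypothetical setup sketch (force_flag is an assumed local; MAX_PROBES is assumed to be the existing limit constant):

    probe_conf.max_probes = MAX_PROBES;   /* assumed limit constant */
    probe_conf.force_add  = force_flag;   /* e.g. from a -f option */
    ret = add_perf_probe_events(pevs, npevs);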
@@ -121,20 +131,18 @@ extern void line_range__clear(struct line_range *lr);
 /* Initialize line range */
 extern int line_range__init(struct line_range *lr);
 
-/* Internal use: Return kernel/module path */
-extern const char *kernel_get_module_path(const char *module);
-
-extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs,
-                                int max_probe_points, bool force_add);
-extern int del_perf_probe_events(struct strlist *dellist);
-extern int show_perf_probe_events(void);
+extern int add_perf_probe_events(struct perf_probe_event *pevs, int npevs);
+extern int del_perf_probe_events(struct strfilter *filter);
+extern int show_perf_probe_events(struct strfilter *filter);
 extern int show_line_range(struct line_range *lr, const char *module,
                           bool user);
 extern int show_available_vars(struct perf_probe_event *pevs, int npevs,
-                              int max_probe_points, const char *module,
-                              struct strfilter *filter, bool externs);
+                              struct strfilter *filter);
 extern int show_available_funcs(const char *module, struct strfilter *filter,
                                bool user);
+bool arch__prefers_symtab(void);
+void arch__fix_tev_from_maps(struct perf_probe_event *pev,
+                            struct probe_trace_event *tev, struct map *map);
 
 /* Maximum index number of event-name postfix */
 #define MAX_EVENT_INDEX        1024
index 2a76e14db73289d196a0171f4830693b46445e23..2da65a7108932857bd585681eb0fac195f0c850b 100644 (file)
@@ -130,7 +130,7 @@ struct debuginfo *debuginfo__new(const char *path)
                        continue;
                dinfo = __debuginfo__new(buf);
        }
-       dso__delete(dso);
+       dso__put(dso);
 
 out:
        /* if failed to open all distro debuginfo, open given binary */
@@ -177,7 +177,7 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
        Dwarf_Word offs = 0;
        bool ref = false;
        const char *regs;
-       int ret;
+       int ret, ret2 = 0;
 
        if (dwarf_attr(vr_die, DW_AT_external, &attr) != NULL)
                goto static_var;
@@ -187,9 +187,19 @@ static int convert_variable_location(Dwarf_Die *vr_die, Dwarf_Addr addr,
                return -EINVAL; /* Broken DIE ? */
        if (dwarf_getlocation_addr(&attr, addr, &op, &nops, 1) <= 0) {
                ret = dwarf_entrypc(sp_die, &tmp);
-               if (ret || addr != tmp ||
-                   dwarf_tag(vr_die) != DW_TAG_formal_parameter ||
-                   dwarf_highpc(sp_die, &tmp))
+               if (ret)
+                       return -ENOENT;
+
+               if (probe_conf.show_location_range &&
+                       (dwarf_tag(vr_die) == DW_TAG_variable)) {
+                       ret2 = -ERANGE;
+               } else if (addr != tmp ||
+                       dwarf_tag(vr_die) != DW_TAG_formal_parameter) {
+                       return -ENOENT;
+               }
+
+               ret = dwarf_highpc(sp_die, &tmp);
+               if (ret)
                        return -ENOENT;
                /*
                 * This is fuzzed by fentry mcount. We try to find the
@@ -210,7 +220,7 @@ found:
        if (op->atom == DW_OP_addr) {
 static_var:
                if (!tvar)
-                       return 0;
+                       return ret2;
                /* Static variables on memory (not stack), make @varname */
                ret = strlen(dwarf_diename(vr_die));
                tvar->value = zalloc(ret + 2);
@@ -220,7 +230,7 @@ static_var:
                tvar->ref = alloc_trace_arg_ref((long)offs);
                if (tvar->ref == NULL)
                        return -ENOMEM;
-               return 0;
+               return ret2;
        }
 
        /* If this is based on frame buffer, set the offset */
@@ -250,14 +260,14 @@ static_var:
        }
 
        if (!tvar)
-               return 0;
+               return ret2;
 
        regs = get_arch_regstr(regn);
        if (!regs) {
                /* This should be a bug in DWARF or this tool */
                pr_warning("Mapping for the register number %u "
                           "missing on this architecture.\n", regn);
-               return -ERANGE;
+               return -ENOTSUP;
        }
 
        tvar->value = strdup(regs);
@@ -269,7 +279,7 @@ static_var:
                if (tvar->ref == NULL)
                        return -ENOMEM;
        }
-       return 0;
+       return ret2;
 }
 
 #define BYTES_TO_BITS(nb)      ((nb) * BITS_PER_LONG / sizeof(long))
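The ret2 plumbing above lets convert_variable_location() finish its work while remembering a non-fatal -ERANGE condition (a variable valid over only part of the function, reportable when --range is given) instead of aborting. A condensed sketch of the soft-error pattern; convert() and its parameters are illustrative:

	#include <errno.h>

	static int convert(int hard_failure, int out_of_scope)
	{
		int soft = out_of_scope ? -ERANGE : 0;

		if (hard_failure)
			return -ENOENT;	/* fatal: abort immediately */

		/* ... complete the conversion anyway ... */

		return soft;	/* 0, or -ERANGE the caller may tolerate */
	}
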
@@ -517,10 +527,12 @@ static int convert_variable(Dwarf_Die *vr_die, struct probe_finder *pf)
 
        ret = convert_variable_location(vr_die, pf->addr, pf->fb_ops,
                                        &pf->sp_die, pf->tvar);
-       if (ret == -ENOENT || ret == -EINVAL)
-               pr_err("Failed to find the location of %s at this address.\n"
-                      " Perhaps, it has been optimized out.\n", pf->pvar->var);
-       else if (ret == -ENOTSUP)
+       if (ret == -ENOENT || ret == -EINVAL) {
+               pr_err("Failed to find the location of the '%s' variable at this address.\n"
+                      " Perhaps it has been optimized out.\n"
+                      " Use -V with the --range option to show '%s' location range.\n",
+                      pf->pvar->var, pf->pvar->var);
+       } else if (ret == -ENOTSUP)
                pr_err("Sorry, we don't support this variable location yet.\n");
        else if (ret == 0 && pf->pvar->field) {
                ret = convert_variable_fields(vr_die, pf->pvar->var,
@@ -662,9 +674,15 @@ static int call_probe_finder(Dwarf_Die *sc_die, struct probe_finder *pf)
        /* If not a real subprogram, find a real one */
        if (!die_is_func_def(sc_die)) {
                if (!die_find_realfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
-                       pr_warning("Failed to find probe point in any "
-                                  "functions.\n");
-                       return -ENOENT;
+                       if (die_find_tailfunc(&pf->cu_die, pf->addr, &pf->sp_die)) {
+                               pr_warning("Ignoring tail call from %s\n",
+                                               dwarf_diename(&pf->sp_die));
+                               return 0;
+                       } else {
+                               pr_warning("Failed to find probe point in any "
+                                          "functions.\n");
+                               return -ENOENT;
+                       }
                }
        } else
                memcpy(&pf->sp_die, sc_die, sizeof(Dwarf_Die));
@@ -719,7 +737,7 @@ static int find_best_scope_cb(Dwarf_Die *fn_die, void *data)
        }
        /* If the function name is given, that's what user expects */
        if (fsp->function) {
-               if (die_compare_name(fn_die, fsp->function)) {
+               if (die_match_name(fn_die, fsp->function)) {
                        memcpy(fsp->die_mem, fn_die, sizeof(Dwarf_Die));
                        fsp->found = true;
                        return 1;
@@ -922,13 +940,14 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
 
        /* Check tag and diename */
        if (!die_is_func_def(sp_die) ||
-           !die_compare_name(sp_die, pp->function))
+           !die_match_name(sp_die, pp->function))
                return DWARF_CB_OK;
 
        /* Check declared file */
        if (pp->file && strtailcmp(pp->file, dwarf_decl_file(sp_die)))
                return DWARF_CB_OK;
 
+       pr_debug("Matched function: %s\n", dwarf_diename(sp_die));
        pf->fname = dwarf_decl_file(sp_die);
        if (pp->line) { /* Function relative line */
                dwarf_decl_line(sp_die, &pf->lno);
@@ -945,10 +964,20 @@ static int probe_point_search_cb(Dwarf_Die *sp_die, void *data)
                        /* TODO: Check the address in this function */
                        param->retval = call_probe_finder(sp_die, pf);
                }
-       } else
+       } else if (!probe_conf.no_inlines) {
                /* Inlined function: search instances */
                param->retval = die_walk_instances(sp_die,
                                        probe_point_inline_cb, (void *)pf);
+               /* This could be a non-existent inline definition */
+               if (param->retval == -ENOENT && strisglob(pp->function))
+                       param->retval = 0;
+       }
+
+       /* A glob may match more functions: keep searching */
+       if (strisglob(pp->function) && param->retval >= 0) {
+               param->retval = 0;      /* clear so the walk continues */
+               return DWARF_CB_OK;
+       }
 
        return DWARF_CB_ABORT; /* Exit; no same symbol in this CU. */
 }
@@ -977,7 +1006,7 @@ static int pubname_search_cb(Dwarf *dbg, Dwarf_Global *gl, void *data)
                if (dwarf_tag(param->sp_die) != DW_TAG_subprogram)
                        return DWARF_CB_OK;
 
-               if (die_compare_name(param->sp_die, param->function)) {
+               if (die_match_name(param->sp_die, param->function)) {
                        if (!dwarf_offdie(dbg, gl->cu_offset, param->cu_die))
                                return DWARF_CB_OK;
 
@@ -1030,7 +1059,7 @@ static int debuginfo__find_probes(struct debuginfo *dbg,
                return -ENOMEM;
 
        /* Fastpath: lookup by function name from .debug_pubnames section */
-       if (pp->function) {
+       if (pp->function && !strisglob(pp->function)) {
                struct pubname_callback_param pubname_param = {
                        .function = pp->function,
                        .file     = pp->file,
@@ -1089,6 +1118,7 @@ found:
 struct local_vars_finder {
        struct probe_finder *pf;
        struct perf_probe_arg *args;
+       bool vars;
        int max_args;
        int nargs;
        int ret;
@@ -1103,7 +1133,7 @@ static int copy_variables_cb(Dwarf_Die *die_mem, void *data)
 
        tag = dwarf_tag(die_mem);
        if (tag == DW_TAG_formal_parameter ||
-           tag == DW_TAG_variable) {
+           (tag == DW_TAG_variable && vf->vars)) {
                if (convert_variable_location(die_mem, vf->pf->addr,
                                              vf->pf->fb_ops, &pf->sp_die,
                                              NULL) == 0) {
@@ -1129,26 +1159,28 @@ static int expand_probe_args(Dwarf_Die *sc_die, struct probe_finder *pf,
        Dwarf_Die die_mem;
        int i;
        int n = 0;
-       struct local_vars_finder vf = {.pf = pf, .args = args,
+       struct local_vars_finder vf = {.pf = pf, .args = args, .vars = false,
                                .max_args = MAX_PROBE_ARGS, .ret = 0};
 
        for (i = 0; i < pf->pev->nargs; i++) {
                /* var is never NULL */
-               if (strcmp(pf->pev->args[i].var, "$vars") == 0) {
-                       pr_debug("Expanding $vars into:");
-                       vf.nargs = n;
-                       /* Special local variables */
-                       die_find_child(sc_die, copy_variables_cb, (void *)&vf,
-                                      &die_mem);
-                       pr_debug(" (%d)\n", vf.nargs - n);
-                       if (vf.ret < 0)
-                               return vf.ret;
-                       n = vf.nargs;
-               } else {
+               if (strcmp(pf->pev->args[i].var, PROBE_ARG_VARS) == 0)
+                       vf.vars = true;
+               else if (strcmp(pf->pev->args[i].var, PROBE_ARG_PARAMS) != 0) {
                        /* Copy normal argument */
                        args[n] = pf->pev->args[i];
                        n++;
+                       continue;
                }
+               pr_debug("Expanding %s into:", pf->pev->args[i].var);
+               vf.nargs = n;
+               /* Special local variables */
+               die_find_child(sc_die, copy_variables_cb, (void *)&vf,
+                              &die_mem);
+               pr_debug(" (%d)\n", vf.nargs - n);
+               if (vf.ret < 0)
+                       return vf.ret;
+               n = vf.nargs;
        }
        return n;
 }
@@ -1176,6 +1208,10 @@ static int add_probe_trace_event(Dwarf_Die *sc_die, struct probe_finder *pf)
        if (ret < 0)
                return ret;
 
+       tev->point.realname = strdup(dwarf_diename(sc_die));
+       if (!tev->point.realname)
+               return -ENOMEM;
+
        pr_debug("Probe point found: %s+%lu\n", tev->point.symbol,
                 tev->point.offset);
 
@@ -1213,15 +1249,15 @@ end:
 /* Find probe_trace_events specified by perf_probe_event from debuginfo */
 int debuginfo__find_trace_events(struct debuginfo *dbg,
                                 struct perf_probe_event *pev,
-                                struct probe_trace_event **tevs, int max_tevs)
+                                struct probe_trace_event **tevs)
 {
        struct trace_event_finder tf = {
                        .pf = {.pev = pev, .callback = add_probe_trace_event},
-                       .mod = dbg->mod, .max_tevs = max_tevs};
+                       .max_tevs = probe_conf.max_probes, .mod = dbg->mod};
        int ret;
 
        /* Allocate result tevs array */
-       *tevs = zalloc(sizeof(struct probe_trace_event) * max_tevs);
+       *tevs = zalloc(sizeof(struct probe_trace_event) * tf.max_tevs);
        if (*tevs == NULL)
                return -ENOMEM;
 
@@ -1237,14 +1273,11 @@ int debuginfo__find_trace_events(struct debuginfo *dbg,
        return (ret < 0) ? ret : tf.ntevs;
 }
 
-#define MAX_VAR_LEN 64
-
 /* Collect available variables in this scope */
 static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
 {
        struct available_var_finder *af = data;
        struct variable_list *vl;
-       char buf[MAX_VAR_LEN];
        int tag, ret;
 
        vl = &af->vls[af->nvls - 1];
@@ -1255,11 +1288,38 @@ static int collect_variables_cb(Dwarf_Die *die_mem, void *data)
                ret = convert_variable_location(die_mem, af->pf.addr,
                                                af->pf.fb_ops, &af->pf.sp_die,
                                                NULL);
-               if (ret == 0) {
-                       ret = die_get_varname(die_mem, buf, MAX_VAR_LEN);
-                       pr_debug2("Add new var: %s\n", buf);
-                       if (ret > 0)
-                               strlist__add(vl->vars, buf);
+               if (ret == 0 || ret == -ERANGE) {
+                       int ret2;
+                       bool externs = !af->child;
+                       struct strbuf buf;
+
+                       strbuf_init(&buf, 64);
+
+                       if (probe_conf.show_location_range) {
+                               if (!externs) {
+                                       if (ret)
+                                               strbuf_addf(&buf, "[INV]\t");
+                                       else
+                                               strbuf_addf(&buf, "[VAL]\t");
+                               } else
+                                       strbuf_addf(&buf, "[EXT]\t");
+                       }
+
+                       ret2 = die_get_varname(die_mem, &buf);
+
+                       if (!ret2 && probe_conf.show_location_range &&
+                               !externs) {
+                               strbuf_addf(&buf, "\t");
+                               ret2 = die_get_var_range(&af->pf.sp_die,
+                                                       die_mem, &buf);
+                       }
+
+                       pr_debug("Add new var: %s\n", buf.buf);
+                       if (ret2 == 0) {
+                               strlist__add(vl->vars,
+                                       strbuf_detach(&buf, NULL));
+                       }
+                       strbuf_release(&buf);
                }
        }
 
@@ -1302,9 +1362,9 @@ static int add_available_vars(Dwarf_Die *sc_die, struct probe_finder *pf)
        die_find_child(sc_die, collect_variables_cb, (void *)af, &die_mem);
 
        /* Find external variables */
-       if (!af->externs)
+       if (!probe_conf.show_ext_vars)
                goto out;
-       /* Don't need to search child DIE for externs. */
+       /* Don't need to search child DIE for external vars. */
        af->child = false;
        die_find_child(&pf->cu_die, collect_variables_cb, (void *)af, &die_mem);
 
@@ -1324,17 +1384,16 @@ out:
  */
 int debuginfo__find_available_vars_at(struct debuginfo *dbg,
                                      struct perf_probe_event *pev,
-                                     struct variable_list **vls,
-                                     int max_vls, bool externs)
+                                     struct variable_list **vls)
 {
        struct available_var_finder af = {
                        .pf = {.pev = pev, .callback = add_available_vars},
                        .mod = dbg->mod,
-                       .max_vls = max_vls, .externs = externs};
+                       .max_vls = probe_conf.max_probes};
        int ret;
 
        /* Allocate result vls array */
-       *vls = zalloc(sizeof(struct variable_list) * max_vls);
+       *vls = zalloc(sizeof(struct variable_list) * af.max_vls);
        if (*vls == NULL)
                return -ENOMEM;
 
@@ -1535,7 +1594,7 @@ static int line_range_search_cb(Dwarf_Die *sp_die, void *data)
                return DWARF_CB_OK;
 
        if (die_is_func_def(sp_die) &&
-           die_compare_name(sp_die, lr->function)) {
+           die_match_name(sp_die, lr->function)) {
                lf->fname = dwarf_decl_file(sp_die);
                dwarf_decl_line(sp_die, &lr->offset);
                pr_debug("fname: %s, lineno:%d\n", lf->fname, lr->offset);
index ebf8c8c814531ff4efaf5a7461c2ef6a7414df69..bed82716e1b44960a0ebc435d0ba1e94ed30730d 100644 (file)
@@ -10,6 +10,9 @@
 #define MAX_PROBES              128
 #define MAX_PROBE_ARGS          128
 
+#define PROBE_ARG_VARS         "$vars"
+#define PROBE_ARG_PARAMS       "$params"
+
 static inline int is_c_varname(const char *name)
 {
        /* TODO */
@@ -37,8 +40,7 @@ extern void debuginfo__delete(struct debuginfo *dbg);
 /* Find probe_trace_events specified by perf_probe_event from debuginfo */
 extern int debuginfo__find_trace_events(struct debuginfo *dbg,
                                        struct perf_probe_event *pev,
-                                       struct probe_trace_event **tevs,
-                                       int max_tevs);
+                                       struct probe_trace_event **tevs);
 
 /* Find a perf_probe_point from debuginfo */
 extern int debuginfo__find_probe_point(struct debuginfo *dbg,
@@ -52,8 +54,7 @@ extern int debuginfo__find_line_range(struct debuginfo *dbg,
 /* Find available variables */
 extern int debuginfo__find_available_vars_at(struct debuginfo *dbg,
                                             struct perf_probe_event *pev,
-                                            struct variable_list **vls,
-                                            int max_points, bool externs);
+                                            struct variable_list **vls);
 
 /* Find a src file from a DWARF tag path */
 int get_real_path(const char *raw_path, const char *comp_dir,
@@ -96,7 +97,6 @@ struct available_var_finder {
        struct variable_list    *vls;           /* Found variable lists */
        int                     nvls;           /* Number of variable lists */
        int                     max_vls;        /* Max no. of variable lists */
-       bool                    externs;        /* Find external vars too */
        bool                    child;          /* Search child scopes */
 };
 
index a126e6cc6e73ad8554e21a611b373a25599f5a9a..b234a6e3d0d4f378ff2fd4899159bd3190956a99 100644 (file)
@@ -74,3 +74,10 @@ void *pstack__pop(struct pstack *pstack)
        pstack->entries[pstack->top] = NULL;
        return ret;
 }
+
+void *pstack__peek(struct pstack *pstack)
+{
+       if (pstack->top == 0)
+               return NULL;
+       return pstack->entries[pstack->top - 1];
+}
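pstack__peek() complements pstack__pop() with a non-destructive read of the top entry. A typical use, assuming the pstack.h declarations that follow; pop_if_top() is illustrative:

	/* assumes struct pstack and the pstack__*() declarations */
	static void pop_if_top(struct pstack *stack, void *key)
	{
		if (pstack__peek(stack) == key)	/* look, don't remove */
			(void)pstack__pop(stack);
	}
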
index c3cb6584d52763f24c3074886de50c9ff741c71c..ded7f2e36624a9ff2d6ebe49c92aed2018c097dc 100644 (file)
@@ -10,5 +10,6 @@ bool pstack__empty(const struct pstack *pstack);
 void pstack__remove(struct pstack *pstack, void *key);
 void pstack__push(struct pstack *pstack, void *key);
 void *pstack__pop(struct pstack *pstack);
+void *pstack__peek(struct pstack *pstack);
 
 #endif /* _PERF_PSTACK_ */
index 4d28624a1ecaa6c6dc5d70fb2f0d43e91c56d0cf..5925fec90562fc355514489ae3d33814800f3896 100644 (file)
@@ -16,6 +16,7 @@ util/util.c
 util/xyarray.c
 util/cgroup.c
 util/rblist.c
+util/stat.c
 util/strlist.c
 util/trace-event.c
 ../../lib/rbtree.c
index 8acd0df88b5c4b75063d7ad2f83a55ef4f4f6286..d457c523a33d8bb7d00669dbd09e589eecf6de4b 100644 (file)
@@ -20,7 +20,7 @@ static int perf_do_probe_api(setup_probe_fn_t fn, int cpu, const char *str)
        if (!evlist)
                return -ENOMEM;
 
-       if (parse_events(evlist, str))
+       if (parse_events(evlist, str, NULL))
                goto out_delete;
 
        evsel = perf_evlist__first(evlist);
@@ -119,7 +119,16 @@ void perf_evlist__config(struct perf_evlist *evlist, struct record_opts *opts)
                        evsel->attr.comm_exec = 1;
        }
 
-       if (evlist->nr_entries > 1) {
+       if (opts->full_auxtrace) {
+               /*
+                * Need to be able to synthesize and parse selected events with
+                * arbitrary sample types, which requires always being able to
+                * match the id.
+                */
+               use_sample_identifier = perf_can_sample_identifier();
+               evlist__for_each(evlist, evsel)
+                       perf_evsel__set_sample_id(evsel, use_sample_identifier);
+       } else if (evlist->nr_entries > 1) {
                struct perf_evsel *first = perf_evlist__first(evlist);
 
                evlist__for_each(evlist, evsel) {
@@ -207,7 +216,7 @@ bool perf_evlist__can_select_event(struct perf_evlist *evlist, const char *str)
        if (!temp_evlist)
                return false;
 
-       err = parse_events(temp_evlist, str);
+       err = parse_events(temp_evlist, str, NULL);
        if (err)
                goto out_delete;
 
index 0c74012575ac925648c3f2eb350d7fb95bd154ce..aa482c10469d748fb2c6379ff854ba80f53b2b73 100644 (file)
 #include "cpumap.h"
 #include "perf_regs.h"
 #include "asm/bug.h"
+#include "auxtrace.h"
+#include "thread-stack.h"
 
-static int machines__deliver_event(struct machines *machines,
-                                  struct perf_evlist *evlist,
-                                  union perf_event *event,
-                                  struct perf_sample *sample,
-                                  struct perf_tool *tool, u64 file_offset);
+static int perf_session__deliver_event(struct perf_session *session,
+                                      union perf_event *event,
+                                      struct perf_sample *sample,
+                                      struct perf_tool *tool,
+                                      u64 file_offset);
 
 static int perf_session__open(struct perf_session *session)
 {
@@ -105,8 +107,8 @@ static int ordered_events__deliver_event(struct ordered_events *oe,
                return ret;
        }
 
-       return machines__deliver_event(&session->machines, session->evlist, event->event,
-                                      &sample, session->tool, event->file_offset);
+       return perf_session__deliver_event(session, event->event, &sample,
+                                          session->tool, event->file_offset);
 }
 
 struct perf_session *perf_session__new(struct perf_data_file *file,
@@ -119,6 +121,7 @@ struct perf_session *perf_session__new(struct perf_data_file *file,
 
        session->repipe = repipe;
        session->tool   = tool;
+       INIT_LIST_HEAD(&session->auxtrace_index);
        machines__init(&session->machines);
        ordered_events__init(&session->ordered_events, ordered_events__deliver_event);
 
@@ -185,6 +188,8 @@ static void perf_session_env__delete(struct perf_session_env *env)
 
 void perf_session__delete(struct perf_session *session)
 {
+       auxtrace__free(session);
+       auxtrace_index__free(&session->auxtrace_index);
        perf_session__destroy_kernel_maps(session);
        perf_session__delete_threads(session);
        perf_session_env__delete(&session->header.env);
@@ -262,6 +267,49 @@ static int process_id_index_stub(struct perf_tool *tool __maybe_unused,
        return 0;
 }
 
+static int process_event_auxtrace_info_stub(struct perf_tool *tool __maybe_unused,
+                               union perf_event *event __maybe_unused,
+                               struct perf_session *session __maybe_unused)
+{
+       dump_printf(": unhandled!\n");
+       return 0;
+}
+
+static int skipn(int fd, off_t n)
+{
+       char buf[4096];
+       ssize_t ret;
+
+       while (n > 0) {
+               ret = read(fd, buf, min(n, (off_t)sizeof(buf)));
+               if (ret <= 0)
+                       return ret;
+               n -= ret;
+       }
+
+       return 0;
+}
+
+static s64 process_event_auxtrace_stub(struct perf_tool *tool __maybe_unused,
+                                      union perf_event *event,
+                                      struct perf_session *session
+                                      __maybe_unused)
+{
+       dump_printf(": unhandled!\n");
+       if (perf_data_file__is_pipe(session->file))
+               skipn(perf_data_file__fd(session->file), event->auxtrace.size);
+       return event->auxtrace.size;
+}
+
+static
+int process_event_auxtrace_error_stub(struct perf_tool *tool __maybe_unused,
+                                     union perf_event *event __maybe_unused,
+                                     struct perf_session *session __maybe_unused)
+{
+       dump_printf(": unhandled!\n");
+       return 0;
+}
+
 void perf_tool__fill_defaults(struct perf_tool *tool)
 {
        if (tool->sample == NULL)
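skipn() above exists because event data arriving over a pipe cannot be lseek()'d past; the only way to advance is to read into a scratch buffer and discard. (Note it returns 0 on a clean EOF, which its caller here ignores anyway.) A standalone equivalent that treats premature EOF as an error:

	#include <sys/types.h>
	#include <unistd.h>

	static int skip_bytes(int fd, off_t n)
	{
		char buf[4096];

		while (n > 0) {
			ssize_t ret = read(fd, buf,
					   n < (off_t)sizeof(buf) ?
					   (size_t)n : sizeof(buf));
			if (ret <= 0)
				return -1;	/* error or premature EOF */
			n -= ret;
		}
		return 0;
	}
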
@@ -278,6 +326,12 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
                tool->exit = process_event_stub;
        if (tool->lost == NULL)
                tool->lost = perf_event__process_lost;
+       if (tool->lost_samples == NULL)
+               tool->lost_samples = perf_event__process_lost_samples;
+       if (tool->aux == NULL)
+               tool->aux = perf_event__process_aux;
+       if (tool->itrace_start == NULL)
+               tool->itrace_start = perf_event__process_itrace_start;
        if (tool->read == NULL)
                tool->read = process_event_sample_stub;
        if (tool->throttle == NULL)
@@ -298,6 +352,12 @@ void perf_tool__fill_defaults(struct perf_tool *tool)
        }
        if (tool->id_index == NULL)
                tool->id_index = process_id_index_stub;
+       if (tool->auxtrace_info == NULL)
+               tool->auxtrace_info = process_event_auxtrace_info_stub;
+       if (tool->auxtrace == NULL)
+               tool->auxtrace = process_event_auxtrace_stub;
+       if (tool->auxtrace_error == NULL)
+               tool->auxtrace_error = process_event_auxtrace_error_stub;
 }
 
 static void swap_sample_id_all(union perf_event *event, void *data)
@@ -390,6 +450,26 @@ static void perf_event__read_swap(union perf_event *event, bool sample_id_all)
                swap_sample_id_all(event, &event->read + 1);
 }
 
+static void perf_event__aux_swap(union perf_event *event, bool sample_id_all)
+{
+       event->aux.aux_offset = bswap_64(event->aux.aux_offset);
+       event->aux.aux_size   = bswap_64(event->aux.aux_size);
+       event->aux.flags      = bswap_64(event->aux.flags);
+
+       if (sample_id_all)
+               swap_sample_id_all(event, &event->aux + 1);
+}
+
+static void perf_event__itrace_start_swap(union perf_event *event,
+                                         bool sample_id_all)
+{
+       event->itrace_start.pid  = bswap_32(event->itrace_start.pid);
+       event->itrace_start.tid  = bswap_32(event->itrace_start.tid);
+
+       if (sample_id_all)
+               swap_sample_id_all(event, &event->itrace_start + 1);
+}
+
 static void perf_event__throttle_swap(union perf_event *event,
                                      bool sample_id_all)
 {
@@ -438,19 +518,42 @@ void perf_event__attr_swap(struct perf_event_attr *attr)
 {
        attr->type              = bswap_32(attr->type);
        attr->size              = bswap_32(attr->size);
-       attr->config            = bswap_64(attr->config);
-       attr->sample_period     = bswap_64(attr->sample_period);
-       attr->sample_type       = bswap_64(attr->sample_type);
-       attr->read_format       = bswap_64(attr->read_format);
-       attr->wakeup_events     = bswap_32(attr->wakeup_events);
-       attr->bp_type           = bswap_32(attr->bp_type);
-       attr->bp_addr           = bswap_64(attr->bp_addr);
-       attr->bp_len            = bswap_64(attr->bp_len);
-       attr->branch_sample_type = bswap_64(attr->branch_sample_type);
-       attr->sample_regs_user   = bswap_64(attr->sample_regs_user);
-       attr->sample_stack_user  = bswap_32(attr->sample_stack_user);
 
-       swap_bitfield((u8 *) (&attr->read_format + 1), sizeof(u64));
+#define bswap_safe(f, n)                                       \
+       (attr->size > (offsetof(struct perf_event_attr, f) +    \
+                      sizeof(attr->f) * (n)))
+#define bswap_field(f, sz)                     \
+do {                                           \
+       if (bswap_safe(f, 0))                   \
+               attr->f = bswap_##sz(attr->f);  \
+} while (0)
+#define bswap_field_32(f) bswap_field(f, 32)
+#define bswap_field_64(f) bswap_field(f, 64)
+
+       bswap_field_64(config);
+       bswap_field_64(sample_period);
+       bswap_field_64(sample_type);
+       bswap_field_64(read_format);
+       bswap_field_32(wakeup_events);
+       bswap_field_32(bp_type);
+       bswap_field_64(bp_addr);
+       bswap_field_64(bp_len);
+       bswap_field_64(branch_sample_type);
+       bswap_field_64(sample_regs_user);
+       bswap_field_32(sample_stack_user);
+       bswap_field_32(aux_watermark);
+
+       /*
+        * After read_format are bitfields. Check read_format because
+        * we are unable to use offsetof on bitfield.
+        */
+       if (bswap_safe(read_format, 1))
+               swap_bitfield((u8 *) (&attr->read_format + 1),
+                             sizeof(u64));
+#undef bswap_field_64
+#undef bswap_field_32
+#undef bswap_field
+#undef bswap_safe
 }
 
 static void perf_event__hdr_attr_swap(union perf_event *event,
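The bswap_field()/bswap_safe() rework above makes attribute swapping size-guarded: a field is only byte-swapped if the recorded attr->size shows it begins inside what the writing perf emitted, so cross-endian perf.data files from older tools keep working as perf_event_attr grows. The idiom in isolation; struct rec is illustrative:

	#include <byteswap.h>
	#include <stddef.h>
	#include <stdint.h>

	struct rec {
		uint32_t size;			/* size the writer used */
		uint64_t a;
		uint64_t b_added_later;		/* newer field */
	};

	static void rec_swap(struct rec *r)
	{
		r->size = bswap_32(r->size);
		if (r->size > offsetof(struct rec, a))
			r->a = bswap_64(r->a);
		if (r->size > offsetof(struct rec, b_added_later))
			r->b_added_later = bswap_64(r->b_added_later);
	}
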
@@ -478,6 +581,40 @@ static void perf_event__tracing_data_swap(union perf_event *event,
        event->tracing_data.size = bswap_32(event->tracing_data.size);
 }
 
+static void perf_event__auxtrace_info_swap(union perf_event *event,
+                                          bool sample_id_all __maybe_unused)
+{
+       size_t size;
+
+       event->auxtrace_info.type = bswap_32(event->auxtrace_info.type);
+
+       size = event->header.size;
+       size -= (void *)&event->auxtrace_info.priv - (void *)event;
+       mem_bswap_64(event->auxtrace_info.priv, size);
+}
+
+static void perf_event__auxtrace_swap(union perf_event *event,
+                                     bool sample_id_all __maybe_unused)
+{
+       event->auxtrace.size      = bswap_64(event->auxtrace.size);
+       event->auxtrace.offset    = bswap_64(event->auxtrace.offset);
+       event->auxtrace.reference = bswap_64(event->auxtrace.reference);
+       event->auxtrace.idx       = bswap_32(event->auxtrace.idx);
+       event->auxtrace.tid       = bswap_32(event->auxtrace.tid);
+       event->auxtrace.cpu       = bswap_32(event->auxtrace.cpu);
+}
+
+static void perf_event__auxtrace_error_swap(union perf_event *event,
+                                           bool sample_id_all __maybe_unused)
+{
+       event->auxtrace_error.type = bswap_32(event->auxtrace_error.type);
+       event->auxtrace_error.code = bswap_32(event->auxtrace_error.code);
+       event->auxtrace_error.cpu  = bswap_32(event->auxtrace_error.cpu);
+       event->auxtrace_error.pid  = bswap_32(event->auxtrace_error.pid);
+       event->auxtrace_error.tid  = bswap_32(event->auxtrace_error.tid);
+       event->auxtrace_error.ip   = bswap_64(event->auxtrace_error.ip);
+}
+
 typedef void (*perf_event__swap_op)(union perf_event *event,
                                    bool sample_id_all);
 
@@ -492,11 +629,17 @@ static perf_event__swap_op perf_event__swap_ops[] = {
        [PERF_RECORD_THROTTLE]            = perf_event__throttle_swap,
        [PERF_RECORD_UNTHROTTLE]          = perf_event__throttle_swap,
        [PERF_RECORD_SAMPLE]              = perf_event__all64_swap,
+       [PERF_RECORD_AUX]                 = perf_event__aux_swap,
+       [PERF_RECORD_ITRACE_START]        = perf_event__itrace_start_swap,
+       [PERF_RECORD_LOST_SAMPLES]        = perf_event__all64_swap,
        [PERF_RECORD_HEADER_ATTR]         = perf_event__hdr_attr_swap,
        [PERF_RECORD_HEADER_EVENT_TYPE]   = perf_event__event_type_swap,
        [PERF_RECORD_HEADER_TRACING_DATA] = perf_event__tracing_data_swap,
        [PERF_RECORD_HEADER_BUILD_ID]     = NULL,
        [PERF_RECORD_ID_INDEX]            = perf_event__all64_swap,
+       [PERF_RECORD_AUXTRACE_INFO]       = perf_event__auxtrace_info_swap,
+       [PERF_RECORD_AUXTRACE]            = perf_event__auxtrace_swap,
+       [PERF_RECORD_AUXTRACE_ERROR]      = perf_event__auxtrace_error_swap,
        [PERF_RECORD_HEADER_MAX]          = NULL,
 };
 
@@ -921,6 +1064,8 @@ static int machines__deliver_event(struct machines *machines,
        case PERF_RECORD_MMAP:
                return tool->mmap(tool, event, sample, machine);
        case PERF_RECORD_MMAP2:
+               if (event->header.misc & PERF_RECORD_MISC_PROC_MAP_PARSE_TIMEOUT)
+                       ++evlist->stats.nr_proc_map_timeout;
                return tool->mmap2(tool, event, sample, machine);
        case PERF_RECORD_COMM:
                return tool->comm(tool, event, sample, machine);
@@ -932,18 +1077,44 @@ static int machines__deliver_event(struct machines *machines,
                if (tool->lost == perf_event__process_lost)
                        evlist->stats.total_lost += event->lost.lost;
                return tool->lost(tool, event, sample, machine);
+       case PERF_RECORD_LOST_SAMPLES:
+               if (tool->lost_samples == perf_event__process_lost_samples)
+                       evlist->stats.total_lost_samples += event->lost_samples.lost;
+               return tool->lost_samples(tool, event, sample, machine);
        case PERF_RECORD_READ:
                return tool->read(tool, event, sample, evsel, machine);
        case PERF_RECORD_THROTTLE:
                return tool->throttle(tool, event, sample, machine);
        case PERF_RECORD_UNTHROTTLE:
                return tool->unthrottle(tool, event, sample, machine);
+       case PERF_RECORD_AUX:
+               return tool->aux(tool, event, sample, machine);
+       case PERF_RECORD_ITRACE_START:
+               return tool->itrace_start(tool, event, sample, machine);
        default:
                ++evlist->stats.nr_unknown_events;
                return -1;
        }
 }
 
+static int perf_session__deliver_event(struct perf_session *session,
+                                      union perf_event *event,
+                                      struct perf_sample *sample,
+                                      struct perf_tool *tool,
+                                      u64 file_offset)
+{
+       int ret;
+
+       ret = auxtrace__process_event(session, event, sample, tool);
+       if (ret < 0)
+               return ret;
+       if (ret > 0)
+               return 0;
+
+       return machines__deliver_event(&session->machines, session->evlist,
+                                      event, sample, tool, file_offset);
+}
+
 static s64 perf_session__process_user_event(struct perf_session *session,
                                            union perf_event *event,
                                            u64 file_offset)
@@ -980,6 +1151,15 @@ static s64 perf_session__process_user_event(struct perf_session *session,
                return tool->finished_round(tool, event, oe);
        case PERF_RECORD_ID_INDEX:
                return tool->id_index(tool, event, session);
+       case PERF_RECORD_AUXTRACE_INFO:
+               return tool->auxtrace_info(tool, event, session);
+       case PERF_RECORD_AUXTRACE:
+               /* position fd past this event so the auxtrace data that follows can be read */
+               lseek(fd, file_offset + event->header.size, SEEK_SET);
+               return tool->auxtrace(tool, event, session);
+       case PERF_RECORD_AUXTRACE_ERROR:
+               perf_session__auxtrace_error_inc(session, event);
+               return tool->auxtrace_error(tool, event, session);
        default:
                return -EINVAL;
        }
@@ -1034,7 +1214,7 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
                return -1;
 
        if (lseek(fd, file_offset, SEEK_SET) == (off_t)-1 ||
-           readn(fd, &buf, hdr_sz) != (ssize_t)hdr_sz)
+           readn(fd, buf, hdr_sz) != (ssize_t)hdr_sz)
                return -1;
 
        event = (union perf_event *)buf;
@@ -1042,12 +1222,12 @@ int perf_session__peek_event(struct perf_session *session, off_t file_offset,
        if (session->header.needs_swap)
                perf_event_header__bswap(&event->header);
 
-       if (event->header.size < hdr_sz)
+       if (event->header.size < hdr_sz || event->header.size > buf_sz)
                return -1;
 
        rest = event->header.size - hdr_sz;
 
-       if (readn(fd, &buf, rest) != (ssize_t)rest)
+       if (readn(fd, buf, rest) != (ssize_t)rest)
                return -1;
 
        if (session->header.needs_swap)
@@ -1096,8 +1276,8 @@ static s64 perf_session__process_event(struct perf_session *session,
                        return ret;
        }
 
-       return machines__deliver_event(&session->machines, evlist, event,
-                                      &sample, tool, file_offset);
+       return perf_session__deliver_event(session, event, &sample, tool,
+                                          file_offset);
 }
 
 void perf_event_header__bswap(struct perf_event_header *hdr)
@@ -1138,6 +1318,18 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
                            stats->nr_events[PERF_RECORD_LOST]);
        }
 
+       if (session->tool->lost_samples == perf_event__process_lost_samples) {
+               double drop_rate;
+
+               drop_rate = (double)stats->total_lost_samples /
+                           (double) (stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples);
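+               /*
+                * e.g. 9500 samples kept with 600 lost gives
+                * 600 / (9500 + 600) ~= 5.9%, above the 5% threshold
+                * checked below, so the warning fires.
+                */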
+               if (drop_rate > 0.05) {
+                       ui__warning("Processed %" PRIu64 " samples and lost %3.2f%% samples!\n\n",
+                                   stats->nr_events[PERF_RECORD_SAMPLE] + stats->total_lost_samples,
+                                   drop_rate * 100.0);
+               }
+       }
+
        if (stats->nr_unknown_events != 0) {
                ui__warning("Found %u unknown events!\n\n"
                            "Is this an older tool processing a perf.data "
@@ -1168,6 +1360,32 @@ static void perf_session__warn_about_errors(const struct perf_session *session)
 
        if (oe->nr_unordered_events != 0)
                ui__warning("%u out of order events recorded.\n", oe->nr_unordered_events);
+
+       events_stats__auxtrace_error_warn(stats);
+
+       if (stats->nr_proc_map_timeout != 0) {
+               ui__warning("%d map information files for pre-existing threads were\n"
+                           "not processed, if there are samples for addresses they\n"
+                           "will not be resolved, you may find out which are these\n"
+                           "threads by running with -v and redirecting the output\n"
+                           "to a file.\n"
+                           "The time limit to process proc map is too short?\n"
+                           "Increase it by --proc-map-timeout\n",
+                           stats->nr_proc_map_timeout);
+       }
+}
+
+static int perf_session__flush_thread_stack(struct thread *thread,
+                                           void *p __maybe_unused)
+{
+       return thread_stack__flush(thread);
+}
+
+static int perf_session__flush_thread_stacks(struct perf_session *session)
+{
+       return machines__for_each_thread(&session->machines,
+                                        perf_session__flush_thread_stack,
+                                        NULL);
 }
 
 volatile int session_done;
@@ -1256,10 +1474,17 @@ more:
 done:
        /* do the final flush for ordered samples */
        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
+       if (err)
+               goto out_err;
+       err = auxtrace__flush_events(session, tool);
+       if (err)
+               goto out_err;
+       err = perf_session__flush_thread_stacks(session);
 out_err:
        free(buf);
        perf_session__warn_about_errors(session);
        ordered_events__free(&session->ordered_events);
+       auxtrace__free_events(session);
        return err;
 }
 
@@ -1402,10 +1627,17 @@ more:
 out:
        /* do the final flush for ordered samples */
        err = ordered_events__flush(oe, OE_FLUSH__FINAL);
+       if (err)
+               goto out_err;
+       err = auxtrace__flush_events(session, tool);
+       if (err)
+               goto out_err;
+       err = perf_session__flush_thread_stacks(session);
 out_err:
        ui_progress__finish();
        perf_session__warn_about_errors(session);
        ordered_events__free(&session->ordered_events);
+       auxtrace__free_events(session);
        session->one_mmap = false;
        return err;
 }
@@ -1488,7 +1720,13 @@ size_t perf_session__fprintf_dsos_buildid(struct perf_session *session, FILE *fp
 
 size_t perf_session__fprintf_nr_events(struct perf_session *session, FILE *fp)
 {
-       size_t ret = fprintf(fp, "Aggregated stats:\n");
+       size_t ret;
+       const char *msg = "";
+
+       if (perf_header__has_feat(&session->header, HEADER_AUXTRACE))
+               msg = " (excludes AUX area (e.g. instruction trace) decoded / synthesized events)";
+
+       ret = fprintf(fp, "Aggregated stats:%s\n", msg);
 
        ret += events_stats__fprintf(&session->evlist->stats, fp);
        return ret;
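perf_session__deliver_event() above interposes auxtrace__process_event() with a tri-state return: negative is an error, positive means the event was consumed by the AUX-area decoder, and zero means fall through to the normal delivery machinery. The shape of the convention; first_stage()/second_stage() are illustrative:

	int first_stage(void *session, void *event);	/* e.g. auxtrace */
	int second_stage(void *session, void *event);	/* normal delivery */

	static int deliver(void *session, void *event)
	{
		int ret = first_stage(session, event);

		if (ret < 0)
			return ret;	/* error */
		if (ret > 0)
			return 0;	/* consumed by the first stage */
		return second_stage(session, event);
	}
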
index d5fa7b7916ef40dd5216e533a5ce04bfdfc8a7d2..b44afc75d1cc51feb05f943973d0813e8556122c 100644 (file)
 struct ip_callchain;
 struct thread;
 
+struct auxtrace;
+struct itrace_synth_opts;
+
 struct perf_session {
        struct perf_header      header;
        struct machines         machines;
        struct perf_evlist      *evlist;
+       struct auxtrace         *auxtrace;
+       struct itrace_synth_opts *itrace_synth_opts;
+       struct list_head        auxtrace_index;
        struct trace_event      tevent;
        bool                    repipe;
        bool                    one_mmap;
index 4593f36ecc4c651bef30112b008979b336894e0c..4c65a143a34c96747ab7c6d39284f264f0f8d41e 100644 (file)
@@ -89,14 +89,14 @@ static int64_t
 sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
 {
        /* Compare comm strings; comm pointers are no longer unique per string */
-       return comm__str(right->comm) - comm__str(left->comm);
+       return strcmp(comm__str(right->comm), comm__str(left->comm));
 }
 
 static int64_t
 sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
 {
        /* Compare comm strings; comm pointers are no longer unique per string */
-       return comm__str(right->comm) - comm__str(left->comm);
+       return strcmp(comm__str(right->comm), comm__str(left->comm));
 }
 
 static int64_t
@@ -182,18 +182,16 @@ static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
 
 static int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
 {
-       u64 ip_l, ip_r;
-
        if (!sym_l || !sym_r)
                return cmp_null(sym_l, sym_r);
 
        if (sym_l == sym_r)
                return 0;
 
-       ip_l = sym_l->start;
-       ip_r = sym_r->start;
+       if (sym_l->start != sym_r->start)
+               return (int64_t)(sym_r->start - sym_l->start);
 
-       return (int64_t)(ip_r - ip_l);
+       return (int64_t)(sym_r->end - sym_l->end);
 }
 
 static int64_t
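The comm comparators above switch from pointer subtraction to strcmp() because comm__str() pointers are no longer guaranteed unique per string, so subtracting them could order two entries with identical comms differently and split them across hist entries; strcmp() orders by content. Likewise _sort__sym_cmp() now breaks start-address ties on the end address. A tiny demonstration of why pointer arithmetic is the wrong comparison:

	#include <assert.h>
	#include <string.h>

	int main(void)
	{
		char a[] = "firefox", b[] = "firefox";

		assert(a != b);			/* distinct addresses */
		assert(strcmp(a, b) == 0);	/* same contents */
		return 0;
	}
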
index 846036a921dc9152623fd5e70671b5f196e4b3de..e97cd476d336f2a9cad2a1eeb9daba34d08ed3a4 100644 (file)
@@ -58,15 +58,16 @@ struct he_stat {
 
 struct hist_entry_diff {
        bool    computed;
+       union {
+               /* PERF_HPP__DELTA */
+               double  period_ratio_delta;
 
-       /* PERF_HPP__DELTA */
-       double  period_ratio_delta;
-
-       /* PERF_HPP__RATIO */
-       double  period_ratio;
+               /* PERF_HPP__RATIO */
+               double  period_ratio;
 
-       /* HISTC_WEIGHTED_DIFF */
-       s64     wdiff;
+               /* HISTC_WEIGHTED_DIFF */
+               s64     wdiff;
+       };
 };
 
 /**
@@ -92,21 +93,28 @@ struct hist_entry {
        s32                     cpu;
        u8                      cpumode;
 
-       struct hist_entry_diff  diff;
-
        /* We are added by hists__add_dummy_entry. */
        bool                    dummy;
 
-       /* XXX These two should move to some tree widget lib */
-       u16                     row_offset;
-       u16                     nr_rows;
-
-       bool                    init_have_children;
        char                    level;
        u8                      filtered;
+       union {
+               /*
+                * Since perf diff only supports the stdio output, TUI
+                * fields are only accessed from perf report (or perf
+                * top).  So make it an union to reduce memory usage.
+                */
+               struct hist_entry_diff  diff;
+               struct /* for TUI */ {
+                       u16     row_offset;
+                       u16     nr_rows;
+                       bool    init_have_children;
+                       bool    unfolded;
+                       bool    has_children;
+               };
+       };
        char                    *srcline;
        struct symbol           *parent;
-       unsigned long           position;
        struct rb_root          sorted_chain;
        struct branch_info      *branch_info;
        struct hists            *hists;
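The hist_entry unions above rely on perf diff (the only user of hist_entry_diff) being stdio-only while the TUI row bookkeeping is only touched by perf report/top, so the two field sets never coexist and can share storage. An illustrative size check; the layout and numbers assume LP64 and are not from the source:

	#include <stdio.h>

	struct separate {
		double period_ratio_delta, period_ratio;
		long long wdiff;			/* diff state */
		unsigned short row_offset, nr_rows;	/* plus TUI state */
	};

	struct shared {
		union {
			struct { double d1, d2; long long w; };
			struct { unsigned short ro, nr; };
		};
	};

	int main(void)
	{
		/* e.g. prints "32 vs 24" on x86-64 */
		printf("%zu vs %zu\n", sizeof(struct separate),
		       sizeof(struct shared));
		return 0;
	}
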
diff --git a/tools/perf/util/stat-shadow.c b/tools/perf/util/stat-shadow.c
new file mode 100644 (file)
index 0000000..53e8bb7
--- /dev/null
@@ -0,0 +1,434 @@
+#include <stdio.h>
+#include "evsel.h"
+#include "stat.h"
+#include "color.h"
+
+enum {
+       CTX_BIT_USER    = 1 << 0,
+       CTX_BIT_KERNEL  = 1 << 1,
+       CTX_BIT_HV      = 1 << 2,
+       CTX_BIT_HOST    = 1 << 3,
+       CTX_BIT_IDLE    = 1 << 4,
+       CTX_BIT_MAX     = 1 << 5,
+};
+
+#define NUM_CTX CTX_BIT_MAX
+
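+/*
+ * All runtime_*_stats below are kept per (exclusion context, cpu).  The
+ * context index is the bitmask built by evsel_context(): e.g. an event
+ * counted with exclude_kernel and exclude_hv set lands in slot
+ * CTX_BIT_KERNEL | CTX_BIT_HV == 6, separate from an unrestricted
+ * event's slot 0, so differently-scoped counters never mix.
+ */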
+static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
+static struct stats runtime_cycles_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_stalled_cycles_front_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_stalled_cycles_back_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_branches_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_cacherefs_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_l1_dcache_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_l1_icache_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_ll_cache_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_itlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_dtlb_cache_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_cycles_in_tx_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_transaction_stats[NUM_CTX][MAX_NR_CPUS];
+static struct stats runtime_elision_stats[NUM_CTX][MAX_NR_CPUS];
+
+struct stats walltime_nsecs_stats;
+
+static int evsel_context(struct perf_evsel *evsel)
+{
+       int ctx = 0;
+
+       if (evsel->attr.exclude_kernel)
+               ctx |= CTX_BIT_KERNEL;
+       if (evsel->attr.exclude_user)
+               ctx |= CTX_BIT_USER;
+       if (evsel->attr.exclude_hv)
+               ctx |= CTX_BIT_HV;
+       if (evsel->attr.exclude_host)
+               ctx |= CTX_BIT_HOST;
+       if (evsel->attr.exclude_idle)
+               ctx |= CTX_BIT_IDLE;
+
+       return ctx;
+}
+
+void perf_stat__reset_shadow_stats(void)
+{
+       memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
+       memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
+       memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
+       memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
+       memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
+       memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
+       memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
+       memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
+       memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
+       memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
+       memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
+       memset(runtime_cycles_in_tx_stats, 0,
+                       sizeof(runtime_cycles_in_tx_stats));
+       memset(runtime_transaction_stats, 0,
+               sizeof(runtime_transaction_stats));
+       memset(runtime_elision_stats, 0, sizeof(runtime_elision_stats));
+       memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
+}
+
+/*
+ * Update various tracking values we maintain to print
+ * more semantic information such as miss/hit ratios,
+ * instruction rates, etc:
+ */
+void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
+                                   int cpu)
+{
+       int ctx = evsel_context(counter);
+
+       if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
+               update_stats(&runtime_nsecs_stats[cpu], count[0]);
+       else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
+               update_stats(&runtime_cycles_stats[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
+               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, TRANSACTION_START))
+               update_stats(&runtime_transaction_stats[ctx][cpu], count[0]);
+       else if (perf_stat_evsel__is(counter, ELISION_START))
+               update_stats(&runtime_elision_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
+               update_stats(&runtime_stalled_cycles_front_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
+               update_stats(&runtime_stalled_cycles_back_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
+               update_stats(&runtime_branches_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
+               update_stats(&runtime_cacherefs_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
+               update_stats(&runtime_l1_dcache_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
+               update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
+               update_stats(&runtime_ll_cache_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
+               update_stats(&runtime_dtlb_cache_stats[ctx][cpu], count[0]);
+       else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
+               update_stats(&runtime_itlb_cache_stats[ctx][cpu], count[0]);
+}
+
+/* used for get_ratio_color() */
+enum grc_type {
+       GRC_STALLED_CYCLES_FE,
+       GRC_STALLED_CYCLES_BE,
+       GRC_CACHE_MISSES,
+       GRC_MAX_NR
+};
+
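+/*
+ * Map a ratio to a highlight colour.  Each grc_table row holds the
+ * red/magenta/yellow thresholds for one ratio type; e.g. a frontend
+ * stall ratio of 35.0 sits between 30.0 and 50.0, so it prints in
+ * magenta.
+ */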
+static const char *get_ratio_color(enum grc_type type, double ratio)
+{
+       static const double grc_table[GRC_MAX_NR][3] = {
+               [GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
+               [GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
+               [GRC_CACHE_MISSES]      = { 20.0, 10.0, 5.0 },
+       };
+       const char *color = PERF_COLOR_NORMAL;
+
+       if (ratio > grc_table[type][0])
+               color = PERF_COLOR_RED;
+       else if (ratio > grc_table[type][1])
+               color = PERF_COLOR_MAGENTA;
+       else if (ratio > grc_table[type][2])
+               color = PERF_COLOR_YELLOW;
+
+       return color;
+}
+
+static void print_stalled_cycles_frontend(FILE *out, int cpu,
+                                         struct perf_evsel *evsel
+                                         __maybe_unused, double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " frontend cycles idle   ");
+}
+
+static void print_stalled_cycles_backend(FILE *out, int cpu,
+                                        struct perf_evsel *evsel
+                                        __maybe_unused, double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " backend  cycles idle   ");
+}
+
+static void print_branch_misses(FILE *out, int cpu,
+                               struct perf_evsel *evsel __maybe_unused,
+                               double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_branches_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " of all branches        ");
+}
+
+static void print_l1_dcache_misses(FILE *out, int cpu,
+                                  struct perf_evsel *evsel __maybe_unused,
+                                  double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_l1_dcache_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " of all L1-dcache hits  ");
+}
+
+static void print_l1_icache_misses(FILE *out, int cpu,
+                                  struct perf_evsel *evsel __maybe_unused,
+                                  double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_l1_icache_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " of all L1-icache hits  ");
+}
+
+static void print_dtlb_cache_misses(FILE *out, int cpu,
+                                   struct perf_evsel *evsel __maybe_unused,
+                                   double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_dtlb_cache_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " of all dTLB cache hits ");
+}
+
+static void print_itlb_cache_misses(FILE *out, int cpu,
+                                   struct perf_evsel *evsel __maybe_unused,
+                                   double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_itlb_cache_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " of all iTLB cache hits ");
+}
+
+static void print_ll_cache_misses(FILE *out, int cpu,
+                                 struct perf_evsel *evsel __maybe_unused,
+                                 double avg)
+{
+       double total, ratio = 0.0;
+       const char *color;
+       int ctx = evsel_context(evsel);
+
+       total = avg_stats(&runtime_ll_cache_stats[ctx][cpu]);
+
+       if (total)
+               ratio = avg / total * 100.0;
+
+       color = get_ratio_color(GRC_CACHE_MISSES, ratio);
+
+       fprintf(out, " #  ");
+       color_fprintf(out, color, "%6.2f%%", ratio);
+       fprintf(out, " of all LL-cache hits   ");
+}
+
+void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
+                                  double avg, int cpu, enum aggr_mode aggr)
+{
+       double total, ratio = 0.0, total2;
+       int ctx = evsel_context(evsel);
+
+       if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
+               total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+               if (total) {
+                       ratio = avg / total;
+                       fprintf(out, " #   %5.2f  insns per cycle        ", ratio);
+               } else {
+                       fprintf(out, "                                   ");
+               }
+               total = avg_stats(&runtime_stalled_cycles_front_stats[ctx][cpu]);
+               total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[ctx][cpu]));
+
+               if (total && avg) {
+                       ratio = total / avg;
+                       fprintf(out, "\n");
+                       if (aggr == AGGR_NONE)
+                               fprintf(out, "        ");
+                       fprintf(out, "                                                  #   %5.2f  stalled cycles per insn", ratio);
+               }
+
+       } else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
+                       runtime_branches_stats[ctx][cpu].n != 0) {
+               print_branch_misses(out, cpu, evsel, avg);
+       } else if (
+               evsel->attr.type == PERF_TYPE_HW_CACHE &&
+               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
+                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
+                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
+                       runtime_l1_dcache_stats[ctx][cpu].n != 0) {
+               print_l1_dcache_misses(out, cpu, evsel, avg);
+       } else if (
+               evsel->attr.type == PERF_TYPE_HW_CACHE &&
+               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
+                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
+                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
+                       runtime_l1_icache_stats[ctx][cpu].n != 0) {
+               print_l1_icache_misses(out, cpu, evsel, avg);
+       } else if (
+               evsel->attr.type == PERF_TYPE_HW_CACHE &&
+               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
+                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
+                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
+                       runtime_dtlb_cache_stats[ctx][cpu].n != 0) {
+               print_dtlb_cache_misses(out, cpu, evsel, avg);
+       } else if (
+               evsel->attr.type == PERF_TYPE_HW_CACHE &&
+               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
+                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
+                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
+                       runtime_itlb_cache_stats[ctx][cpu].n != 0) {
+               print_itlb_cache_misses(out, cpu, evsel, avg);
+       } else if (
+               evsel->attr.type == PERF_TYPE_HW_CACHE &&
+               evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
+                                       ((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
+                                       ((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
+                       runtime_ll_cache_stats[ctx][cpu].n != 0) {
+               print_ll_cache_misses(out, cpu, evsel, avg);
+       } else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
+                       runtime_cacherefs_stats[ctx][cpu].n != 0) {
+               total = avg_stats(&runtime_cacherefs_stats[ctx][cpu]);
+
+               if (total)
+                       ratio = avg * 100 / total;
+
+               fprintf(out, " # %8.3f %% of all cache refs    ", ratio);
+
+       } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
+               print_stalled_cycles_frontend(out, cpu, evsel, avg);
+       } else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
+               print_stalled_cycles_backend(out, cpu, evsel, avg);
+       } else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
+               total = avg_stats(&runtime_nsecs_stats[cpu]);
+
+               if (total) {
+                       ratio = avg / total;
+                       fprintf(out, " # %8.3f GHz                    ", ratio);
+               } else {
+                       fprintf(out, "                                   ");
+               }
+       } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
+               total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+               if (total)
+                       fprintf(out,
+                               " #   %5.2f%% transactional cycles   ",
+                               100.0 * (avg / total));
+       } else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
+               total = avg_stats(&runtime_cycles_stats[ctx][cpu]);
+               total2 = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+               if (total2 < avg)
+                       total2 = avg;
+               if (total)
+                       fprintf(out,
+                               " #   %5.2f%% aborted cycles         ",
+                               100.0 * ((total2-avg) / total));
+       } else if (perf_stat_evsel__is(evsel, TRANSACTION_START) &&
+                  avg > 0 &&
+                  runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
+               total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+
+               if (total)
+                       ratio = total / avg;
+
+               fprintf(out, " # %8.0f cycles / transaction   ", ratio);
+       } else if (perf_stat_evsel__is(evsel, ELISION_START) &&
+                  avg > 0 &&
+                  runtime_cycles_in_tx_stats[ctx][cpu].n != 0) {
+               total = avg_stats(&runtime_cycles_in_tx_stats[ctx][cpu]);
+
+               if (total)
+                       ratio = total / avg;
+
+               fprintf(out, " # %8.0f cycles / elision       ", ratio);
+       } else if (runtime_nsecs_stats[cpu].n != 0) {
+               char unit = 'M';
+
+               total = avg_stats(&runtime_nsecs_stats[cpu]);
+
+               if (total)
+                       ratio = 1000.0 * avg / total;
+               if (ratio < 0.001) {
+                       ratio *= 1000;
+                       unit = 'K';
+               }
+
+               fprintf(out, " # %8.3f %c/sec                  ", ratio, unit);
+       } else {
+               fprintf(out, "                                   ");
+       }
+}
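
Each branch above reduces to the same computation: take the averaged value of a companion counter accumulated during the run and print the current event as a ratio of it. A minimal sketch of that derivation, with made-up counter values standing in for the avg_stats() results:

	#include <stdio.h>

	/* hypothetical averaged counts, as avg_stats() would return them */
	static double avg_insns  = 1.2e9;
	static double avg_cycles = 1.0e9;

	int main(void)
	{
		double ipc = avg_cycles ? avg_insns / avg_cycles : 0.0;

		printf(" #   %5.2f  insns per cycle\n", ipc);	/* 1.20 */
		return 0;
	}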
index 6506b3dfb6059f71aa6c345df21fcbc16e651604..4014b709f956b96b86dab3526f3b35fec6d8c166 100644 (file)
@@ -1,6 +1,6 @@
 #include <math.h>
-
 #include "stat.h"
+#include "evsel.h"
 
 void update_stats(struct stats *stats, u64 val)
 {
@@ -61,3 +61,72 @@ double rel_stddev_stats(double stddev, double avg)
 
        return pct;
 }
+
+bool __perf_evsel_stat__is(struct perf_evsel *evsel,
+                          enum perf_stat_evsel_id id)
+{
+       struct perf_stat *ps = evsel->priv;
+
+       return ps->id == id;
+}
+
+#define ID(id, name) [PERF_STAT_EVSEL_ID__##id] = #name
+static const char *id_str[PERF_STAT_EVSEL_ID__MAX] = {
+       ID(NONE,                x),
+       ID(CYCLES_IN_TX,        cpu/cycles-t/),
+       ID(TRANSACTION_START,   cpu/tx-start/),
+       ID(ELISION_START,       cpu/el-start/),
+       ID(CYCLES_IN_TX_CP,     cpu/cycles-ct/),
+};
+#undef ID
+
+void perf_stat_evsel_id_init(struct perf_evsel *evsel)
+{
+       struct perf_stat *ps = evsel->priv;
+       int i;
+
+       /* ps->id is 0 hence PERF_STAT_EVSEL_ID__NONE by default */
+
+       for (i = 0; i < PERF_STAT_EVSEL_ID__MAX; i++) {
+               if (!strcmp(perf_evsel__name(evsel), id_str[i])) {
+                       ps->id = i;
+                       break;
+               }
+       }
+}
+
+struct perf_counts *perf_counts__new(int ncpus)
+{
+       int size = sizeof(struct perf_counts) +
+                  ncpus * sizeof(struct perf_counts_values);
+
+       return zalloc(size);
+}
+
+void perf_counts__delete(struct perf_counts *counts)
+{
+       free(counts);
+}
+
+static void perf_counts__reset(struct perf_counts *counts, int ncpus)
+{
+       memset(counts, 0, (sizeof(*counts) +
+              (ncpus * sizeof(struct perf_counts_values))));
+}
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
+{
+       perf_counts__reset(evsel->counts, ncpus);
+}
+
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
+{
+       evsel->counts = perf_counts__new(ncpus);
+       return evsel->counts != NULL ? 0 : -ENOMEM;
+}
+
+void perf_evsel__free_counts(struct perf_evsel *evsel)
+{
+       perf_counts__delete(evsel->counts);
+       evsel->counts = NULL;
+}
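
The ID() table above leans on the preprocessor: the first argument selects the designated-initializer slot and the second is stringified with #, so the enum entry and the matched event name cannot drift apart. The same pattern in isolation (the enum and names here are illustrative, not perf's):

	#include <stdio.h>
	#include <string.h>

	enum id { ID_NONE = 0, ID_CYCLES_IN_TX, ID_MAX };

	#define ID(id, name) [ID_##id] = #name
	static const char *id_str[ID_MAX] = {
		ID(NONE,         x),
		ID(CYCLES_IN_TX, cpu/cycles-t/),
	};
	#undef ID

	int main(void)
	{
		const char *evname = "cpu/cycles-t/";
		int i;

		for (i = 0; i < ID_MAX; i++)
			if (!strcmp(evname, id_str[i]))
				printf("matched id %d\n", i);	/* prints 1 */
		return 0;
	}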
index 5667fc3e39cf45fe31f099e702a68eeeb202d16f..093dc3cb28dd3f62cb593095dfdc9c59317e0446 100644 (file)
@@ -2,6 +2,7 @@
 #define __PERF_STATS_H
 
 #include <linux/types.h>
+#include <stdio.h>
 
 struct stats
 {
@@ -9,6 +10,27 @@ struct stats
        u64 max, min;
 };
 
+enum perf_stat_evsel_id {
+       PERF_STAT_EVSEL_ID__NONE = 0,
+       PERF_STAT_EVSEL_ID__CYCLES_IN_TX,
+       PERF_STAT_EVSEL_ID__TRANSACTION_START,
+       PERF_STAT_EVSEL_ID__ELISION_START,
+       PERF_STAT_EVSEL_ID__CYCLES_IN_TX_CP,
+       PERF_STAT_EVSEL_ID__MAX,
+};
+
+struct perf_stat {
+       struct stats            res_stats[3];
+       enum perf_stat_evsel_id id;
+};
+
+enum aggr_mode {
+       AGGR_NONE,
+       AGGR_GLOBAL,
+       AGGR_SOCKET,
+       AGGR_CORE,
+};
+
 void update_stats(struct stats *stats, u64 val);
 double avg_stats(struct stats *stats);
 double stddev_stats(struct stats *stats);
@@ -22,4 +44,28 @@ static inline void init_stats(struct stats *stats)
        stats->min  = (u64) -1;
        stats->max  = 0;
 }
+
+struct perf_evsel;
+bool __perf_evsel_stat__is(struct perf_evsel *evsel,
+                          enum perf_stat_evsel_id id);
+
+#define perf_stat_evsel__is(evsel, id) \
+       __perf_evsel_stat__is(evsel, PERF_STAT_EVSEL_ID__ ## id)
+
+void perf_stat_evsel_id_init(struct perf_evsel *evsel);
+
+extern struct stats walltime_nsecs_stats;
+
+void perf_stat__reset_shadow_stats(void);
+void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 *count,
+                                   int cpu);
+void perf_stat__print_shadow_stats(FILE *out, struct perf_evsel *evsel,
+                                  double avg, int cpu, enum aggr_mode aggr);
+
+struct perf_counts *perf_counts__new(int ncpus);
+void perf_counts__delete(struct perf_counts *counts);
+
+void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus);
+int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus);
+void perf_evsel__free_counts(struct perf_evsel *evsel);
 #endif
index 79a757a2a15c22db5eec8494d662071cde57a12e..bcae659b65462cddff5c03c2c41a8fc675ad05bc 100644 (file)
@@ -170,6 +170,46 @@ struct strfilter *strfilter__new(const char *rules, const char **err)
        return filter;
 }
 
+static int strfilter__append(struct strfilter *filter, bool _or,
+                            const char *rules, const char **err)
+{
+       struct strfilter_node *right, *root;
+       const char *ep = NULL;
+
+       if (!filter || !rules)
+               return -EINVAL;
+
+       right = strfilter_node__new(rules, &ep);
+       if (!right || *ep != '\0') {
+               if (err)
+                       *err = ep;
+               goto error;
+       }
+       root = strfilter_node__alloc(_or ? OP_or : OP_and, filter->root, right);
+       if (!root) {
+               ep = NULL;
+               goto error;
+       }
+
+       filter->root = root;
+       return 0;
+
+error:
+       strfilter_node__delete(right);
+       return ep ? -EINVAL : -ENOMEM;
+}
+
+int strfilter__or(struct strfilter *filter, const char *rules, const char **err)
+{
+       return strfilter__append(filter, true, rules, err);
+}
+
+int strfilter__and(struct strfilter *filter, const char *rules,
+                  const char **err)
+{
+       return strfilter__append(filter, false, rules, err);
+}
+
 static bool strfilter_node__compare(struct strfilter_node *node,
                                    const char *str)
 {
@@ -197,3 +237,70 @@ bool strfilter__compare(struct strfilter *filter, const char *str)
                return false;
        return strfilter_node__compare(filter->root, str);
 }
+
+static int strfilter_node__sprint(struct strfilter_node *node, char *buf);
+
+/* sprint node in parenthesis if needed */
+static int strfilter_node__sprint_pt(struct strfilter_node *node, char *buf)
+{
+       int len;
+       int pt = node->r ? 2 : 0;       /* don't need to check node->l */
+
+       if (buf && pt)
+               *buf++ = '(';
+       len = strfilter_node__sprint(node, buf);
+       if (len < 0)
+               return len;
+       if (buf && pt)
+               *(buf + len) = ')';
+       return len + pt;
+}
+
+static int strfilter_node__sprint(struct strfilter_node *node, char *buf)
+{
+       int len = 0, rlen;
+
+       if (!node || !node->p)
+               return -EINVAL;
+
+       switch (*node->p) {
+       case '|':
+       case '&':
+               len = strfilter_node__sprint_pt(node->l, buf);
+               if (len < 0)
+                       return len;
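+               /* fall through: emit the operator and the right child */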
+       case '!':
+               if (buf) {
+                       *(buf + len++) = *node->p;
+                       buf += len;
+               } else
+                       len++;
+               rlen = strfilter_node__sprint_pt(node->r, buf);
+               if (rlen < 0)
+                       return rlen;
+               len += rlen;
+               break;
+       default:
+               len = strlen(node->p);
+               if (buf)
+                       strcpy(buf, node->p);
+       }
+
+       return len;
+}
+
+char *strfilter__string(struct strfilter *filter)
+{
+       int len;
+       char *ret = NULL;
+
+       len = strfilter_node__sprint(filter->root, NULL);
+       if (len < 0)
+               return NULL;
+
+       ret = malloc(len + 1);
+       if (ret)
+               strfilter_node__sprint(filter->root, ret);
+
+       return ret;
+}
index fe611f3c9e3965007a7ea5ed9f3fbbc768b5f271..cff5eda88728b20e9ebae22d7476ed14f367869c 100644 (file)
@@ -28,6 +28,32 @@ struct strfilter {
  */
 struct strfilter *strfilter__new(const char *rules, const char **err);
 
+/**
+ * strfilter__or - Append an additional rule by logical-or
+ * @filter: Original string filter
+ * @rules: Filter rule to be appended at the right of the root of
+ *         @filter by using logical-or.
+ * @err: Pointer which points to an error position detected in @rules
+ *
+ * Parse @rules and join it to @filter by using logical-or.
+ * Returns 0 on success, or a negative error code.
+ */
+int strfilter__or(struct strfilter *filter,
+                 const char *rules, const char **err);
+
+/**
+ * strfilter__and - Append an additional rule by logical-and
+ * @filter: Original string filter
+ * @rules: Filter rule to be appended at the right of the root of
+ *         @filter by using logical-and.
+ * @err: Pointer which points to an error position detected in @rules
+ *
+ * Parse @rules and join it to @filter by using logical-and.
+ * Returns 0 on success, or a negative error code.
+ */
+int strfilter__and(struct strfilter *filter,
+                  const char *rules, const char **err);
+
 /**
  * strfilter__compare - compare given string and a string filter
  * @filter: String filter
@@ -45,4 +71,13 @@ bool strfilter__compare(struct strfilter *filter, const char *str);
  */
 void strfilter__delete(struct strfilter *filter);
 
+/**
+ * strfilter__string - Reconstruct a rule string from filter
+ * @filter: String filter to reconstruct
+ *
+ * Reconstruct a rule string from @filter. This is useful for
+ * debug messages. Note that the returned string must be freed
+ * by the caller.
+ */
+char *strfilter__string(struct strfilter *filter);
+
 #endif
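
Taken together, the appenders and strfilter__string() let a filter be grown incrementally and then dumped for inspection. A hedged usage sketch (the rule strings are made-up examples; assumes perf's strfilter.h and debug.h are available):

	static void build_and_dump_filter(void)
	{
		const char *err = NULL;
		struct strfilter *filter = strfilter__new("sys_read", &err);

		if (filter && strfilter__or(filter, "sys_write", &err) == 0) {
			char *s = strfilter__string(filter);

			if (s) {
				pr_debug("filter: %s\n", s); /* "sys_read|sys_write" */
				free(s);
			}
		}
		strfilter__delete(filter);
	}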
index a7ab6063e0389488a420680db46b96743366c1c2..65f7e389ae0996cae131cbfbcd2196181f15f1e7 100644 (file)
@@ -630,6 +630,11 @@ void symsrc__destroy(struct symsrc *ss)
        close(ss->fd);
 }
 
+bool __weak elf__needs_adjust_symbols(GElf_Ehdr ehdr)
+{
+       return ehdr.e_type == ET_EXEC || ehdr.e_type == ET_REL;
+}
+
 int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                 enum dso_binary_type type)
 {
@@ -678,6 +683,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                }
 
                if (!dso__build_id_equal(dso, build_id)) {
+                       pr_debug("%s: build id mismatch for %s.\n", __func__, name);
                        dso->load_errno = DSO_LOAD_ERRNO__MISMATCHING_BUILDID;
                        goto out_elf_end;
                }
@@ -711,8 +717,7 @@ int symsrc__init(struct symsrc *ss, struct dso *dso, const char *name,
                                                     ".gnu.prelink_undo",
                                                     NULL) != NULL);
        } else {
-               ss->adjust_symbols = ehdr.e_type == ET_EXEC ||
-                                    ehdr.e_type == ET_REL;
+               ss->adjust_symbols = elf__needs_adjust_symbols(ehdr);
        }
 
        ss->name   = strdup(name);
@@ -771,6 +776,8 @@ static bool want_demangle(bool is_kernel_sym)
        return is_kernel_sym ? symbol_conf.demangle_kernel : symbol_conf.demangle;
 }
 
+void __weak arch__elf_sym_adjust(GElf_Sym *sym __maybe_unused) { }
+
 int dso__load_sym(struct dso *dso, struct map *map,
                  struct symsrc *syms_ss, struct symsrc *runtime_ss,
                  symbol_filter_t filter, int kmodule)
@@ -935,6 +942,8 @@ int dso__load_sym(struct dso *dso, struct map *map,
                    (sym.st_value & 1))
                        --sym.st_value;
 
+               arch__elf_sym_adjust(&sym);
+
                if (dso->kernel || kmodule) {
                        char dso_name[PATH_MAX];
 
@@ -963,8 +972,10 @@ int dso__load_sym(struct dso *dso, struct map *map,
                                        map->unmap_ip = map__unmap_ip;
                                        /* Ensure maps are correctly ordered */
                                        if (kmaps) {
+                                               map__get(map);
                                                map_groups__remove(kmaps, map);
                                                map_groups__insert(kmaps, map);
+                                               map__put(map);
                                        }
                                }
 
@@ -1005,7 +1016,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
                                curr_map = map__new2(start, curr_dso,
                                                     map->type);
                                if (curr_map == NULL) {
-                                       dso__delete(curr_dso);
+                                       dso__put(curr_dso);
                                        goto out_elf_end;
                                }
                                if (adjust_kernel_syms) {
@@ -1020,11 +1031,7 @@ int dso__load_sym(struct dso *dso, struct map *map,
                                }
                                curr_dso->symtab_type = dso->symtab_type;
                                map_groups__insert(kmaps, curr_map);
-                               /*
-                                * The new DSO should go to the kernel DSOS
-                                */
-                               dsos__add(&map->groups->machine->kernel_dsos,
-                                         curr_dso);
+                               dsos__add(&map->groups->machine->dsos, curr_dso);
                                dso__set_loaded(curr_dso, map->type);
                        } else
                                curr_dso = curr_map->dso;
index 201f6c4ca738ddffb46d5270a876cf96e8da9322..504f2d73b7eefe2699349ac75c5cc3b875961375 100644 (file)
@@ -85,8 +85,17 @@ static int prefix_underscores_count(const char *str)
        return tail - str;
 }
 
-#define SYMBOL_A 0
-#define SYMBOL_B 1
+int __weak arch__choose_best_symbol(struct symbol *syma,
+                                   struct symbol *symb __maybe_unused)
+{
+       /* Avoid "SyS" kernel syscall aliases */
+       if (strlen(syma->name) >= 3 && !strncmp(syma->name, "SyS", 3))
+               return SYMBOL_B;
+       if (strlen(syma->name) >= 10 && !strncmp(syma->name, "compat_SyS", 10))
+               return SYMBOL_B;
+
+       return SYMBOL_A;
+}
 
 static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
 {
@@ -134,13 +143,7 @@ static int choose_best_symbol(struct symbol *syma, struct symbol *symb)
        else if (na < nb)
                return SYMBOL_B;
 
-       /* Avoid "SyS" kernel syscall aliases */
-       if (na >= 3 && !strncmp(syma->name, "SyS", 3))
-               return SYMBOL_B;
-       if (na >= 10 && !strncmp(syma->name, "compat_SyS", 10))
-               return SYMBOL_B;
-
-       return SYMBOL_A;
+       return arch__choose_best_symbol(syma, symb);
 }
 
 void symbols__fixup_duplicate(struct rb_root *symbols)
@@ -199,18 +202,18 @@ void symbols__fixup_end(struct rb_root *symbols)
 
 void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
 {
-       struct map *prev, *curr;
-       struct rb_node *nd, *prevnd = rb_first(&mg->maps[type]);
+       struct maps *maps = &mg->maps[type];
+       struct map *next, *curr;
 
-       if (prevnd == NULL)
-               return;
+       pthread_rwlock_wrlock(&maps->lock);
 
-       curr = rb_entry(prevnd, struct map, rb_node);
+       curr = maps__first(maps);
+       if (curr == NULL)
+               goto out_unlock;
 
-       for (nd = rb_next(prevnd); nd; nd = rb_next(nd)) {
-               prev = curr;
-               curr = rb_entry(nd, struct map, rb_node);
-               prev->end = curr->start;
+       for (next = map__next(curr); next; next = map__next(curr)) {
+               curr->end = next->start;
+               curr = next;
        }
 
        /*
@@ -218,6 +221,9 @@ void __map_groups__fixup_end(struct map_groups *mg, enum map_type type)
         * last map final address.
         */
        curr->end = ~0ULL;
+
+out_unlock:
+       pthread_rwlock_unlock(&maps->lock);
 }
 
 struct symbol *symbol__new(u64 start, u64 len, u8 binding, const char *name)
@@ -397,7 +403,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                                            const char *name)
 {
        struct rb_node *n;
-       struct symbol_name_rb_node *s;
+       struct symbol_name_rb_node *s = NULL;
 
        if (symbols == NULL)
                return NULL;
@@ -408,7 +414,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                int cmp;
 
                s = rb_entry(n, struct symbol_name_rb_node, rb_node);
-               cmp = strcmp(name, s->sym.name);
+               cmp = arch__compare_symbol_names(name, s->sym.name);
 
                if (cmp < 0)
                        n = n->rb_left;
@@ -426,7 +432,7 @@ static struct symbol *symbols__find_by_name(struct rb_root *symbols,
                struct symbol_name_rb_node *tmp;
 
                tmp = rb_entry(n, struct symbol_name_rb_node, rb_node);
-               if (strcmp(tmp->sym.name, s->sym.name))
+               if (arch__compare_symbol_names(tmp->sym.name, s->sym.name))
                        break;
 
                s = tmp;
@@ -653,14 +659,14 @@ static int dso__split_kallsyms_for_kcore(struct dso *dso, struct map *map,
                curr_map = map_groups__find(kmaps, map->type, pos->start);
 
                if (!curr_map || (filter && filter(curr_map, pos))) {
-                       rb_erase(&pos->rb_node, root);
+                       rb_erase_init(&pos->rb_node, root);
                        symbol__delete(pos);
                } else {
                        pos->start -= curr_map->start - curr_map->pgoff;
                        if (pos->end)
                                pos->end -= curr_map->start - curr_map->pgoff;
                        if (curr_map != map) {
-                               rb_erase(&pos->rb_node, root);
+                               rb_erase_init(&pos->rb_node, root);
                                symbols__insert(
                                        &curr_map->dso->symbols[curr_map->type],
                                        pos);
@@ -780,7 +786,7 @@ static int dso__split_kallsyms(struct dso *dso, struct map *map, u64 delta,
 
                        curr_map = map__new2(pos->start, ndso, map->type);
                        if (curr_map == NULL) {
-                               dso__delete(ndso);
+                               dso__put(ndso);
                                return -1;
                        }
 
@@ -1167,20 +1173,23 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
        /* Add new maps */
        while (!list_empty(&md.maps)) {
                new_map = list_entry(md.maps.next, struct map, node);
-               list_del(&new_map->node);
+               list_del_init(&new_map->node);
                if (new_map == replacement_map) {
                        map->start      = new_map->start;
                        map->end        = new_map->end;
                        map->pgoff      = new_map->pgoff;
                        map->map_ip     = new_map->map_ip;
                        map->unmap_ip   = new_map->unmap_ip;
-                       map__delete(new_map);
                        /* Ensure maps are correctly ordered */
+                       map__get(map);
                        map_groups__remove(kmaps, map);
                        map_groups__insert(kmaps, map);
+                       map__put(map);
                } else {
                        map_groups__insert(kmaps, new_map);
                }
+
+               map__put(new_map);
        }
 
        /*
@@ -1205,8 +1214,8 @@ static int dso__load_kcore(struct dso *dso, struct map *map,
 out_err:
        while (!list_empty(&md.maps)) {
                map = list_entry(md.maps.next, struct map, node);
-               list_del(&map->node);
-               map__delete(map);
+               list_del_init(&map->node);
+               map__put(map);
        }
        close(fd);
        return -EINVAL;
@@ -1355,7 +1364,7 @@ static bool dso__is_compatible_symtab_type(struct dso *dso, bool kmod,
        case DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP:
                /*
                 * kernel modules know their symtab type - it's set when
-                * creating a module dso in machine__new_module().
+                * creating a module dso in machine__findnew_module_map().
                 */
                return kmod && dso->symtab_type == type;
 
@@ -1380,12 +1389,22 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
        struct symsrc *syms_ss = NULL, *runtime_ss = NULL;
        bool kmod;
 
-       dso__set_loaded(dso, map->type);
+       pthread_mutex_lock(&dso->lock);
+
+       /* check again under the dso->lock */
+       if (dso__loaded(dso, map->type)) {
+               ret = 1;
+               goto out;
+       }
 
-       if (dso->kernel == DSO_TYPE_KERNEL)
-               return dso__load_kernel_sym(dso, map, filter);
-       else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
-               return dso__load_guest_kernel_sym(dso, map, filter);
+       if (dso->kernel) {
+               if (dso->kernel == DSO_TYPE_KERNEL)
+                       ret = dso__load_kernel_sym(dso, map, filter);
+               else if (dso->kernel == DSO_TYPE_GUEST_KERNEL)
+                       ret = dso__load_guest_kernel_sym(dso, map, filter);
+
+               goto out;
+       }
 
        if (map->groups && map->groups->machine)
                machine = map->groups->machine;
@@ -1398,18 +1417,18 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
                struct stat st;
 
                if (lstat(dso->name, &st) < 0)
-                       return -1;
+                       goto out;
 
                if (st.st_uid && (st.st_uid != geteuid())) {
                        pr_warning("File %s not owned by current user or root, "
                                "ignoring it.\n", dso->name);
-                       return -1;
+                       goto out;
                }
 
                ret = dso__load_perf_map(dso, map, filter);
                dso->symtab_type = ret > 0 ? DSO_BINARY_TYPE__JAVA_JIT :
                                             DSO_BINARY_TYPE__NOT_FOUND;
-               return ret;
+               goto out;
        }
 
        if (machine)
@@ -1417,7 +1436,7 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
 
        name = malloc(PATH_MAX);
        if (!name)
-               return -1;
+               goto out;
 
        kmod = dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE ||
                dso->symtab_type == DSO_BINARY_TYPE__SYSTEM_PATH_KMODULE_COMP ||
@@ -1498,23 +1517,32 @@ int dso__load(struct dso *dso, struct map *map, symbol_filter_t filter)
 out_free:
        free(name);
        if (ret < 0 && strstr(dso->name, " (deleted)") != NULL)
-               return 0;
+               ret = 0;
+out:
+       dso__set_loaded(dso, map->type);
+       pthread_mutex_unlock(&dso->lock);
+
        return ret;
 }
 
 struct map *map_groups__find_by_name(struct map_groups *mg,
                                     enum map_type type, const char *name)
 {
-       struct rb_node *nd;
+       struct maps *maps = &mg->maps[type];
+       struct map *map;
 
-       for (nd = rb_first(&mg->maps[type]); nd; nd = rb_next(nd)) {
-               struct map *map = rb_entry(nd, struct map, rb_node);
+       pthread_rwlock_rdlock(&maps->lock);
 
+       for (map = maps__first(maps); map; map = map__next(map)) {
                if (map->dso && strcmp(map->dso->short_name, name) == 0)
-                       return map;
+                       goto out_unlock;
        }
 
-       return NULL;
+       map = NULL;
+
+out_unlock:
+       pthread_rwlock_unlock(&maps->lock);
+       return map;
 }
 
 int dso__load_vmlinux(struct dso *dso, struct map *map,
@@ -1802,6 +1830,7 @@ static void vmlinux_path__exit(void)
 {
        while (--vmlinux_path__nr_entries >= 0)
                zfree(&vmlinux_path[vmlinux_path__nr_entries]);
+       vmlinux_path__nr_entries = 0;
 
        zfree(&vmlinux_path);
 }
index 09561500164a07997b49ca744f7f557ee200f043..bef47ead1d9bd1efc5e9620f04b13714286ff616 100644 (file)
@@ -158,8 +158,6 @@ struct ref_reloc_sym {
 struct map_symbol {
        struct map    *map;
        struct symbol *sym;
-       bool          unfolded;
-       bool          has_children;
 };
 
 struct addr_map_symbol {
@@ -303,4 +301,14 @@ int setup_list(struct strlist **list, const char *list_str,
 int setup_intlist(struct intlist **list, const char *list_str,
                  const char *list_name);
 
+#ifdef HAVE_LIBELF_SUPPORT
+bool elf__needs_adjust_symbols(GElf_Ehdr ehdr);
+void arch__elf_sym_adjust(GElf_Sym *sym);
+#endif
+
+#define SYMBOL_A 0
+#define SYMBOL_B 1
+
+int arch__choose_best_symbol(struct symbol *syma, struct symbol *symb);
+
 #endif /* __PERF_SYMBOL */
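
arch__choose_best_symbol() and the elf hooks above follow the kernel's __weak convention: the generic file provides a default definition, and an architecture may supply a strong one that the linker picks instead. The mechanism in miniature, sketched with the raw GCC attribute that the __weak macro expands to (two translation units, shown in one block):

	/* generic.c: weak default, used when no arch override is linked in */
	int __attribute__((weak)) arch_hook(void)
	{
		return 0;
	}

	/* arch/foo.c (optional): a strong definition silently wins at link time */
	int arch_hook(void)
	{
		return 1;
	}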
index 9ed59a452d1ff82966fd4eb99697d9bc3d9dd604..679688e70ae7e72e73d14cc7659cb965cc4d7016 100644 (file)
@@ -219,7 +219,7 @@ static int thread_stack__call_return(struct thread *thread,
        return crp->process(&cr, crp->data);
 }
 
-static int thread_stack__flush(struct thread *thread, struct thread_stack *ts)
+static int __thread_stack__flush(struct thread *thread, struct thread_stack *ts)
 {
        struct call_return_processor *crp = ts->crp;
        int err;
@@ -242,6 +242,14 @@ static int thread_stack__flush(struct thread *thread, struct thread_stack *ts)
        return 0;
 }
 
+int thread_stack__flush(struct thread *thread)
+{
+       if (thread->ts)
+               return __thread_stack__flush(thread, thread->ts);
+
+       return 0;
+}
+
 int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
                        u64 to_ip, u16 insn_len, u64 trace_nr)
 {
@@ -264,7 +272,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
         */
        if (trace_nr != thread->ts->trace_nr) {
                if (thread->ts->trace_nr)
-                       thread_stack__flush(thread, thread->ts);
+                       __thread_stack__flush(thread, thread->ts);
                thread->ts->trace_nr = trace_nr;
        }
 
@@ -297,7 +305,7 @@ void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
 
        if (trace_nr != thread->ts->trace_nr) {
                if (thread->ts->trace_nr)
-                       thread_stack__flush(thread, thread->ts);
+                       __thread_stack__flush(thread, thread->ts);
                thread->ts->trace_nr = trace_nr;
        }
 }
@@ -305,7 +313,7 @@ void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr)
 void thread_stack__free(struct thread *thread)
 {
        if (thread->ts) {
-               thread_stack__flush(thread, thread->ts);
+               __thread_stack__flush(thread, thread->ts);
                zfree(&thread->ts->stack);
                zfree(&thread->ts);
        }
@@ -689,7 +697,7 @@ int thread_stack__process(struct thread *thread, struct comm *comm,
 
        /* Flush stack on exec */
        if (ts->comm != comm && thread->pid_ == thread->tid) {
-               err = thread_stack__flush(thread, ts);
+               err = __thread_stack__flush(thread, ts);
                if (err)
                        return err;
                ts->comm = comm;
index b843bbef8ba2177a66d3b2a3cebb8ac56d8b6f44..e1528f1374c3e5131efe8c2293ef9a6736ea3ed4 100644 (file)
@@ -96,6 +96,7 @@ int thread_stack__event(struct thread *thread, u32 flags, u64 from_ip,
 void thread_stack__set_trace_nr(struct thread *thread, u64 trace_nr);
 void thread_stack__sample(struct thread *thread, struct ip_callchain *chain,
                          size_t sz, u64 ip);
+int thread_stack__flush(struct thread *thread);
 void thread_stack__free(struct thread *thread);
 
 struct call_return_processor *
index 1c8fbc9588c5fddc978e8a07562aecc167a2a752..28c4b746baa19bef9830814c4fe7c69f1be0b06b 100644 (file)
@@ -18,7 +18,7 @@ int thread__init_map_groups(struct thread *thread, struct machine *machine)
        if (pid == thread->tid || pid == -1) {
                thread->mg = map_groups__new(machine);
        } else {
-               leader = machine__findnew_thread(machine, pid, pid);
+               leader = __machine__findnew_thread(machine, pid, pid);
                if (leader)
                        thread->mg = map_groups__get(leader->mg);
        }
@@ -53,7 +53,8 @@ struct thread *thread__new(pid_t pid, pid_t tid)
                        goto err_thread;
 
                list_add(&comm->list, &thread->comm_list);
-
+               atomic_set(&thread->refcnt, 0);
+               RB_CLEAR_NODE(&thread->rb_node);
        }
 
        return thread;
@@ -67,6 +68,8 @@ void thread__delete(struct thread *thread)
 {
        struct comm *comm, *tmp;
 
+       BUG_ON(!RB_EMPTY_NODE(&thread->rb_node));
+
        thread_stack__free(thread);
 
        if (thread->mg) {
@@ -84,13 +87,14 @@ void thread__delete(struct thread *thread)
 
 struct thread *thread__get(struct thread *thread)
 {
-       ++thread->refcnt;
+       if (thread)
+               atomic_inc(&thread->refcnt);
        return thread;
 }
 
 void thread__put(struct thread *thread)
 {
-       if (thread && --thread->refcnt == 0) {
+       if (thread && atomic_dec_and_test(&thread->refcnt)) {
                list_del_init(&thread->node);
                thread__delete(thread);
        }
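
The switch from a plain int to atomic_t get/put above is the standard refcount shape: unconditional increment on get, free on the transition to zero. A sketch of the same pattern using C11 atomics instead of perf's linux/atomic.h wrappers:

	#include <stdatomic.h>
	#include <stdlib.h>

	struct obj {
		atomic_int refcnt;
	};

	static struct obj *obj__get(struct obj *o)
	{
		if (o)
			atomic_fetch_add(&o->refcnt, 1);
		return o;
	}

	static void obj__put(struct obj *o)
	{
		/* fetch_sub returns the old value: 1 means last reference */
		if (o && atomic_fetch_sub(&o->refcnt, 1) == 1)
			free(o);
	}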
index 9b8a54dc34a81963d8026e226bcd3334713b1606..a0ac0317affb5ffc46f69dc00c4c258d0c40c684 100644 (file)
@@ -1,6 +1,7 @@
 #ifndef __PERF_THREAD_H
 #define __PERF_THREAD_H
 
+#include <linux/atomic.h>
 #include <linux/rbtree.h>
 #include <linux/list.h>
 #include <unistd.h>
@@ -21,12 +22,12 @@ struct thread {
        pid_t                   tid;
        pid_t                   ppid;
        int                     cpu;
-       int                     refcnt;
+       atomic_t                refcnt;
        char                    shortname[3];
        bool                    comm_set;
+       int                     comm_len;
        bool                    dead; /* if set thread has exited */
        struct list_head        comm_list;
-       int                     comm_len;
        u64                     db_id;
 
        void                    *priv;
index f93b9734735b9478d3b8da0a2edf6be03403079d..f4822bd03709af52aba3eed89d50f06bc53e4f86 100644 (file)
@@ -20,6 +20,15 @@ static int filter(const struct dirent *dir)
                return 1;
 }
 
+static struct thread_map *thread_map__realloc(struct thread_map *map, int nr)
+{
+       size_t size = sizeof(*map) + sizeof(pid_t) * nr;
+
+       return realloc(map, size);
+}
+
+#define thread_map__alloc(__nr) thread_map__realloc(NULL, __nr)
+
 struct thread_map *thread_map__new_by_pid(pid_t pid)
 {
        struct thread_map *threads;
@@ -33,7 +42,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)
        if (items <= 0)
                return NULL;
 
-       threads = malloc(sizeof(*threads) + sizeof(pid_t) * items);
+       threads = thread_map__alloc(items);
        if (threads != NULL) {
                for (i = 0; i < items; i++)
                        threads->map[i] = atoi(namelist[i]->d_name);
@@ -49,7 +58,7 @@ struct thread_map *thread_map__new_by_pid(pid_t pid)
 
 struct thread_map *thread_map__new_by_tid(pid_t tid)
 {
-       struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+       struct thread_map *threads = thread_map__alloc(1);
 
        if (threads != NULL) {
                threads->map[0] = tid;
@@ -65,8 +74,8 @@ struct thread_map *thread_map__new_by_uid(uid_t uid)
        int max_threads = 32, items, i;
        char path[256];
        struct dirent dirent, *next, **namelist = NULL;
-       struct thread_map *threads = malloc(sizeof(*threads) +
-                                           max_threads * sizeof(pid_t));
+       struct thread_map *threads = thread_map__alloc(max_threads);
+
        if (threads == NULL)
                goto out;
 
@@ -185,8 +194,7 @@ static struct thread_map *thread_map__new_by_pid_str(const char *pid_str)
                        goto out_free_threads;
 
                total_tasks += items;
-               nt = realloc(threads, (sizeof(*threads) +
-                                      sizeof(pid_t) * total_tasks));
+               nt = thread_map__realloc(threads, total_tasks);
                if (nt == NULL)
                        goto out_free_namelist;
 
@@ -216,7 +224,7 @@ out_free_threads:
 
 struct thread_map *thread_map__new_dummy(void)
 {
-       struct thread_map *threads = malloc(sizeof(*threads) + sizeof(pid_t));
+       struct thread_map *threads = thread_map__alloc(1);
 
        if (threads != NULL) {
                threads->map[0] = -1;
@@ -253,7 +261,7 @@ static struct thread_map *thread_map__new_by_tid_str(const char *tid_str)
                        continue;
 
                ntasks++;
-               nt = realloc(threads, sizeof(*threads) + sizeof(pid_t) * ntasks);
+               nt = thread_map__realloc(threads, ntasks);
 
                if (nt == NULL)
                        goto out_free_threads;
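
thread_map__realloc() centralizes the flexible-array sizing that each call site used to spell out by hand. The sizing rule in isolation (the struct and names are illustrative stand-ins, not perf's):

	#include <stdlib.h>
	#include <sys/types.h>

	struct tmap {
		int   nr;
		pid_t map[];	/* flexible array member */
	};

	static struct tmap *tmap__realloc(struct tmap *m, int nr)
	{
		/* header plus nr trailing pid_t slots */
		return realloc(m, sizeof(*m) + nr * sizeof(pid_t));
	}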
index 51d9e56c0f841d89f730f00e72e984a867b9f4fc..c307dd4382863dd7f68314885115138f489c385b 100644 (file)
@@ -3,6 +3,8 @@
 
 #include <stdbool.h>
 
+#include <linux/types.h>
+
 struct perf_session;
 union perf_event;
 struct perf_evlist;
@@ -29,6 +31,9 @@ typedef int (*event_op2)(struct perf_tool *tool, union perf_event *event,
 typedef int (*event_oe)(struct perf_tool *tool, union perf_event *event,
                        struct ordered_events *oe);
 
+typedef s64 (*event_op3)(struct perf_tool *tool, union perf_event *event,
+                        struct perf_session *session);
+
 struct perf_tool {
        event_sample    sample,
                        read;
@@ -38,13 +43,19 @@ struct perf_tool {
                        fork,
                        exit,
                        lost,
+                       lost_samples,
+                       aux,
+                       itrace_start,
                        throttle,
                        unthrottle;
        event_attr_op   attr;
        event_op2       tracing_data;
        event_oe        finished_round;
        event_op2       build_id,
-                       id_index;
+                       id_index,
+                       auxtrace_info,
+                       auxtrace_error;
+       event_op3       auxtrace;
        bool            ordered_events;
        bool            ordering_requires_timestamps;
 };
index 25d6c737be3e673db1904104a83286c5fbda0b40..d4957418657ec3ab88cce7f77b70095ff21d62c8 100644 (file)
@@ -173,7 +173,7 @@ void parse_ftrace_printk(struct pevent *pevent,
        char *line;
        char *next = NULL;
        char *addr_str;
-       char *fmt;
+       char *fmt = NULL;
 
        line = strtok_r(file, "\n", &next);
        while (line) {
index 7b09a443a280429d3de68f03ced731f66b36d3fc..4c00507ee3fd2ad488642def35226711cba821fe 100644 (file)
@@ -269,13 +269,14 @@ static int read_unwind_spec_eh_frame(struct dso *dso, struct machine *machine,
        u64 offset = dso->data.eh_frame_hdr_offset;
 
        if (offset == 0) {
-               fd = dso__data_fd(dso, machine);
+               fd = dso__data_get_fd(dso, machine);
                if (fd < 0)
                        return -EINVAL;
 
                /* Check the .eh_frame section for unwinding info */
                offset = elf_section_offset(fd, ".eh_frame_hdr");
                dso->data.eh_frame_hdr_offset = offset;
+               dso__data_put_fd(dso);
        }
 
        if (offset)
@@ -294,13 +295,14 @@ static int read_unwind_spec_debug_frame(struct dso *dso,
        u64 ofs = dso->data.debug_frame_offset;
 
        if (ofs == 0) {
-               fd = dso__data_fd(dso, machine);
+               fd = dso__data_get_fd(dso, machine);
                if (fd < 0)
                        return -EINVAL;
 
                /* Check the .debug_frame section for unwinding info */
                ofs = elf_section_offset(fd, ".debug_frame");
                dso->data.debug_frame_offset = ofs;
+               dso__data_put_fd(dso);
        }
 
        *offset = ofs;
@@ -353,10 +355,13 @@ find_proc_info(unw_addr_space_t as, unw_word_t ip, unw_proc_info_t *pi,
 #ifndef NO_LIBUNWIND_DEBUG_FRAME
        /* Check the .debug_frame section for unwinding info */
        if (!read_unwind_spec_debug_frame(map->dso, ui->machine, &segbase)) {
-               int fd = dso__data_fd(map->dso, ui->machine);
+               int fd = dso__data_get_fd(map->dso, ui->machine);
                int is_exec = elf_is_exec(fd, map->dso->name);
                unw_word_t base = is_exec ? 0 : map->start;
 
+               if (fd >= 0)
+                       dso__data_put_fd(map->dso);
+
                memset(&di, 0, sizeof(di));
                if (dwarf_find_debug_frame(0, &di, ip, base, map->dso->name,
                                           map->start, map->end))
index 4ee6d0d4c9931752e76abe15dd098468c4e0c01f..edc2d633b33224530e9dcb7780877bf7d1be08b3 100644 (file)
@@ -72,20 +72,60 @@ int mkdir_p(char *path, mode_t mode)
        return (stat(path, &st) && mkdir(path, mode)) ? -1 : 0;
 }
 
-static int slow_copyfile(const char *from, const char *to, mode_t mode)
+int rm_rf(char *path)
+{
+       DIR *dir;
+       int ret = 0;
+       struct dirent *d;
+       char namebuf[PATH_MAX];
+
+       dir = opendir(path);
+       if (dir == NULL)
+               return 0;
+
+       while ((d = readdir(dir)) != NULL && !ret) {
+               struct stat statbuf;
+
+               if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+                       continue;
+
+               scnprintf(namebuf, sizeof(namebuf), "%s/%s",
+                         path, d->d_name);
+
+               ret = stat(namebuf, &statbuf);
+               if (ret < 0) {
+                       pr_debug("stat failed: %s\n", namebuf);
+                       break;
+               }
+
+               if (S_ISREG(statbuf.st_mode))
+                       ret = unlink(namebuf);
+               else if (S_ISDIR(statbuf.st_mode))
+                       ret = rm_rf(namebuf);
+               else {
+                       pr_debug("unknown file: %s\n", namebuf);
+                       ret = -1;
+               }
+       }
+       closedir(dir);
+
+       if (ret < 0)
+               return ret;
+
+       return rmdir(path);
+}
+
+static int slow_copyfile(const char *from, const char *to)
 {
        int err = -1;
        char *line = NULL;
        size_t n;
        FILE *from_fp = fopen(from, "r"), *to_fp;
-       mode_t old_umask;
 
        if (from_fp == NULL)
                goto out;
 
-       old_umask = umask(mode ^ 0777);
        to_fp = fopen(to, "w");
-       umask(old_umask);
        if (to_fp == NULL)
                goto out_fclose_from;
 
@@ -102,42 +142,81 @@ out:
        return err;
 }
 
+int copyfile_offset(int ifd, loff_t off_in, int ofd, loff_t off_out, u64 size)
+{
+       void *ptr;
+       loff_t pgoff;
+
+       pgoff = off_in & ~(page_size - 1);
+       off_in -= pgoff;
+
+       ptr = mmap(NULL, off_in + size, PROT_READ, MAP_PRIVATE, ifd, pgoff);
+       if (ptr == MAP_FAILED)
+               return -1;
+
+       while (size) {
+               ssize_t ret = pwrite(ofd, ptr + off_in, size, off_out);
+               if (ret < 0 && errno == EINTR)
+                       continue;
+               if (ret <= 0)
+                       break;
+
+               size -= ret;
+               off_in += ret;
+               off_out -= ret;
+               off_out += ret;
+       munmap(ptr, off_in + size);
+
+       return size ? -1 : 0;
+}
+
 int copyfile_mode(const char *from, const char *to, mode_t mode)
 {
        int fromfd, tofd;
        struct stat st;
-       void *addr;
        int err = -1;
+       char *tmp = NULL, *ptr = NULL;
 
        if (stat(from, &st))
                goto out;
 
-       if (st.st_size == 0) /* /proc? do it slowly... */
-               return slow_copyfile(from, to, mode);
-
-       fromfd = open(from, O_RDONLY);
-       if (fromfd < 0)
+       /* extra 'x' at the end is to reserve space for '.' */
+       if (asprintf(&tmp, "%s.XXXXXXx", to) < 0) {
+               tmp = NULL;
                goto out;
+       }
+       ptr = strrchr(tmp, '/');
+       if (!ptr)
+               goto out;
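+       /* hide the temp file: "dir/file.XXXXXXx" becomes "dir/.file.XXXXXX" */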
+       ptr = memmove(ptr + 1, ptr, strlen(ptr) - 1);
+       *ptr = '.';
 
-       tofd = creat(to, mode);
+       tofd = mkstemp(tmp);
        if (tofd < 0)
-               goto out_close_from;
+               goto out;
+
+       if (fchmod(tofd, mode))
+               goto out_close_to;
+
+       if (st.st_size == 0) { /* /proc? do it slowly... */
+               err = slow_copyfile(from, tmp);
+               goto out_close_to;
+       }
 
-       addr = mmap(NULL, st.st_size, PROT_READ, MAP_PRIVATE, fromfd, 0);
-       if (addr == MAP_FAILED)
+       fromfd = open(from, O_RDONLY);
+       if (fromfd < 0)
                goto out_close_to;
 
-       if (write(tofd, addr, st.st_size) == st.st_size)
-               err = 0;
+       err = copyfile_offset(fromfd, 0, tofd, 0, st.st_size);
 
-       munmap(addr, st.st_size);
+       close(fromfd);
 out_close_to:
        close(tofd);
-       if (err)
-               unlink(to);
-out_close_from:
-       close(fromfd);
+       if (!err)
+               err = link(tmp, to);
+       unlink(tmp);
 out:
+       free(tmp);
        return err;
 }
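
copyfile_mode() now funnels through copyfile_offset(), which maps the source read-only and loops on pwrite() so short writes and EINTR become retries instead of failures. A condensed, self-contained sketch of that loop (descriptors assumed already open, copying from offset 0):

	#include <errno.h>
	#include <sys/mman.h>
	#include <sys/types.h>
	#include <unistd.h>

	static int copy_loop(int ifd, int ofd, size_t size)
	{
		size_t total = size;
		void *ptr = mmap(NULL, total, PROT_READ, MAP_PRIVATE, ifd, 0);
		off_t off = 0;

		if (ptr == MAP_FAILED)
			return -1;

		while (size) {
			ssize_t ret = pwrite(ofd, (char *)ptr + off, size, off);

			if (ret < 0 && errno == EINTR)
				continue;	/* interrupted: retry the same range */
			if (ret <= 0)
				break;		/* hard error or zero-length write */
			size -= ret;
			off  += ret;		/* advance past the (possibly short) write */
		}
		munmap(ptr, total);
		return size ? -1 : 0;
	}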
 
index 1ff23e04ad2730b99f78dbeef3af56ba1a45e9ec..8bce58b47a826918db8f395e3e6c2711e9bbba8e 100644 (file)
@@ -249,14 +249,20 @@ static inline int sane_case(int x, int high)
 }
 
 int mkdir_p(char *path, mode_t mode);
+int rm_rf(char *path);
 int copyfile(const char *from, const char *to);
 int copyfile_mode(const char *from, const char *to, mode_t mode);
+int copyfile_offset(int fromfd, loff_t from_ofs, int tofd, loff_t to_ofs, u64 size);
 
 s64 perf_atoll(const char *str);
 char **argv_split(const char *str, int *argcp);
 void argv_free(char **argv);
 bool strglobmatch(const char *str, const char *pat);
 bool strlazymatch(const char *str, const char *pat);
+static inline bool strisglob(const char *str)
+{
+       return strpbrk(str, "*?[") != NULL;
+}
 int strtailcmp(const char *s1, const char *s2);
 char *strxfrchar(char *s, char from, char to);
 unsigned long convert_unit(unsigned long value, char *unit);
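
strisglob() simply reports whether any glob metacharacter ('*', '?' or '[') occurs in the string, letting callers choose between exact and wildcard matching. For example:

	strisglob("vfs_*");	/* true: contains '*' */
	strisglob("sys_read");	/* false: plain symbol name */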
index 5c7dd796979d0d625df236c82c30b5e22f5ac7d9..4b89118f158db458ae29cca6d20b093d81392cf6 100644 (file)
@@ -101,7 +101,7 @@ static char *get_file(struct vdso_file *vdso_file)
        return vdso;
 }
 
-void vdso__exit(struct machine *machine)
+void machine__exit_vdso(struct machine *machine)
 {
        struct vdso_info *vdso_info = machine->vdso_info;
 
@@ -120,14 +120,14 @@ void vdso__exit(struct machine *machine)
        zfree(&machine->vdso_info);
 }
 
-static struct dso *vdso__new(struct machine *machine, const char *short_name,
-                            const char *long_name)
+static struct dso *__machine__addnew_vdso(struct machine *machine, const char *short_name,
+                                         const char *long_name)
 {
        struct dso *dso;
 
        dso = dso__new(short_name);
        if (dso != NULL) {
-               dsos__add(&machine->user_dsos, dso);
+               __dsos__add(&machine->dsos, dso);
                dso__set_long_name(dso, long_name, false);
        }
 
@@ -230,27 +230,31 @@ static const char *vdso__get_compat_file(struct vdso_file *vdso_file)
        return vdso_file->temp_file_name;
 }
 
-static struct dso *vdso__findnew_compat(struct machine *machine,
-                                       struct vdso_file *vdso_file)
+static struct dso *__machine__findnew_compat(struct machine *machine,
+                                            struct vdso_file *vdso_file)
 {
        const char *file_name;
        struct dso *dso;
 
-       dso = dsos__find(&machine->user_dsos, vdso_file->dso_name, true);
+       pthread_rwlock_wrlock(&machine->dsos.lock);
+       dso = __dsos__find(&machine->dsos, vdso_file->dso_name, true);
        if (dso)
-               return dso;
+               goto out_unlock;
 
        file_name = vdso__get_compat_file(vdso_file);
        if (!file_name)
-               return NULL;
+               goto out_unlock;
 
-       return vdso__new(machine, vdso_file->dso_name, file_name);
+       dso = __machine__addnew_vdso(machine, vdso_file->dso_name, file_name);
+out_unlock:
+       pthread_rwlock_unlock(&machine->dsos.lock);
+       return dso;
 }
 
-static int vdso__dso_findnew_compat(struct machine *machine,
-                                   struct thread *thread,
-                                   struct vdso_info *vdso_info,
-                                   struct dso **dso)
+static int __machine__findnew_vdso_compat(struct machine *machine,
+                                         struct thread *thread,
+                                         struct vdso_info *vdso_info,
+                                         struct dso **dso)
 {
        enum dso_type dso_type;
 
@@ -267,10 +271,10 @@ static int vdso__dso_findnew_compat(struct machine *machine,
 
        switch (dso_type) {
        case DSO__TYPE_32BIT:
-               *dso = vdso__findnew_compat(machine, &vdso_info->vdso32);
+               *dso = __machine__findnew_compat(machine, &vdso_info->vdso32);
                return 1;
        case DSO__TYPE_X32BIT:
-               *dso = vdso__findnew_compat(machine, &vdso_info->vdsox32);
+               *dso = __machine__findnew_compat(machine, &vdso_info->vdsox32);
                return 1;
        case DSO__TYPE_UNKNOWN:
        case DSO__TYPE_64BIT:
@@ -281,35 +285,37 @@ static int vdso__dso_findnew_compat(struct machine *machine,
 
 #endif
 
-struct dso *vdso__dso_findnew(struct machine *machine,
-                             struct thread *thread __maybe_unused)
+struct dso *machine__findnew_vdso(struct machine *machine,
+                                 struct thread *thread __maybe_unused)
 {
        struct vdso_info *vdso_info;
-       struct dso *dso;
+       struct dso *dso = NULL;
 
+       pthread_rwlock_wrlock(&machine->dsos.lock);
        if (!machine->vdso_info)
                machine->vdso_info = vdso_info__new();
 
        vdso_info = machine->vdso_info;
        if (!vdso_info)
-               return NULL;
+               goto out_unlock;
 
 #if BITS_PER_LONG == 64
-       if (vdso__dso_findnew_compat(machine, thread, vdso_info, &dso))
-               return dso;
+       if (__machine__findnew_vdso_compat(machine, thread, vdso_info, &dso))
+               goto out_unlock;
 #endif
 
-       dso = dsos__find(&machine->user_dsos, DSO__NAME_VDSO, true);
+       dso = __dsos__find(&machine->dsos, DSO__NAME_VDSO, true);
        if (!dso) {
                char *file;
 
                file = get_file(&vdso_info->vdso);
-               if (!file)
-                       return NULL;
-
-               dso = vdso__new(machine, DSO__NAME_VDSO, file);
+               if (file)
+                       dso = __machine__addnew_vdso(machine, DSO__NAME_VDSO, file);
        }
 
+out_unlock:
+       dso__get(dso);
+       pthread_rwlock_unlock(&machine->dsos.lock);
        return dso;
 }
 
index d97da1616f0c5b658d8c1846b0285835b3a3478c..cdc4fabfc2124efa9c2c497dc35a5f6b77840a85 100644 (file)
@@ -23,7 +23,7 @@ bool dso__is_vdso(struct dso *dso);
 struct machine;
 struct thread;
 
-struct dso *vdso__dso_findnew(struct machine *machine, struct thread *thread);
-void vdso__exit(struct machine *machine);
+struct dso *machine__findnew_vdso(struct machine *machine, struct thread *thread);
+void machine__exit_vdso(struct machine *machine);
 
 #endif /* __PERF_VDSO__ */
index 22afbf6c536adbb291f4ee87a35edf970722fc5a..c10ba41ef3f6298eb77e624f7f1b14f41555c36c 100644 (file)
@@ -9,11 +9,19 @@ struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size)
        if (xy != NULL) {
                xy->entry_size = entry_size;
                xy->row_size   = row_size;
+               xy->entries    = xlen * ylen;
        }
 
        return xy;
 }
 
+void xyarray__reset(struct xyarray *xy)
+{
+       size_t n = xy->entries * xy->entry_size;
+
+       memset(xy->contents, 0, n);
+}
+
 void xyarray__delete(struct xyarray *xy)
 {
        free(xy);
index c488a07275dd2783f13360a341063de1d87eab6c..7f30af371b7ee692f23b9bf4aeb354584f453722 100644 (file)
@@ -6,11 +6,13 @@
 struct xyarray {
        size_t row_size;
        size_t entry_size;
+       size_t entries;
        char contents[];
 };
 
 struct xyarray *xyarray__new(int xlen, int ylen, size_t entry_size);
 void xyarray__delete(struct xyarray *xy);
+void xyarray__reset(struct xyarray *xy);
 
 static inline void *xyarray__entry(struct xyarray *xy, int x, int y)
 {
index 4039854560d0d5dffe32150562a6f11b2791033f..e367b1a85d70b3e0b7d82c5766d83a31ce006d2a 100644 (file)
@@ -9,7 +9,7 @@ endif
 
 turbostat : turbostat.c
 CFLAGS +=      -Wall
-CFLAGS +=      -DMSRHEADER='"../../../../arch/x86/include/uapi/asm/msr-index.h"'
+CFLAGS +=      -DMSRHEADER='"../../../../arch/x86/include/asm/msr-index.h"'
 
 %: %.c
        @mkdir -p $(BUILD_OUTPUT)
index bac98ca3d4ca7e4cd8efb36e79d550a43bb4d1d4..323b65edfc970b5ba5783de3c16f8b684728e47b 100644 (file)
@@ -52,6 +52,7 @@ unsigned int skip_c0;
 unsigned int skip_c1;
 unsigned int do_nhm_cstates;
 unsigned int do_snb_cstates;
+unsigned int do_knl_cstates;
 unsigned int do_pc2;
 unsigned int do_pc3;
 unsigned int do_pc6;
@@ -91,6 +92,7 @@ unsigned int do_gfx_perf_limit_reasons;
 unsigned int do_ring_perf_limit_reasons;
 unsigned int crystal_hz;
 unsigned long long tsc_hz;
+int base_cpu;
 
 #define RAPL_PKG               (1 << 0)
                                        /* 0x610 MSR_PKG_POWER_LIMIT */
@@ -316,7 +318,7 @@ void print_header(void)
 
        if (do_nhm_cstates)
                outp += sprintf(outp, "  CPU%%c1");
-       if (do_nhm_cstates && !do_slm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
                outp += sprintf(outp, "  CPU%%c3");
        if (do_nhm_cstates)
                outp += sprintf(outp, "  CPU%%c6");
@@ -546,7 +548,7 @@ int format_counters(struct thread_data *t, struct core_data *c,
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                goto done;
 
-       if (do_nhm_cstates && !do_slm_cstates)
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates)
                outp += sprintf(outp, "%8.2f", 100.0 * c->c3/t->tsc);
        if (do_nhm_cstates)
                outp += sprintf(outp, "%8.2f", 100.0 * c->c6/t->tsc);
@@ -1018,14 +1020,17 @@ int get_counters(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (!(t->flags & CPU_IS_FIRST_THREAD_IN_CORE))
                return 0;
 
-       if (do_nhm_cstates && !do_slm_cstates) {
+       if (do_nhm_cstates && !do_slm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C3_RESIDENCY, &c->c3))
                        return -6;
        }
 
-       if (do_nhm_cstates) {
+       if (do_nhm_cstates && !do_knl_cstates) {
                if (get_msr(cpu, MSR_CORE_C6_RESIDENCY, &c->c6))
                        return -7;
+       } else if (do_knl_cstates) {
+               if (get_msr(cpu, MSR_KNL_CORE_C6_RESIDENCY, &c->c6))
+                       return -7;
        }
 
        if (do_snb_cstates)
@@ -1150,7 +1155,7 @@ dump_nhm_platform_info(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_NHM_PLATFORM_INFO, &msr);
+       get_msr(base_cpu, MSR_NHM_PLATFORM_INFO, &msr);
 
        fprintf(stderr, "cpu0: MSR_NHM_PLATFORM_INFO: 0x%08llx\n", msr);
 
@@ -1162,7 +1167,7 @@ dump_nhm_platform_info(void)
        fprintf(stderr, "%d * %.0f = %.0f MHz base frequency\n",
                ratio, bclk, ratio * bclk);
 
-       get_msr(0, MSR_IA32_POWER_CTL, &msr);
+       get_msr(base_cpu, MSR_IA32_POWER_CTL, &msr);
        fprintf(stderr, "cpu0: MSR_IA32_POWER_CTL: 0x%08llx (C1E auto-promotion: %sabled)\n",
                msr, msr & 0x2 ? "EN" : "DIS");
 
@@ -1175,7 +1180,7 @@ dump_hsw_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT2, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT2, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT2: 0x%08llx\n", msr);
 
@@ -1197,7 +1202,7 @@ dump_ivt_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT1, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT1, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT1: 0x%08llx\n", msr);
 
@@ -1249,7 +1254,7 @@ dump_nhm_turbo_ratio_limits(void)
        unsigned long long msr;
        unsigned int ratio;
 
-       get_msr(0, MSR_TURBO_RATIO_LIMIT, &msr);
+       get_msr(base_cpu, MSR_TURBO_RATIO_LIMIT, &msr);
 
        fprintf(stderr, "cpu0: MSR_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
 
@@ -1295,12 +1300,73 @@ dump_nhm_turbo_ratio_limits(void)
        return;
 }
 
+static void
+dump_knl_turbo_ratio_limits(void)
+{
+       int cores;
+       unsigned int ratio;
+       unsigned long long msr;
+       int delta_cores;
+       int delta_ratio;
+       int i;
+
+       get_msr(base_cpu, MSR_NHM_TURBO_RATIO_LIMIT, &msr);
+
+       fprintf(stderr, "cpu0: MSR_NHM_TURBO_RATIO_LIMIT: 0x%08llx\n", msr);
+
+       /*
+        * Turbo encoding in KNL is as follows:
+        * [7:0] -- Base value of number of active cores of bucket 1.
+        * [15:8] -- Base value of freq ratio of bucket 1.
+        * [20:16] -- +ve delta of number of active cores of bucket 2.
+        *      i.e. active cores of bucket 2 =
+        *      active cores of bucket 1 + delta
+        * [23:21] -- -ve delta of freq ratio of bucket 2.
+        *      i.e. freq ratio of bucket 2 =
+        *      freq ratio of bucket 1 - delta
+        * [28:24] -- +ve delta of number of active cores of bucket 3.
+        * [31:29] -- -ve delta of freq ratio of bucket 3.
+        * [36:32] -- +ve delta of number of active cores of bucket 4.
+        * [39:37] -- -ve delta of freq ratio of bucket 4.
+        * [44:40] -- +ve delta of number of active cores of bucket 5.
+        * [47:45] -- -ve delta of freq ratio of bucket 5.
+        * [52:48] -- +ve delta of number of active cores of bucket 6.
+        * [55:53] -- -ve delta of freq ratio of bucket 6.
+        * [60:56] -- +ve delta of number of active cores of bucket 7.
+        * [63:61] -- -ve delta of freq ratio of bucket 7.
+        */
+       cores = msr & 0xFF;
+       ratio = (msr >> 8) & 0xFF;
+       if (ratio > 0)
+               fprintf(stderr,
+                       "%d * %.0f = %.0f MHz max turbo %d active cores\n",
+                       ratio, bclk, ratio * bclk, cores);
+
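+       /* Buckets 2-7 are packed in 8-bit fields: 5 bits of core delta, 3 bits of ratio delta. */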
+       for (i = 16; i < 64; i = i + 8) {
+               delta_cores = (msr >> i) & 0x1F;
+               delta_ratio = (msr >> (i + 5)) & 0x7;
+               if (!delta_cores || !delta_ratio)
+                       return;
+               cores = cores + delta_cores;
+               ratio = ratio - delta_ratio;
+
+               /*
+                * Only print buckets whose adjusted ratio is still
+                * positive; once it reaches zero the remaining
+                * buckets carry no meaningful data.
+                */
+               if (ratio > 0)
+                       fprintf(stderr,
+                               "%d * %.0f = %.0f MHz max turbo %d active cores\n",
+                               ratio, bclk, ratio * bclk, cores);
+       }
+}
+
 static void
 dump_nhm_cst_cfg(void)
 {
        unsigned long long msr;
 
-       get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+       get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
 #define SNB_C1_AUTO_UNDEMOTE              (1UL << 27)
 #define SNB_C3_AUTO_UNDEMOTE              (1UL << 28)
@@ -1381,12 +1447,41 @@ int parse_int_file(const char *fmt, ...)
 }
 
 /*
- * cpu_is_first_sibling_in_core(cpu)
- * return 1 if given CPU is 1st HT sibling in the core
+ * get_cpu_position_in_core(cpu)
+ * return the position of the CPU among its HT siblings in the core
+ * return -1 if the CPU is not in the list
  */
-int cpu_is_first_sibling_in_core(int cpu)
+int get_cpu_position_in_core(int cpu)
 {
-       return cpu == parse_int_file("/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
+       char path[64];
+       FILE *filep;
+       int this_cpu;
+       char character;
+       int i;
+
+       sprintf(path,
+               "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list",
+               cpu);
+       filep = fopen(path, "r");
+       if (filep == NULL) {
+               perror(path);
+               exit(1);
+       }
+
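+       /* Scan the sibling list in order; this CPU's index in it is its thread position. */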
+       for (i = 0; i < topo.num_threads_per_core; i++) {
+               fscanf(filep, "%d", &this_cpu);
+               if (this_cpu == cpu) {
+                       fclose(filep);
+                       return i;
+               }
+
+               /* Account for no separator after the last thread */
+               if (i != (topo.num_threads_per_core - 1))
+                       fscanf(filep, "%c", &character);
+       }
+
+       fclose(filep);
+       return -1;
 }
 
 /*
@@ -1412,25 +1507,31 @@ int get_num_ht_siblings(int cpu)
 {
        char path[80];
        FILE *filep;
-       int sib1, sib2;
-       int matches;
+       int sib1;
+       int matches = 0;
        char character;
+       char str[100];
+       char *ch;
 
        sprintf(path, "/sys/devices/system/cpu/cpu%d/topology/thread_siblings_list", cpu);
        filep = fopen_or_die(path, "r");
+
        /*
         * file format:
-        * if a pair of number with a character between: 2 siblings (eg. 1-2, or 1,4)
-        * otherwinse 1 sibling (self).
+        * A ',' or '-' separated set of numbers
+        * (e.g. 1-2 or 1,3,4,5)
         */
-       matches = fscanf(filep, "%d%c%d\n", &sib1, &character, &sib2);
+       fscanf(filep, "%d%c\n", &sib1, &character);
+       fseek(filep, 0, SEEK_SET);
+       fgets(str, 100, filep);
+       /* A single-CPU list has no separator: the %c above read '\n' */
+       ch = (character == '\n') ? NULL : strchr(str, character);
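+       /* Count separator occurrences; the sibling count is separators + 1. */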
+       while (ch != NULL) {
+               matches++;
+               ch = strchr(ch+1, character);
+       }
 
        fclose(filep);
-
-       if (matches == 3)
-               return 2;
-       else
-               return 1;
+       return matches+1;
 }
 
 /*
@@ -1594,8 +1695,10 @@ restart:
 void check_dev_msr()
 {
        struct stat sb;
+       char pathname[32];
 
-       if (stat("/dev/cpu/0/msr", &sb))
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (stat(pathname, &sb))
                if (system("/sbin/modprobe msr > /dev/null 2>&1"))
                        err(-5, "no /dev/cpu/0/msr, Try \"# modprobe msr\" ");
 }
@@ -1608,6 +1711,7 @@ void check_permissions()
        cap_user_data_t cap_data = &cap_data_data;
        extern int capget(cap_user_header_t hdrp, cap_user_data_t datap);
        int do_exit = 0;
+       char pathname[32];
 
        /* check for CAP_SYS_RAWIO */
        cap_header->pid = getpid();
@@ -1622,7 +1726,8 @@ void check_permissions()
        }
 
        /* test file permissions */
-       if (euidaccess("/dev/cpu/0/msr", R_OK)) {
+       sprintf(pathname, "/dev/cpu/%d/msr", base_cpu);
+       if (euidaccess(pathname, R_OK)) {
                do_exit++;
                warn("/dev/cpu/0/msr open failed, try chown or chmod +r /dev/cpu/*/msr");
        }
@@ -1704,7 +1809,7 @@ int probe_nhm_msrs(unsigned int family, unsigned int model)
        default:
                return 0;
        }
-       get_msr(0, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
+       get_msr(base_cpu, MSR_NHM_SNB_PKG_CST_CFG_CTL, &msr);
 
        pkg_cstate_limit = pkg_cstate_limits[msr & 0xF];
 
@@ -1753,6 +1858,21 @@ int has_hsw_turbo_ratio_limit(unsigned int family, unsigned int model)
        }
 }
 
+int has_knl_turbo_ratio_limit(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+
+       if (family != 6)
+               return 0;
+
+       switch (model) {
+       case 0x57:      /* Knights Landing */
+               return 1;
+       default:
+               return 0;
+       }
+}
 static void
 dump_cstate_pstate_config_info(family, model)
 {
@@ -1770,6 +1890,9 @@ dump_cstate_pstate_config_info(family, model)
        if (has_nhm_turbo_ratio_limit(family, model))
                dump_nhm_turbo_ratio_limits();
 
+       if (has_knl_turbo_ratio_limit(family, model))
+               dump_knl_turbo_ratio_limits();
+
        dump_nhm_cst_cfg();
 }
 
@@ -1801,7 +1924,7 @@ int print_epb(struct thread_data *t, struct core_data *c, struct pkg_data *p)
        if (get_msr(cpu, MSR_IA32_ENERGY_PERF_BIAS, &msr))
                return 0;
 
-       switch (msr & 0x7) {
+       switch (msr & 0xF) {
        case ENERGY_PERF_BIAS_PERFORMANCE:
                epb_string = "performance";
                break;
@@ -1925,7 +2048,7 @@ double get_tdp(model)
        unsigned long long msr;
 
        if (do_rapl & RAPL_PKG_POWER_INFO)
-               if (!get_msr(0, MSR_PKG_POWER_INFO, &msr))
+               if (!get_msr(base_cpu, MSR_PKG_POWER_INFO, &msr))
                        return ((msr >> 0) & RAPL_POWER_GRANULARITY) * rapl_power_units;
 
        switch (model) {
@@ -1950,6 +2073,7 @@ rapl_dram_energy_units_probe(int  model, double rapl_energy_units)
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x57:      /* KNL */
                return (rapl_dram_energy_units = 15.3 / 1000000);
        default:
                return (rapl_energy_units);
@@ -1991,6 +2115,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        case 0x3F:      /* HSX */
        case 0x4F:      /* BDX */
        case 0x56:      /* BDX-DE */
+       case 0x57:      /* KNL */
                do_rapl = RAPL_PKG | RAPL_DRAM | RAPL_DRAM_POWER_INFO | RAPL_DRAM_PERF_STATUS | RAPL_PKG_PERF_STATUS | RAPL_PKG_POWER_INFO;
                break;
        case 0x2D:
@@ -2006,7 +2131,7 @@ void rapl_probe(unsigned int family, unsigned int model)
        }
 
        /* units on package 0, verify later other packages match */
-       if (get_msr(0, MSR_RAPL_POWER_UNIT, &msr))
+       if (get_msr(base_cpu, MSR_RAPL_POWER_UNIT, &msr))
                return;
 
        rapl_power_units = 1.0 / (1 << (msr & 0xF));
@@ -2331,6 +2456,17 @@ int is_slm(unsigned int family, unsigned int model)
        return 0;
 }
 
+int is_knl(unsigned int family, unsigned int model)
+{
+       if (!genuine_intel)
+               return 0;
+       switch (model) {
+       case 0x57:      /* KNL */
+               return 1;
+       }
+       return 0;
+}
+
 #define SLM_BCLK_FREQS 5
 double slm_freq_table[SLM_BCLK_FREQS] = { 83.3, 100.0, 133.3, 116.7, 80.0};
 
@@ -2340,7 +2476,7 @@ double slm_bclk(void)
        unsigned int i;
        double freq;
 
-       if (get_msr(0, MSR_FSB_FREQ, &msr))
+       if (get_msr(base_cpu, MSR_FSB_FREQ, &msr))
                fprintf(stderr, "SLM BCLK: unknown\n");
 
        i = msr & 0xf;
@@ -2408,7 +2544,7 @@ int set_temperature_target(struct thread_data *t, struct core_data *c, struct pk
        if (!do_nhm_platform_info)
                goto guess;
 
-       if (get_msr(0, MSR_IA32_TEMPERATURE_TARGET, &msr))
+       if (get_msr(base_cpu, MSR_IA32_TEMPERATURE_TARGET, &msr))
                goto guess;
 
        target_c_local = (msr >> 16) & 0xFF;
@@ -2541,6 +2677,7 @@ void process_cpuid()
        do_c8_c9_c10 = has_hsw_msrs(family, model);
        do_skl_residency = has_skl_msrs(family, model);
        do_slm_cstates = is_slm(family, model);
+       do_knl_cstates  = is_knl(family, model);
        bclk = discover_bclk(family, model);
 
        rapl_probe(family, model);
@@ -2755,13 +2892,9 @@ int initialize_counters(int cpu_id)
 
        my_package_id = get_physical_package_id(cpu_id);
        my_core_id = get_core_id(cpu_id);
-
-       if (cpu_is_first_sibling_in_core(cpu_id)) {
-               my_thread_id = 0;
+       my_thread_id = get_cpu_position_in_core(cpu_id);
+       if (!my_thread_id)
                topo.num_cores++;
-       } else {
-               my_thread_id = 1;
-       }
 
        init_counter(EVEN_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
        init_counter(ODD_COUNTERS, my_thread_id, my_core_id, my_package_id, cpu_id);
@@ -2785,13 +2918,24 @@ void setup_all_buffers(void)
        for_all_proc_cpus(initialize_counters);
 }
 
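+/* Record the CPU we run on; all global MSR accesses use it rather than a hardcoded CPU 0. */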
+void set_base_cpu(void)
+{
+       base_cpu = sched_getcpu();
+       if (base_cpu < 0)
+               err(-ENODEV, "No valid cpus found");
+
+       if (debug > 1)
+               fprintf(stderr, "base_cpu = %d\n", base_cpu);
+}
+
 void turbostat_init()
 {
+       setup_all_buffers();
+       set_base_cpu();
        check_dev_msr();
        check_permissions();
        process_cpuid();
 
-       setup_all_buffers();
 
        if (debug)
                for_all_cpus(print_epb, ODD_COUNTERS);
@@ -2870,7 +3014,7 @@ int get_and_dump_counters(void)
 }
 
 void print_version() {
-       fprintf(stderr, "turbostat version 4.5 2 Apr, 2015"
+       fprintf(stderr, "turbostat version 4.7 27-May, 2015"
                " - Len Brown <lenb@kernel.org>\n");
 }
 
index 15f1a17ca96e69695f5f266cc58198102ba1a0c9..3f81a109520693ec6ebac0abe78c73388b8fc334 100755 (executable)
@@ -66,7 +66,7 @@ make $buildloc $TORTURE_DEFCONFIG > $builddir/Make.defconfig.out 2>&1
 mv $builddir/.config $builddir/.config.sav
 sh $T/upd.sh < $builddir/.config.sav > $builddir/.config
 cp $builddir/.config $builddir/.config.new
-yes '' | make $buildloc oldconfig > $builddir/Make.modconfig.out 2>&1
+yes '' | make $buildloc oldconfig > $builddir/Make.oldconfig.out 2> $builddir/Make.oldconfig.err
 
 # verify new config matches specification.
 configcheck.sh $builddir/.config $c
index 4f5b20f367a944f07f0443a480dd79d061031d87..d86bdd6b6cc2df3148adf7bacff4c6a014edc3cc 100755 (executable)
@@ -43,6 +43,10 @@ do
                if test -f "$i/console.log"
                then
                        configcheck.sh $i/.config $i/ConfigFragment
+                       if test -r $i/Make.oldconfig.err
+                       then
+                               cat $i/Make.oldconfig.err
+                       fi
                        parse-build.sh $i/Make.out $configfile
                        parse-torture.sh $i/console.log $configfile
                        parse-console.sh $i/console.log $configfile
index dd2812ceb0baba2bb4f39343e428582e9619179f..fbe2dbff1e210c2f4711d2f581823d6b14c85799 100755 (executable)
@@ -55,7 +55,7 @@ usage () {
        echo "       --bootargs kernel-boot-arguments"
        echo "       --bootimage relative-path-to-kernel-boot-image"
        echo "       --buildonly"
-       echo "       --configs \"config-file list\""
+       echo "       --configs \"config-file list w/ repeat factor (3*TINY01)\""
        echo "       --cpus N"
        echo "       --datestamp string"
        echo "       --defconfig string"
@@ -178,13 +178,26 @@ fi
 touch $T/cfgcpu
 for CF in $configs
 do
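+       # Split an optional "N*" repeat prefix: config_reps gets the count, CF1 the config name.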
-       if test -f "$CONFIGFRAG/$CF"
+       case $CF in
+       [0-9]\**|[0-9][0-9]\**|[0-9][0-9][0-9]\**)
+               config_reps=`echo $CF | sed -e 's/\*.*$//'`
+               CF1=`echo $CF | sed -e 's/^[^*]*\*//'`
+               ;;
+       *)
+               config_reps=1
+               CF1=$CF
+               ;;
+       esac
+       if test -f "$CONFIGFRAG/$CF1"
        then
-               cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF`
-               cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF" "$cpu_count"`
-               echo $CF $cpu_count >> $T/cfgcpu
+               cpu_count=`configNR_CPUS.sh $CONFIGFRAG/$CF1`
+               cpu_count=`configfrag_boot_cpus "$TORTURE_BOOTARGS" "$CONFIGFRAG/$CF1" "$cpu_count"`
+               for ((cur_rep=0;cur_rep<$config_reps;cur_rep++))
+               do
+                       echo $CF1 $cpu_count >> $T/cfgcpu
+               done
        else
-               echo "The --configs file $CF does not exist, terminating."
+               echo "The --configs file $CF1 does not exist, terminating."
                exit 1
        fi
 done
index 49701218dc620481ff32344863411413e208da66..f824b4c9d9d9132d9681c5fe51c26caa76d4d8de 100644 (file)
@@ -1,3 +1,5 @@
 CONFIG_RCU_TORTURE_TEST=y
 CONFIG_PRINTK_TIME=y
+CONFIG_RCU_TORTURE_TEST_SLOW_CLEANUP=y
 CONFIG_RCU_TORTURE_TEST_SLOW_INIT=y
+CONFIG_RCU_TORTURE_TEST_SLOW_PREINIT=y
index 9fbb41b9b3149ed62cb0e97aabe6731d7bdc3c6c..1a087c3c8bb861584c069142bfe1fd8b02415956 100644 (file)
@@ -5,3 +5,4 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=n
+CONFIG_RCU_EXPERT=y
index 4b6f272dba27f8483f45c99a4a28f419e1b9d69a..4837430a71c0c3456979eb02a635ac0b9a3e1318 100644 (file)
@@ -5,3 +5,4 @@ CONFIG_HOTPLUG_CPU=y
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
+#CHECK#CONFIG_RCU_EXPERT=n
index 238bfe3bd0cccf5096cace6c1012dac22d111728..84a7d51b7481e7a4173b0e87f9d46b51b65ef1eb 100644 (file)
@@ -1 +1 @@
-rcutorture.torture_type=srcu
+rcutorture.torture_type=srcud
index 97f0a0b27ef7293ed4cb9fa364fbee81629807c2..2cc0e60eba6eed66c1ebf6f5d0c85a19b0564ec3 100644 (file)
@@ -5,5 +5,6 @@ CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
 CONFIG_DEBUG_LOCK_ALLOC=y
-CONFIG_PROVE_RCU=y
-CONFIG_TASKS_RCU=y
+CONFIG_PROVE_LOCKING=n
+#CHECK#CONFIG_PROVE_RCU=n
+CONFIG_RCU_EXPERT=y
index 696d2ea74d13bb92ba91ed45a0f0df951d7234b9..ad2be91e5ee7624e95df63885f70dbbc833afb64 100644 (file)
@@ -2,4 +2,3 @@ CONFIG_SMP=n
 CONFIG_PREEMPT_NONE=y
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=n
-CONFIG_TASKS_RCU=y
index 9c60da5b5d1ddae021ef28ed3b5f5a008186dc11..c70c51d5ded15af901e1a2c267b144a9cc88ba4d 100644 (file)
@@ -6,8 +6,8 @@ CONFIG_HIBERNATION=n
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
-CONFIG_TASKS_RCU=y
 CONFIG_HZ_PERIODIC=n
 CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=y
 CONFIG_NO_HZ_FULL_ALL=y
+#CHECK#CONFIG_RCU_EXPERT=n
index 36e41df3d27aa6ad7abdf56dba609fd51bb1a9b1..f1892e0371c954bd5cfc800a6b9cc87297e9433e 100644 (file)
@@ -8,7 +8,7 @@ CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_TRACE=y
 CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU=y
+#CHECK#CONFIG_PROVE_RCU=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
 CONFIG_PREEMPT_COUNT=y
index 0f0802730014c675b6f830601d4aed340aa1f4d3..6c1a292a65fb499968bd2495fa046aedb02b39f8 100644 (file)
@@ -1,2 +1,3 @@
 rcupdate.rcu_self_test=1
 rcupdate.rcu_self_test_bh=1
+rcutorture.torture_type=rcu_bh
index f8a10a7500c64f057987e3d5e3ab765d322973fb..8e9137f66831f2fa6f7ea1c2de7aba6fd8fa99c2 100644 (file)
@@ -16,3 +16,4 @@ CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
index 629122fb8b4a12323e800f09453a3a06718927d8..aeea6a204d14b1ea87737d6d5024f75668f6dd68 100644 (file)
@@ -14,10 +14,10 @@ CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=3
 CONFIG_RCU_FANOUT_LEAF=3
-CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
index a25de47888a4fc11371adc7bc99a93fe2dfcf8d6..2ac9e68ea3d1481764b82eaba427039ed45fa992 100644 (file)
@@ -14,7 +14,6 @@ CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=3
 CONFIG_RCU_FANOUT_LEAF=3
-CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=n
index 53f24e0a0ab618c8470e1727ecec0a8d78ce5db3..72aa7d87ea99159db02b4f07976abdc99da3e71b 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=8
+CONFIG_NR_CPUS=16
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
@@ -9,12 +9,12 @@ CONFIG_NO_HZ_IDLE=n
 CONFIG_NO_HZ_FULL=n
 CONFIG_RCU_TRACE=y
 CONFIG_HOTPLUG_CPU=y
-CONFIG_RCU_FANOUT=4
-CONFIG_RCU_FANOUT_LEAF=4
-CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_FANOUT=2
+CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=y
 CONFIG_RCU_KTHREAD_PRIO=2
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE03.boot
new file mode 100644 (file)
index 0000000..120c0c8
--- /dev/null
@@ -0,0 +1 @@
+rcutorture.onoff_interval=1 rcutorture.onoff_holdoff=30
index 0f84db35b36d6221b05a1a25d001f459b0870a08..3f5112751cda0d571af0967395d0b80e18e8543a 100644 (file)
@@ -13,10 +13,10 @@ CONFIG_RCU_TRACE=y
 CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
-CONFIG_RCU_FANOUT=2
-CONFIG_RCU_FANOUT_LEAF=2
-CONFIG_RCU_FANOUT_EXACT=n
+CONFIG_RCU_FANOUT=4
+CONFIG_RCU_FANOUT_LEAF=4
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
index 212e3bfd2b2ac0b8d21bc261e247600733e3c5cc..c04dfea6fd217e5ddb45cb7b46257b4979454a44 100644 (file)
@@ -12,11 +12,11 @@ CONFIG_RCU_TRACE=n
 CONFIG_HOTPLUG_CPU=y
 CONFIG_RCU_FANOUT=6
 CONFIG_RCU_FANOUT_LEAF=6
-CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_NONE=y
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU=y
+#CHECK#CONFIG_PROVE_RCU=y
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
index 7eee63b442181267b6ae4ca0c723af59bcb2ac37..f51d2c73a68ec221f45d26d93e0a2743751447b7 100644 (file)
@@ -14,10 +14,10 @@ CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=6
 CONFIG_RCU_FANOUT_LEAF=6
-CONFIG_RCU_FANOUT_EXACT=y
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=y
 CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU=y
+#CHECK#CONFIG_PROVE_RCU=y
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
+CONFIG_RCU_EXPERT=y
index da9a03a398db148effea02957c0e52cad42f0224..dd90f28ed700b31e897ef5f2eff1ed577f031d05 100644 (file)
@@ -1,3 +1,4 @@
 rcupdate.rcu_self_test=1
 rcupdate.rcu_self_test_bh=1
 rcupdate.rcu_self_test_sched=1
+rcutree.rcu_fanout_exact=1
index 92a97fa97dec1c4b06dabea9dcbea5e614bad965..f422af4ff5a31bf0c497c7d652b1c0b6e7d30a34 100644 (file)
@@ -15,8 +15,8 @@ CONFIG_RCU_TRACE=y
 CONFIG_HOTPLUG_CPU=y
 CONFIG_RCU_FANOUT=2
 CONFIG_RCU_FANOUT_LEAF=2
-CONFIG_RCU_FANOUT_EXACT=n
 CONFIG_RCU_NOCB_CPU=n
 CONFIG_DEBUG_LOCK_ALLOC=n
-CONFIG_RCU_CPU_STALL_INFO=y
+CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
index 5812027d6f9ff043fecdcb2558a0c4befe83b50c..a24d2ca30646c3afe48417cdbb47e085a09d7486 100644 (file)
@@ -1,5 +1,5 @@
 CONFIG_SMP=y
-CONFIG_NR_CPUS=16
+CONFIG_NR_CPUS=8
 CONFIG_PREEMPT_NONE=n
 CONFIG_PREEMPT_VOLUNTARY=n
 CONFIG_PREEMPT=y
@@ -13,13 +13,13 @@ CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=3
-CONFIG_RCU_FANOUT_EXACT=y
 CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
 CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_PROVE_LOCKING=y
-CONFIG_PROVE_RCU=y
+#CHECK#CONFIG_PROVE_RCU=y
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+CONFIG_RCU_EXPERT=y
index 3eaeccacb08389117d7d2ace6e4381049a2e5e57..b2b8cea69dc9935cadf032524c64406a4aa92ef7 100644 (file)
@@ -13,7 +13,6 @@ CONFIG_HOTPLUG_CPU=n
 CONFIG_SUSPEND=n
 CONFIG_HIBERNATION=n
 CONFIG_RCU_FANOUT=3
-CONFIG_RCU_FANOUT_EXACT=y
 CONFIG_RCU_FANOUT_LEAF=2
 CONFIG_RCU_NOCB_CPU=y
 CONFIG_RCU_NOCB_CPU_ALL=y
diff --git a/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot b/tools/testing/selftests/rcutorture/configs/rcu/TREE08-T.boot
new file mode 100644 (file)
index 0000000..883149b
--- /dev/null
@@ -0,0 +1 @@
+rcutree.rcu_fanout_exact=1
index 2561daf605ad5f9a09920e98b18ab451d3ddada3..fb066dc82769fe4f910f4e86ec3c4e1541071adb 100644 (file)
@@ -1,3 +1,4 @@
 rcutorture.torture_type=sched
 rcupdate.rcu_self_test=1
 rcupdate.rcu_self_test_sched=1
+rcutree.rcu_fanout_exact=1
index 6076b36f6c0b452c27ffb0c80e40fdacc4014eca..aa4ed08d999d9dbe8b438e39841ab8093d157c34 100644 (file)
@@ -16,3 +16,4 @@ CONFIG_DEBUG_LOCK_ALLOC=n
 CONFIG_RCU_CPU_STALL_INFO=n
 CONFIG_RCU_BOOST=n
 CONFIG_DEBUG_OBJECTS_RCU_HEAD=n
+#CHECK#CONFIG_RCU_EXPERT=n
index ec03c883db005192c3d5c644ff2595f3e3c6f673..b24c0004fc499868046eee81993caba27b5f1827 100644 (file)
@@ -12,13 +12,12 @@ CONFIG_NO_HZ_IDLE -- Do those not otherwise specified. (Groups of two.)
 CONFIG_NO_HZ_FULL -- Do two, one with CONFIG_NO_HZ_FULL_SYSIDLE.
 CONFIG_NO_HZ_FULL_SYSIDLE -- Do one.
 CONFIG_PREEMPT -- Do half.  (First three and #8.)
-CONFIG_PROVE_LOCKING -- Do all but two, covering CONFIG_PROVE_RCU and not.
-CONFIG_PROVE_RCU -- Do all but one under CONFIG_PROVE_LOCKING.
+CONFIG_PROVE_LOCKING -- Do several, covering CONFIG_DEBUG_LOCK_ALLOC=y and not.
+CONFIG_PROVE_RCU -- Hardwired to CONFIG_PROVE_LOCKING.
 CONFIG_RCU_BOOST -- one of PREEMPT_RCU.
 CONFIG_RCU_KTHREAD_PRIO -- set to 2 for _BOOST testing.
-CONFIG_RCU_CPU_STALL_INFO -- Do one.
-CONFIG_RCU_FANOUT -- Cover hierarchy as currently, but overlap with others.
-CONFIG_RCU_FANOUT_EXACT -- Do one.
+CONFIG_RCU_CPU_STALL_INFO -- Now default, avoid at least twice.
+CONFIG_RCU_FANOUT -- Cover hierarchy, but overlap with others.
 CONFIG_RCU_FANOUT_LEAF -- Do one non-default.
 CONFIG_RCU_FAST_NO_HZ -- Do one, but not with CONFIG_RCU_NOCB_CPU_ALL.
 CONFIG_RCU_NOCB_CPU -- Do three, see below.
@@ -27,28 +26,19 @@ CONFIG_RCU_NOCB_CPU_NONE -- Do one.
 CONFIG_RCU_NOCB_CPU_ZERO -- Do one.
 CONFIG_RCU_TRACE -- Do half.
 CONFIG_SMP -- Need one !SMP for PREEMPT_RCU.
+!RCU_EXPERT -- Do a few, but these have to be vanilla configurations.
 RCU-bh: Do one with PREEMPT and one with !PREEMPT.
 RCU-sched: Do one with PREEMPT but not BOOST.
 
 
-Hierarchy:
-
-TREE01.        CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=8, CONFIG_RCU_FANOUT_EXACT=n.
-TREE02.        CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=n,
-       CONFIG_RCU_FANOUT_LEAF=3.
-TREE03.        CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=4, CONFIG_RCU_FANOUT_EXACT=n,
-       CONFIG_RCU_FANOUT_LEAF=4.
-TREE04.        CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n,
-       CONFIG_RCU_FANOUT_LEAF=2.
-TREE05.        CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=n
-       CONFIG_RCU_FANOUT_LEAF=6.
-TREE06.        CONFIG_NR_CPUS=8, CONFIG_RCU_FANOUT=6, CONFIG_RCU_FANOUT_EXACT=y
-       CONFIG_RCU_FANOUT_LEAF=6.
-TREE07.        CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=2, CONFIG_RCU_FANOUT_EXACT=n,
-       CONFIG_RCU_FANOUT_LEAF=2.
-TREE08.        CONFIG_NR_CPUS=16, CONFIG_RCU_FANOUT=3, CONFIG_RCU_FANOUT_EXACT=y,
-       CONFIG_RCU_FANOUT_LEAF=2.
-TREE09.        CONFIG_NR_CPUS=1.
+Boot parameters:
+
+nohz_full -- Do at least one.
+maxcpus -- Do at least one.
+rcupdate.rcu_self_test_bh -- Do at least one each, offloaded and not.
+rcupdate.rcu_self_test_sched -- Do at least one each, offloaded and not.
+rcupdate.rcu_self_test -- Do at least one each, offloaded and not.
+rcutree.rcu_fanout_exact -- Do at least one.
 
 
 Kconfig Parameters Ignored:
index b8272e6c4b3b19aa2ec92790b3b6f6cd4addd11b..fb46ad6ac92cadc71254c68c80953eeae51b218b 100644 (file)
@@ -44,6 +44,7 @@
 #include <time.h>
 #include <sys/time.h>
 #include <sys/timex.h>
+#include <sys/errno.h>
 #include <string.h>
 #include <signal.h>
 #include <unistd.h>
@@ -63,6 +64,9 @@ static inline int ksft_exit_fail(void)
 #define NSEC_PER_SEC 1000000000ULL
 #define CLOCK_TAI 11
 
+time_t next_leap;
+int error_found;
+
 /* returns 1 if a <= b, 0 otherwise */
 static inline int in_order(struct timespec a, struct timespec b)
 {
@@ -134,6 +138,35 @@ void handler(int unused)
        exit(0);
 }
 
+void sigalarm(int signo)
+{
+       struct timex tx;
+       int ret;
+
+       tx.modes = 0;
+       ret = adjtimex(&tx);
+
+       if (tx.time.tv_sec < next_leap) {
+               printf("Error: Early timer expiration! (Should be %ld)\n", next_leap);
+               error_found = 1;
+               printf("adjtimex: %10ld sec + %6ld us (%i)\t%s\n",
+                                       tx.time.tv_sec,
+                                       tx.time.tv_usec,
+                                       tx.tai,
+                                       time_state_str(ret));
+       }
+       if (ret != TIME_WAIT) {
+               printf("Error: Timer seeing incorrect NTP state? (Should be TIME_WAIT)\n");
+               error_found = 1;
+               printf("adjtimex: %10ld sec + %6ld us (%i)\t%s\n",
+                                       tx.time.tv_sec,
+                                       tx.time.tv_usec,
+                                       tx.tai,
+                                       time_state_str(ret));
+       }
+}
+
+
 /* Test for known hrtimer failure */
 void test_hrtimer_failure(void)
 {
@@ -144,12 +177,19 @@ void test_hrtimer_failure(void)
        clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &target, NULL);
        clock_gettime(CLOCK_REALTIME, &now);
 
-       if (!in_order(target, now))
+       if (!in_order(target, now)) {
                printf("ERROR: hrtimer early expiration failure observed.\n");
+               error_found = 1;
+       }
 }
 
 int main(int argc, char **argv)
 {
+       timer_t tm1;
+       struct itimerspec its1;
+       struct sigevent se;
+       struct sigaction act;
+       int signum = SIGRTMAX;
        int settime = 0;
        int tai_time = 0;
        int insert = 1;
@@ -191,6 +231,12 @@ int main(int argc, char **argv)
        signal(SIGINT, handler);
        signal(SIGKILL, handler);
 
+       /* Set up timer signal handler: */
+       sigfillset(&act.sa_mask);
+       act.sa_flags = 0;
+       act.sa_handler = sigalarm;
+       sigaction(signum, &act, NULL);
+
        if (iterations < 0)
                printf("This runs continuously. Press ctrl-c to stop\n");
        else
@@ -201,7 +247,7 @@ int main(int argc, char **argv)
                int ret;
                struct timespec ts;
                struct timex tx;
-               time_t now, next_leap;
+               time_t now;
 
                /* Get the current time */
                clock_gettime(CLOCK_REALTIME, &ts);
@@ -251,10 +297,27 @@ int main(int argc, char **argv)
 
                printf("Scheduling leap second for %s", ctime(&next_leap));
 
+               /* Set up timer */
+               printf("Setting timer for %ld - %s", next_leap, ctime(&next_leap));
+               memset(&se, 0, sizeof(se));
+               se.sigev_notify = SIGEV_SIGNAL;
+               se.sigev_signo = signum;
+               se.sigev_value.sival_int = 0;
+               if (timer_create(CLOCK_REALTIME, &se, &tm1) == -1) {
+                       printf("Error: timer_create failed\n");
+                       return ksft_exit_fail();
+               }
+               its1.it_value.tv_sec = next_leap;
+               its1.it_value.tv_nsec = 0;
+               its1.it_interval.tv_sec = 0;
+               its1.it_interval.tv_nsec = 0;
+               timer_settime(tm1, TIMER_ABSTIME, &its1, NULL);
+
                /* Wake up 3 seconds before leap */
                ts.tv_sec = next_leap - 3;
                ts.tv_nsec = 0;
 
                while (clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &ts, NULL))
                        printf("Something woke us up, returning to sleep\n");
 
@@ -276,6 +339,7 @@ int main(int argc, char **argv)
                while (now < next_leap + 2) {
                        char buf[26];
                        struct timespec tai;
+                       int ret;
 
                        tx.modes = 0;
                        ret = adjtimex(&tx);
@@ -308,8 +372,13 @@ int main(int argc, char **argv)
                /* Note if kernel has known hrtimer failure */
                test_hrtimer_failure();
 
-               printf("Leap complete\n\n");
-
+               printf("Leap complete\n");
+               if (error_found) {
+                       printf("Errors observed\n");
+                       clear_time_state();
+                       return ksft_exit_fail();
+               }
+               printf("\n");
                if ((iterations != -1) && !(--iterations))
                        break;
        }
index ddf63569df5ae166e466901aa1a60e1b194fe508..caa60d56d7d1f56d00c6cf087a5c48e863ea7be9 100644 (file)
@@ -1,48 +1,62 @@
-.PHONY: all all_32 all_64 check_build32 clean run_tests
+all:
 
-TARGETS_C_BOTHBITS := sigreturn single_step_syscall
+include ../lib.mk
 
-BINARIES_32 := $(TARGETS_C_BOTHBITS:%=%_32)
+.PHONY: all all_32 all_64 warn_32bit_failure clean
+
+TARGETS_C_BOTHBITS := sigreturn single_step_syscall sysret_ss_attrs
+TARGETS_C_32BIT_ONLY := entry_from_vm86
+
+TARGETS_C_32BIT_ALL := $(TARGETS_C_BOTHBITS) $(TARGETS_C_32BIT_ONLY)
+BINARIES_32 := $(TARGETS_C_32BIT_ALL:%=%_32)
 BINARIES_64 := $(TARGETS_C_BOTHBITS:%=%_64)
 
 CFLAGS := -O2 -g -std=gnu99 -pthread -Wall
 
-UNAME_P := $(shell uname -p)
+UNAME_M := $(shell uname -m)
+CAN_BUILD_I386 := $(shell ./check_cc.sh $(CC) trivial_32bit_program.c -m32)
+CAN_BUILD_X86_64 := $(shell ./check_cc.sh $(CC) trivial_64bit_program.c)
 
-# Always build 32-bit tests
+ifeq ($(CAN_BUILD_I386),1)
 all: all_32
+TEST_PROGS += $(BINARIES_32)
+endif
 
-# If we're on a 64-bit host, build 64-bit tests as well
-ifeq ($(shell uname -p),x86_64)
+ifeq ($(CAN_BUILD_X86_64),1)
 all: all_64
+TEST_PROGS += $(BINARIES_64)
 endif
 
-all_32: check_build32 $(BINARIES_32)
+all_32: $(BINARIES_32)
 
 all_64: $(BINARIES_64)
 
 clean:
        $(RM) $(BINARIES_32) $(BINARIES_64)
 
-run_tests:
-       ./run_x86_tests.sh
-
-$(TARGETS_C_BOTHBITS:%=%_32): %_32: %.c
+$(TARGETS_C_32BIT_ALL:%=%_32): %_32: %.c
        $(CC) -m32 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
 $(TARGETS_C_BOTHBITS:%=%_64): %_64: %.c
        $(CC) -m64 -o $@ $(CFLAGS) $(EXTRA_CFLAGS) $^ -lrt -ldl
 
-check_build32:
-       @if ! $(CC) -m32 -o /dev/null trivial_32bit_program.c; then     \
-         echo "Warning: you seem to have a broken 32-bit build" 2>&1;  \
-         echo "environment.  If you are using a Debian-like";          \
-         echo " distribution, try:";                                   \
-         echo "";                                                      \
-         echo "  apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \
-         echo "";                                                      \
-         echo "If you are using a Fedora-like distribution, try:";     \
-         echo "";                                                      \
-         echo "  yum install glibc-devel.*i686";                       \
-         exit 1;                                                       \
-       fi
+# x86_64 users should be encouraged to install 32-bit libraries
+ifeq ($(CAN_BUILD_I386)$(CAN_BUILD_X86_64),01)
+all: warn_32bit_failure
+
+warn_32bit_failure:
+       @echo "Warning: you seem to have a broken 32-bit build" 2>&1;   \
+       echo "environment.  This will reduce test coverage of 64-bit" 2>&1; \
+       echo "kernels.  If you are using a Debian-like distribution," 2>&1; \
+       echo "try:" 2>&1; \
+       echo "";                                                        \
+       echo "  apt-get install gcc-multilib libc6-i386 libc6-dev-i386"; \
+       echo "";                                                        \
+       echo "If you are using a Fedora-like distribution, try:";       \
+       echo "";                                                        \
+       echo "  yum install glibc-devel.*i686";                         \
+       exit 0;
+endif
+
+# Some tests have additional dependencies.
+sysret_ss_attrs_64: thunks.S
diff --git a/tools/testing/selftests/x86/check_cc.sh b/tools/testing/selftests/x86/check_cc.sh
new file mode 100755 (executable)
index 0000000..172d329
--- /dev/null
@@ -0,0 +1,16 @@
+#!/bin/sh
+# check_cc.sh - Helper to test userspace compilation support
+# Copyright (c) 2015 Andrew Lutomirski
+# GPL v2
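+#
+# Usage: check_cc.sh <compiler> <source file> [extra compiler flags]
+# Prints 1 if the test program compiles, 0 otherwise.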
+
+CC="$1"
+TESTPROG="$2"
+shift 2
+
+if "$CC" -o /dev/null "$TESTPROG" -O0 "$@" 2>/dev/null; then
+    echo 1
+else
+    echo 0
+fi
+
+exit 0
diff --git a/tools/testing/selftests/x86/entry_from_vm86.c b/tools/testing/selftests/x86/entry_from_vm86.c
new file mode 100644 (file)
index 0000000..5c38a18
--- /dev/null
@@ -0,0 +1,114 @@
+/*
+ * entry_from_vm86.c - tests kernel entries from vm86 mode
+ * Copyright (c) 2014-2015 Andrew Lutomirski
+ *
+ * This exercises a few paths that need to special-case vm86 mode.
+ *
+ * GPL v2.
+ */
+
+#define _GNU_SOURCE
+
+#include <assert.h>
+#include <stdlib.h>
+#include <sys/syscall.h>
+#include <sys/signal.h>
+#include <sys/ucontext.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <inttypes.h>
+#include <sys/mman.h>
+#include <err.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <errno.h>
+#include <sys/vm86.h>
+
+static unsigned long load_addr = 0x10000;
+static int nerrs = 0;
+
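+/*
+ * 16-bit payload copied to load_addr; each label below is a distinct
+ * entry point for one test.
+ */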
+asm (
+       ".pushsection .rodata\n\t"
+       ".type vmcode_bound, @object\n\t"
+       "vmcode:\n\t"
+       "vmcode_bound:\n\t"
+       ".code16\n\t"
+       "bound %ax, (2048)\n\t"
+       "int3\n\t"
+       "vmcode_sysenter:\n\t"
+       "sysenter\n\t"
+       ".size vmcode, . - vmcode\n\t"
+       "end_vmcode:\n\t"
+       ".code32\n\t"
+       ".popsection"
+       );
+
+extern unsigned char vmcode[], end_vmcode[];
+extern unsigned char vmcode_bound[], vmcode_sysenter[];
+
+static void do_test(struct vm86plus_struct *v86, unsigned long eip,
+                   const char *text)
+{
+       long ret;
+
+       printf("[RUN]\t%s from vm86 mode\n", text);
+       v86->regs.eip = eip;
+       ret = vm86(VM86_ENTER, v86);
+
+       if (ret == -1 && errno == ENOSYS) {
+               printf("[SKIP]\tvm86 not supported\n");
+               return;
+       }
+
+       if (VM86_TYPE(ret) == VM86_INTx) {
+               char trapname[32];
+               int trapno = VM86_ARG(ret);
+               if (trapno == 13)
+                       strcpy(trapname, "GP");
+               else if (trapno == 5)
+                       strcpy(trapname, "BR");
+               else if (trapno == 14)
+                       strcpy(trapname, "PF");
+               else
+                       sprintf(trapname, "%d", trapno);
+
+               printf("[OK]\tExited vm86 mode due to #%s\n", trapname);
+       } else if (VM86_TYPE(ret) == VM86_UNKNOWN) {
+               printf("[OK]\tExited vm86 mode due to unhandled GP fault\n");
+       } else {
+               printf("[OK]\tExited vm86 mode due to type %ld, arg %ld\n",
+                      VM86_TYPE(ret), VM86_ARG(ret));
+       }
+}
+
+int main(void)
+{
+       struct vm86plus_struct v86;
+       unsigned char *addr = mmap((void *)load_addr, 4096,
+                                  PROT_READ | PROT_WRITE | PROT_EXEC,
+                                  MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
+       if (addr != (unsigned char *)load_addr)
+               err(1, "mmap");
+
+       memcpy(addr, vmcode, end_vmcode - vmcode);
+       addr[2048] = 2;
+       addr[2050] = 3;
+
+       memset(&v86, 0, sizeof(v86));
+
+       v86.regs.cs = load_addr / 16;
+       v86.regs.ss = load_addr / 16;
+       v86.regs.ds = load_addr / 16;
+       v86.regs.es = load_addr / 16;
+
+       assert((v86.regs.cs & 3) == 0); /* Looks like RPL = 0 */
+
+       /* #BR -- should deliver SIG??? */
+       do_test(&v86, vmcode_bound - vmcode, "#BR");
+
+       /* SYSENTER -- should cause #GP or #UD depending on CPU */
+       do_test(&v86, vmcode_sysenter - vmcode, "SYSENTER");
+
+       return (nerrs == 0 ? 0 : 1);
+}
diff --git a/tools/testing/selftests/x86/run_x86_tests.sh b/tools/testing/selftests/x86/run_x86_tests.sh
deleted file mode 100644 (file)
index 3fc19b3..0000000
+++ /dev/null
@@ -1,13 +0,0 @@
-#!/bin/bash
-
-# This is deliberately minimal.  IMO kselftests should provide a standard
-# script here.
-./sigreturn_32 || exit 1
-./single_step_syscall_32 || exit 1
-
-if [[ "$uname -p" -eq "x86_64" ]]; then
-    ./sigreturn_64 || exit 1
-    ./single_step_syscall_64 || exit 1
-fi
-
-exit 0
diff --git a/tools/testing/selftests/x86/sysret_ss_attrs.c b/tools/testing/selftests/x86/sysret_ss_attrs.c
new file mode 100644 (file)
index 0000000..ce42d5a
--- /dev/null
@@ -0,0 +1,112 @@
+/*
+ * sysret_ss_attrs.c - test that syscalls return valid hidden SS attributes
+ * Copyright (c) 2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * On AMD CPUs, SYSRET can return with a valid SS descriptor with
+ * the hidden attributes set to an unusable state.  Make sure the kernel
+ * doesn't let this happen.
+ */
+
+#define _GNU_SOURCE
+
+#include <stdlib.h>
+#include <unistd.h>
+#include <stdio.h>
+#include <string.h>
+#include <sys/mman.h>
+#include <err.h>
+#include <stddef.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <sched.h>
+
+static void *threadproc(void *ctx)
+{
+       /*
+        * Do our best to cause sleeps on this CPU to exit the kernel and
+        * re-enter with SS = 0.
+        */
+       while (true)
+               ;
+
+       return NULL;
+}
+
+#ifdef __x86_64__
+extern unsigned long call32_from_64(void *stack, void (*function)(void));
+
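+/* Minimal 32-bit callee for call32_from_64; it touches the stack, so a bad cached SS faults here. */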
+asm (".pushsection .text\n\t"
+     ".code32\n\t"
+     "test_ss:\n\t"
+     "pushl $0\n\t"
+     "popl %eax\n\t"
+     "ret\n\t"
+     ".code64");
+extern void test_ss(void);
+#endif
+
+int main()
+{
+       /*
+        * Start a busy-looping thread on the same CPU we're on.
+        * For simplicity, just stick everything to CPU 0.  This will
+        * fail in some containers, but that's probably okay.
+        */
+       cpu_set_t cpuset;
+       CPU_ZERO(&cpuset);
+       CPU_SET(0, &cpuset);
+       if (sched_setaffinity(0, sizeof(cpuset), &cpuset) != 0)
+               printf("[WARN]\tsched_setaffinity failed\n");
+
+       pthread_t thread;
+       if (pthread_create(&thread, 0, threadproc, 0) != 0)
+               err(1, "pthread_create");
+
+#ifdef __x86_64__
+       unsigned char *stack32 = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
+                                     MAP_32BIT | MAP_ANONYMOUS | MAP_PRIVATE,
+                                     -1, 0);
+       if (stack32 == MAP_FAILED)
+               err(1, "mmap");
+#endif
+
+       printf("[RUN]\tSyscalls followed by SS validation\n");
+
+       for (int i = 0; i < 1000; i++) {
+               /*
+                * Go to sleep and return using sysret (if we're 64-bit
+                * or we're 32-bit on AMD on a 64-bit kernel).  On AMD CPUs,
+                * SYSRET doesn't fix up the cached SS descriptor, so the
+                * kernel needs some kind of workaround to make sure that we
+                * end the system call with a valid stack segment.  This
+                * can be a confusing failure because the SS *selector*
+                * is the same regardless.
+                */
+               usleep(2);
+
+#ifdef __x86_64__
+               /*
+                * On 32-bit, just doing a syscall through glibc is enough
+                * to cause a crash if our cached SS descriptor is invalid.
+                * On 64-bit, it's not, so try extra hard.
+                */
+               call32_from_64(stack32 + 4088, test_ss);
+#endif
+       }
+
+       printf("[OK]\tWe survived\n");
+
+#ifdef __x86_64__
+       munmap(stack32, 4096);
+#endif
+
+       return 0;
+}
diff --git a/tools/testing/selftests/x86/thunks.S b/tools/testing/selftests/x86/thunks.S
new file mode 100644 (file)
index 0000000..ce8a995
--- /dev/null
@@ -0,0 +1,67 @@
+/*
+ * thunks.S - assembly helpers for mixed-bitness code
+ * Copyright (c) 2015 Andrew Lutomirski
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * These are little helpers that make it easier to switch bitness on
+ * the fly.
+ */
+
+       .text
+
+       .global call32_from_64
+       .type call32_from_64, @function
+call32_from_64:
+       // rdi: stack to use
+       // esi: function to call
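+       //
+       // The old %rsp is stashed at the top of the new stack and
+       // restored from there once the 32-bit call returns.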
+
+       // Save registers
+       pushq %rbx
+       pushq %rbp
+       pushq %r12
+       pushq %r13
+       pushq %r14
+       pushq %r15
+       pushfq
+
+       // Switch stacks
+       mov %rsp,(%rdi)
+       mov %rdi,%rsp
+
+       // Switch to compatibility mode
+       pushq $0x23  /* USER32_CS */
+       pushq $1f
+       lretq
+
+1:
+       .code32
+       // Call the function
+       call *%esi
+       // Switch back to long mode
+       jmp $0x33,$1f
+       .code64
+
+1:
+       // Restore the stack
+       mov (%rsp),%rsp
+
+       // Restore registers
+       popfq
+       popq %r15
+       popq %r14
+       popq %r13
+       popq %r12
+       popq %rbp
+       popq %rbx
+
+       ret
+
+.size call32_from_64, .-call32_from_64
index 2e231beb0a39e7c4b1571dc6ee746941ff069705..fabdf0f51621e30a850c510ffcce8841115c10f2 100644 (file)
@@ -4,6 +4,10 @@
  * GPL v2
  */
 
+#ifndef __i386__
+# error wrong architecture
+#endif
+
 #include <stdio.h>
 
 int main()
diff --git a/tools/testing/selftests/x86/trivial_64bit_program.c b/tools/testing/selftests/x86/trivial_64bit_program.c
new file mode 100644 (file)
index 0000000..b994946
--- /dev/null
@@ -0,0 +1,18 @@
+/*
+ * Trivial program to check that we have a valid 64-bit build environment.
+ * Copyright (c) 2015 Andy Lutomirski
+ * GPL v2
+ */
+
+#ifndef __x86_64__
+# error wrong architecture
+#endif
+
+#include <stdio.h>
+
+int main()
+{
+       printf("\n");
+
+       return 0;
+}
index 0788621c8d760f01f714d377a18c6a390a990fde..2e83dd3655a28ae606a0a2c8c4b80a58331ef24e 100644 (file)
@@ -12,10 +12,6 @@ TARGET=tmon
 INSTALL_PROGRAM=install -m 755 -p
 DEL_FILE=rm -f
 
-INSTALL_CONFIGFILE=install -m 644 -p
-CONFIG_FILE=
-CONFIG_PATH=
-
 # Static builds might require -ltinfo, for instance
 ifneq ($(findstring -static, $(LDFLAGS)),)
 STATIC := --static
@@ -38,13 +34,9 @@ valgrind: tmon
 install:
        - mkdir -p $(INSTALL_ROOT)/$(BINDIR)
        - $(INSTALL_PROGRAM) "$(TARGET)" "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
-       - mkdir -p $(INSTALL_ROOT)/$(CONFIG_PATH)
-       - $(INSTALL_CONFIGFILE) "$(CONFIG_FILE)" "$(INSTALL_ROOT)/$(CONFIG_PATH)"
 
 uninstall:
        $(DEL_FILE) "$(INSTALL_ROOT)/$(BINDIR)/$(TARGET)"
-       $(CONFIG_FILE) "$(CONFIG_PATH)"
-
 
 clean:
        find . -name "*.o" | xargs $(DEL_FILE)
index ac884b65a0725fc9b4e2ed46a490996524fd96a3..93aadaf7ff63d66afab325a67bd09fba982de947 100644 (file)
@@ -3,7 +3,7 @@
 TARGETS=page-types slabinfo page_owner_sort
 
 LIB_DIR = ../lib/api
-LIBS = $(LIB_DIR)/libapikfs.a
+LIBS = $(LIB_DIR)/libapi.a
 
 CC = $(CROSS_COMPILE)gcc
 CFLAGS = -Wall -Wextra -I../lib/